metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jingw/dconf-manager",
"score": 3
} |
#### File: jingw/dconf-manager/dconf_manager.py
```python
from __future__ import annotations
import argparse
import configparser
import posixpath
import subprocess
from typing import Generic
from typing import Sequence
from typing import TypeVar
from typing import cast
IGNORED = "\033[38;5;244m? "
REMOVE = "\033[31m< "
ADD = "\033[32m> "
def format_kv(section: str, option: str, value: str) -> str:
return posixpath.join(section, option) + "=" + value
T = TypeVar("T")
class HierarchicalSet(Generic[T]):
"""A set of paths [a, b, c, ...]
The root is represented by [].
Adding a path X causes all paths starting with X to be part of the set.
For example, adding "a" causes "a/b" to be a member.
However, adding "a/b" does not cause "a" to be a member.
"""
def __init__(self) -> None:
"""Create an empty set"""
self._children: dict[T, HierarchicalSet[T]] | None = {}
# Map from child to HierarchicalSet.
# If empty, this set is empty.
# If None, this set contains all children.
def _add(self, path: Sequence[T], i: int) -> None:
if self._children is not None:
if i < len(path):
if path[i] not in self._children:
self._children[path[i]] = HierarchicalSet()
self._children[path[i]]._add(path, i + 1)
else:
# we have everything under here
self._children = None
def add(self, path: Sequence[T]) -> None:
self._add(path, 0)
def __contains__(self, path: Sequence[T], i: int = 0) -> bool:
"""Check if everything in the given path is in this set
Empty list means to check if the entirety of this node is in the set
"""
if self._children is None:
return True
elif i < len(path):
if path[i] in self._children:
return self._children[path[i]].__contains__(path, i + 1)
else:
return False
else:
return self._children is None
def _expand_tree(self) -> Sequence[tuple[int, T | None]]:
if self._children is None:
return [(0, None)]
else:
result: list[tuple[int, T | None]] = []
for k, v in sorted(self._children.items()):
result.append((0, k))
for level, item in v._expand_tree():
result.append((1 + level, item))
return result
def __str__(self) -> str:
parts = []
for level, item in self._expand_tree():
if item is None:
parts.append(" " * level + "*")
else:
parts.append(" " * level + str(item))
return "\n".join(parts)
def dconf_dump(root: str) -> str:
output: bytes = subprocess.check_output(["dconf", "dump", root])
return output.decode()
def dconf_write(key: str, value: str) -> None:
subprocess.check_call(["dconf", "write", key, value])
def dconf_reset(key: str) -> None:
subprocess.check_call(["dconf", "reset", key])
class ConfigParser(configparser.ConfigParser):
def __init__(self) -> None:
super().__init__(interpolation=None)
def optionxform(self, optionstr: str) -> str:
return optionstr
def main(argv: Sequence[str] | None) -> None:
parser = argparse.ArgumentParser(
description="Tool for managing dconf settings",
)
parser.add_argument(
"-a",
"--apply",
default=False,
action="store_true",
help="if not passed, only show a diff",
)
parser.add_argument("config", type=open, nargs="+", help="INI files to load")
parser.add_argument(
"--root", default="/", help="all actions will be relative to this root"
)
parser.add_argument(
"-i",
"--show-ignored",
default=False,
action="store_true",
help="if true, print unmanaged options",
)
args = parser.parse_args(argv)
root = args.root
dconf_output = dconf_dump(root)
dconf_config = ConfigParser()
dconf_config.read_string(dconf_output)
def write(section: str, option: str, value: str, apply: bool) -> None:
key = posixpath.join(root, section, option)
print(ADD + format_kv(section, option, value))
if apply:
dconf_write(key, value)
def reset(section: str, option: str, value: str, apply: bool) -> None:
key = posixpath.join(root, section, option)
print(REMOVE + format_kv(section, option, value))
if apply:
dconf_reset(key)
desired_config = ConfigParser()
for f in args.config:
desired_config.read_file(f)
f.close()
# excluded sections override managed sections
managed_sections = HierarchicalSet[str]()
excluded_sections = HierarchicalSet[str]()
for section in desired_config:
if section.startswith("-"):
excluded_sections.add(section[1:].split("/"))
else:
managed_sections.add(section.split("/"))
sections_union = sorted(
set(dconf_config.keys())
| set(k for k in desired_config.keys() if not k.startswith("-"))
)
for section in sections_union:
section_parts = section.split("/")
if section_parts in excluded_sections or section_parts not in managed_sections:
# Section is not managed at all.
if args.show_ignored:
for option, value in dconf_config[section].items():
print(IGNORED + format_kv(section, option, value))
elif section not in dconf_config:
# Adding a new section.
for option, value in desired_config[section].items():
write(section, option, value, args.apply)
else:
# Section is present and managed, so diff at option level.
dconf_section = dconf_config[section]
# But it might be managed at a higher level, so it might not be in desired_config.
# In that case we'll end up resetting everything.
desired_section = (
desired_config[section]
if section in desired_config
else cast(dict[str, str], {})
)
for option in sorted(
set(dconf_section.keys()) | set(desired_section.keys())
):
if option not in dconf_section:
write(section, option, desired_section[option], args.apply)
elif option not in desired_section:
reset(section, option, dconf_section[option], args.apply)
elif dconf_section[option] != desired_section[option]:
reset(section, option, dconf_section[option], False)
write(section, option, desired_section[option], args.apply)
else:
# option is equal, do nothing
pass
if __name__ == "__main__":
main(None)
```
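A minimal usage sketch for the manager above (the INI file name and its contents are hypothetical; the flags come from the argument parser in `dconf_manager.py`):
```python
# settings.ini (hypothetical):
#   [org/gnome/desktop/interface]
#   clock-show-seconds=true
#   [-org/gnome/desktop/interface/unmanaged]    <- leading "-" excludes this subtree
from dconf_manager import main

main(["settings.ini"])             # dry run: only print the colored diff
main(["settings.ini", "--apply"])  # actually write/reset dconf keys
```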
#### File: jingw/dconf-manager/test_dconf_manager.py
```python
from __future__ import annotations
import io
import os
import subprocess
import textwrap
import unittest
from typing import Sequence
from unittest import mock
from dconf_manager import HierarchicalSet
from dconf_manager import main
EXPECTED_OUTPUT = """\
\033[32m> add/AddedKey=1
\033[31m< clear/foo/bar/blah=50
\033[31m< overwrite/a=1
\033[32m> overwrite/a=10
\033[31m< overwrite/b=2
\033[32m> overwrite/new=5
"""
EXPECTED_OUTPUT_WITH_IGNORED = """\
\033[32m> add/AddedKey=1
\033[38;5;244m? clear/keep=5
\033[31m< clear/foo/bar/blah=50
\033[38;5;244m? clear/foo/bar/exclude/no=1
\033[38;5;244m? clear/food/hi=1
\033[38;5;244m? ignored/a=1
\033[31m< overwrite/a=1
\033[32m> overwrite/a=10
\033[31m< overwrite/b=2
\033[32m> overwrite/new=5
"""
class TestDconfManager(unittest.TestCase):
def test_hierarchical_set(self) -> None:
s = HierarchicalSet[int]()
assert str(s) == ""
assert [] not in s
assert [0] not in s
s.add([1, 2, 3])
assert str(s) == textwrap.dedent(
"""\
1
2
3
*"""
)
s.add([1, 2, 3, 4])
assert str(s) == textwrap.dedent(
"""\
1
2
3
*"""
)
assert [] not in s
assert [0] not in s
assert [1] not in s
assert [1, 2] not in s
assert [1, 2, 3] in s
assert [1, 2, 3, 3] in s
assert (1, 2, 3, 4) in s
s.add((1, 2))
assert str(s) == textwrap.dedent(
"""\
1
2
*"""
)
assert [] not in s
assert [0] not in s
assert [1] not in s
assert [1, 2] in s
assert [1, 2, 3] in s
assert [1, 2, 3, 3] in s
assert (1, 2, 3, 4) in s
s.add([2])
assert str(s) == textwrap.dedent(
"""\
1
2
*
2
*"""
)
assert [] not in s
assert [0] not in s
assert [1] not in s
assert [2] in s
assert [2, 5] in s
s.add([])
assert str(s) == "*"
assert [] in s
assert [0] in s
assert [1] in s
assert [5, 6, 7, 8] in s
@mock.patch("dconf_manager.dconf_dump")
@mock.patch("dconf_manager.dconf_write")
@mock.patch("dconf_manager.dconf_reset")
def _test_main(
self,
apply: bool,
show_ignored: bool,
reset: mock.Mock,
write: mock.Mock,
dump: mock.Mock,
) -> tuple[Sequence[tuple[object, ...]], Sequence[tuple[object, ...]], str]:
config = textwrap.dedent(
"""\
[ignored]
a=1
[overwrite]
a=1
b=2
[clear]
keep=5
[clear/foo/bar]
blah=50
[clear/foo/bar/exclude]
no=1
[clear/food]
hi=1
"""
)
dump.return_value = config
input = os.path.join(os.path.dirname(__file__), "test-data", "input.ini")
stdout = io.StringIO()
args = [input, "--root", "/the/root"]
if show_ignored:
args.append("--show-ignored")
if apply:
args.append("--apply")
with mock.patch("sys.stdout", stdout):
main(args)
dump.assert_called_once_with("/the/root")
return write.call_args_list, reset.call_args_list, stdout.getvalue()
def test_diff(self) -> None:
writes, resets, stdout = self._test_main(False, False)
assert not writes
assert not resets
assert stdout == EXPECTED_OUTPUT
def test_diff_with_ignored(self) -> None:
writes, resets, stdout = self._test_main(False, True)
assert not writes
assert not resets
assert stdout == EXPECTED_OUTPUT_WITH_IGNORED
def test_apply(self) -> None:
writes, resets, stdout = self._test_main(True, False)
assert writes == [
mock.call("/the/root/add/AddedKey", "1"),
mock.call("/the/root/overwrite/a", "10"),
mock.call("/the/root/overwrite/new", "5"),
]
assert resets == [
mock.call("/the/root/clear/foo/bar/blah"),
mock.call("/the/root/overwrite/b"),
]
assert stdout == EXPECTED_OUTPUT
class TestTools(unittest.TestCase):
def test_black(self) -> None:
subprocess.check_call(["black", "--check", os.path.dirname(__file__)])
def test_flake8(self) -> None:
subprocess.check_call(["flake8"], cwd=os.path.dirname(__file__))
def test_isort(self) -> None:
subprocess.check_call(
["isort", "--check-only", "--diff", os.path.dirname(__file__)]
)
def test_mypy(self) -> None:
subprocess.check_call(["mypy", os.path.dirname(__file__)])
``` |
{
"source": "JingweiJ/JointActorActionSeg",
"score": 3
} |
#### File: JointActorActionSeg/models/batchnorm.py
```python
import keras.layers as KL
class BatchNorm(KL.BatchNormalization):
"""Batch Normalization class. Subclasses the Keras BN class and
hardcodes training=False so the BN layer doesn't update
during training.
Batch normalization has a negative effect on training if batches are small
so we disable it here.
"""
def call(self, inputs, training=None):
        return super(BatchNorm, self).call(inputs, training=False)
```
#### File: JointActorActionSeg/models/proposal_layer.py
```python
import sys
sys.path.append('..')
import utils.matterport_utils as matterport_utils
import numpy as np
import tensorflow as tf
import keras.layers as KL
import keras.engine as KE
'''The proposal layer performs non-max suppression on the anchor boxes according to the
foreground (fg) probabilities output by the RPN.
'''
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 4] where each row is y1, x1, y2, x2
deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
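# Worked example: a box (y1, x1, y2, x2) = (0, 0, 10, 10) has height = width = 10 and center (5, 5).
# Deltas (0.1, 0.2, log(2), log(1)) move the center to (6, 7), double the height and keep the width,
# giving (y1, x1, y2, x2) = (-4, 2, 16, 12) before clipping.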
def clip_boxes_graph(boxes, window):
"""
boxes: [N, 4] each row is y1, x1, y2, x2
window: [4] in the form y1, x1, y2, x2
"""
# Split corners
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, anchors, (bg prob, fg prob)]
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, anchors,
config=None, **kwargs):
"""
anchors: [N, (y1, x1, y2, x2)] anchors defined in image coordinates
"""
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
self.anchors = anchors.astype(np.float32)
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Base anchors
anchors = self.anchors
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = min(10000, self.anchors.shape[0])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True, name="top_anchors").indices
scores = matterport_utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = matterport_utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
anchors = matterport_utils.batch_slice(ix, lambda x: tf.gather(anchors, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = matterport_utils.batch_slice([anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
height, width = self.config.IMAGE_SHAPE[:2]
window = np.array([0, 0, height, width]).astype(np.float32)
boxes = matterport_utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to <NAME>'s paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Normalize dimensions to range of 0 to 1.
normalized_boxes = boxes / np.array([[height, width, height, width]])
# Non-max suppression
def nms(normalized_boxes, scores):
indices = tf.image.non_max_suppression(
normalized_boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(normalized_boxes, indices)
# Pad if needed
padding = self.proposal_count - tf.shape(proposals)[0]
proposals = tf.concat([proposals, tf.zeros([padding, 4])], 0)
return proposals
proposals = matterport_utils.batch_slice([normalized_boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
``` |
{
"source": "JingweiToo/Deep-Learning-Toolbox-Python",
"score": 3
} |
#### File: Deep-Learning-Toolbox-Python/DL/alexnet.py
```python
import tensorflow as tf
from tensorflow.keras import layers, models
def jho(train_ds, valid_ds, num_class):
# parameters
max_epochs = 2
# desired input_shape=(224, 224, 3)
input_shape = (224, 224, 3)
# AlexNet model
model = models.Sequential([
# 1st convolution
layers.Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu',
input_shape=input_shape),
layers.BatchNormalization(),
layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
# 2nd convolution
layers.Conv2D(filters=256, kernel_size=(5,5), strides=(1,1), activation='relu', padding="same"),
layers.BatchNormalization(),
layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
# 3rd convolution
layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
layers.BatchNormalization(),
# 4th convolution
layers.Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
layers.BatchNormalization(),
# 5th convolution
layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same"),
layers.BatchNormalization(),
layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
# flatten
layers.Flatten(),
# 1st fully connected
layers.Dense(4096, activation='relu'),
layers.Dropout(0.5),
# 2nd fully connected
layers.Dense(4096, activation='relu'),
layers.Dropout(0.5),
# 3rd fully connected softmax
layers.Dense(num_class, activation='softmax')
])
# build model
model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=tf.optimizers.SGD(learning_rate=0.001),
metrics=['accuracy'])
# train & validate model
history = model.fit(train_ds,
epochs=max_epochs,
validation_data=valid_ds,
validation_freq=1)
return model, history
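# Illustrative usage sketch (hypothetical data paths; assumes datasets yielding (image, int label)
# batches of 224x224x3 images, e.g. built with image_dataset_from_directory):
#   train_ds = tf.keras.utils.image_dataset_from_directory("data/train", image_size=(224, 224))
#   valid_ds = tf.keras.utils.image_dataset_from_directory("data/valid", image_size=(224, 224))
#   model, history = jho(train_ds, valid_ds, num_class=10)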
``` |
{
"source": "JingweiToo/Feature-Selection-Toolbox",
"score": 3
} |
#### File: Feature-Selection-Toolbox/FS/fpa.py
```python
import numpy as np
from numpy.random import rand
from FS.functionHO import Fun
import math
def init_position(lb, ub, N, dim):
X = np.zeros([N, dim], dtype='float')
for i in range(N):
for d in range(dim):
X[i,d] = lb[0,d] + (ub[0,d] - lb[0,d]) * rand()
return X
def binary_conversion(X, thres, N, dim):
Xbin = np.zeros([N, dim], dtype='int')
for i in range(N):
for d in range(dim):
if X[i,d] > thres:
Xbin[i,d] = 1
else:
Xbin[i,d] = 0
return Xbin
def boundary(x, lb, ub):
if x < lb:
x = lb
if x > ub:
x = ub
return x
# Levy Flight
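# Note: this is Mantegna's algorithm: draw u ~ N(0, sigma^2) and v ~ N(0, 1), then
# step = u / |v|**(1 / beta) approximates a heavy-tailed Levy-stable step with exponent beta.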
def levy_distribution(beta, dim):
# Sigma
nume = math.gamma(1 + beta) * np.sin(np.pi * beta / 2)
deno = math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)
sigma = (nume / deno) ** (1 / beta)
# Parameter u & v
u = np.random.randn(dim) * sigma
v = np.random.randn(dim)
# Step
step = u / abs(v) ** (1 / beta)
LF = 0.01 * step
return LF
def jfs(xtrain, ytrain, opts):
# Parameters
ub = 1
lb = 0
thres = 0.5
beta = 1.5 # levy component
P = 0.8 # switch probability
N = opts['N']
max_iter = opts['T']
if 'P' in opts:
P = opts['P']
if 'beta' in opts:
beta = opts['beta']
# Dimension
dim = np.size(xtrain, 1)
if np.size(lb) == 1:
ub = ub * np.ones([1, dim], dtype='float')
lb = lb * np.ones([1, dim], dtype='float')
# Initialize position
X = init_position(lb, ub, N, dim)
# Binary conversion
Xbin = binary_conversion(X, thres, N, dim)
# Fitness at first iteration
fit = np.zeros([N, 1], dtype='float')
Xgb = np.zeros([1, dim], dtype='float')
fitG = float('inf')
for i in range(N):
fit[i,0] = Fun(xtrain, ytrain, Xbin[i,:], opts)
if fit[i,0] < fitG:
Xgb[0,:] = X[i,:]
fitG = fit[i,0]
# Pre
curve = np.zeros([1, max_iter], dtype='float')
t = 0
curve[0,t] = fitG.copy()
print("Generation:", t + 1)
print("Best (FPA):", curve[0,t])
t += 1
while t < max_iter:
Xnew = np.zeros([N, dim], dtype='float')
for i in range(N):
# Global pollination
if rand() < P:
# Levy distribution (2)
L = levy_distribution(beta, dim)
for d in range(dim):
# Global pollination (1)
Xnew[i,d] = X[i,d] + L[d] * (X[i,d] - Xgb[0,d])
# Boundary
Xnew[i,d] = boundary(Xnew[i,d], lb[0,d], ub[0,d])
# Local pollination
else:
# Different flower j, k in same species
R = np.random.permutation(N)
J = R[0]
K = R[1]
# Epsilon [0 to 1]
eps = rand()
for d in range(dim):
# Local pollination (3)
Xnew[i,d] = X[i,d] + eps * (X[J,d] - X[K,d])
# Boundary
Xnew[i,d] = boundary(Xnew[i,d], lb[0,d], ub[0,d])
# Binary conversion
Xbin = binary_conversion(Xnew, thres, N, dim)
# Greedy selection
for i in range(N):
Fnew = Fun(xtrain, ytrain, Xbin[i,:], opts)
if Fnew <= fit[i,0]:
X[i,:] = Xnew[i,:]
fit[i,0] = Fnew
if fit[i,0] < fitG:
Xgb[0,:] = X[i,:]
fitG = fit[i,0]
# Store result
curve[0,t] = fitG.copy()
print("Generation:", t + 1)
print("Best (FPA):", curve[0,t])
t += 1
# Best feature subset
Gbin = binary_conversion(Xgb, thres, 1, dim)
Gbin = Gbin.reshape(dim)
pos = np.asarray(range(0, dim))
sel_index = pos[Gbin == 1]
num_feat = len(sel_index)
# Create dictionary
fpa_data = {'sf': sel_index, 'c': curve, 'nf': num_feat}
return fpa_data
```
#### File: Feature-Selection-Toolbox/FS/ja.py
```python
import numpy as np
from numpy.random import rand
from FS.functionHO import Fun
def init_position(lb, ub, N, dim):
X = np.zeros([N, dim], dtype='float')
for i in range(N):
for d in range(dim):
X[i,d] = lb[0,d] + (ub[0,d] - lb[0,d]) * rand()
return X
def binary_conversion(X, thres, N, dim):
Xbin = np.zeros([N, dim], dtype='int')
for i in range(N):
for d in range(dim):
if X[i,d] > thres:
Xbin[i,d] = 1
else:
Xbin[i,d] = 0
return Xbin
def boundary(x, lb, ub):
if x < lb:
x = lb
if x > ub:
x = ub
return x
def jfs(xtrain, ytrain, opts):
# Parameters
ub = 1
lb = 0
thres = 0.5
N = opts['N']
max_iter = opts['T']
# Dimension
dim = np.size(xtrain, 1)
if np.size(lb) == 1:
ub = ub * np.ones([1, dim], dtype='float')
lb = lb * np.ones([1, dim], dtype='float')
# Initialize position
X = init_position(lb, ub, N, dim)
# Binary conversion
Xbin = binary_conversion(X, thres, N, dim)
# Fitness at first iteration
fit = np.zeros([N, 1], dtype='float')
Xgb = np.zeros([1, dim], dtype='float')
fitG = float('inf')
for i in range(N):
fit[i,0] = Fun(xtrain, ytrain, Xbin[i,:], opts)
if fit[i,0] < fitG:
Xgb[0,:] = X[i,:]
fitG = fit[i,0]
# Pre
curve = np.zeros([1, max_iter], dtype='float')
t = 0
curve[0,t] = fitG.copy()
print("Generation:", t + 1)
print("Best (JA):", curve[0,t])
t += 1
while t < max_iter:
Xnew = np.zeros([N, dim], dtype='float')
# Identify best & worst in population
idx_max = np.argmax(fit)
Xw = X[idx_max,np.newaxis,:].copy()
idx_min = np.argmin(fit)
Xb = X[idx_min,np.newaxis,:].copy()
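        # The Jaya update below moves each candidate toward the best solution Xb and away
        # from the worst solution Xw, scaled by the random factors r1 and r2.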
for i in range(N):
for d in range(dim):
# Random numbers
                r1 = rand()
                r2 = rand()
# Position update (1)
Xnew[i,d] = X[i,d] + r1 * (Xb[0,d] - abs(X[i,d])) - r2 * (Xw[0,d] - abs(X[i,d]))
# Boundary
Xnew[i,d] = boundary(Xnew[i,d], lb[0,d], ub[0,d])
# Binary conversion
Xbin = binary_conversion(Xnew, thres, N, dim)
# Greedy selection
for i in range(N):
Fnew = Fun(xtrain, ytrain, Xbin[i,:], opts)
if Fnew < fit[i,0]:
X[i,:] = Xnew[i,:]
fit[i,0] = Fnew
if fit[i,0] < fitG:
Xgb[0,:] = X[i,:]
fitG = fit[i,0]
# Store result
curve[0,t] = fitG.copy()
print("Generation:", t + 1)
print("Best (JA):", curve[0,t])
t += 1
# Best feature subset
Gbin = binary_conversion(Xgb, thres, 1, dim)
Gbin = Gbin.reshape(dim)
pos = np.asarray(range(0, dim))
sel_index = pos[Gbin == 1]
num_feat = len(sel_index)
# Create dictionary
ja_data = {'sf': sel_index, 'c': curve, 'nf': num_feat}
return ja_data
``` |
{
"source": "JingweiToo/Machine-Learning-Regression-Toolbox",
"score": 3
} |
#### File: Machine-Learning-Regression-Toolbox/MLR/lr.py
```python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import r2_score
def jho(feat, label, opts):
ho = 0.3 # ratio of testing set
if 'ho' in opts:
ho = opts['ho']
# number of instances
num_data = np.size(feat, 0)
label = label.reshape(num_data) # Solve bug
# prepare data
xtrain, xtest, ytrain, ytest = train_test_split(feat, label, test_size=ho)
# train model
mdl = LinearRegression()
mdl.fit(xtrain, ytrain)
# prediction
ypred = mdl.predict(xtest)
# mean square error
mse = np.mean((ytest - ypred) ** 2)
# r2 score
r2 = r2_score(ytest, ypred)
print("Mean Square Error (LR_HO):", mse)
print("R Square Score (LR_HO):", r2)
lr = {'mse': mse, 'r2': r2, 'xtest': xtest, 'ytest': ytest, 'ypred': ypred}
return lr
def jkfold(feat, label, opts):
kfold = 10 # number of k in kfold
if 'kfold' in opts:
kfold = opts['kfold']
# number of instances
num_data = np.size(feat, 0)
# define selected features
x_data = feat
y_data = label.reshape(num_data) # Solve bug
fold = KFold(n_splits=kfold)
fold.get_n_splits(x_data, y_data)
ytest2 = []
ypred2 = []
t = 0
for train_idx, test_idx in fold.split(x_data, y_data):
xtrain = x_data[train_idx,:]
ytrain = y_data[train_idx]
xtest = x_data[test_idx,:]
ytest = y_data[test_idx]
# train model
mdl = LinearRegression()
mdl.fit(xtrain, ytrain)
# prediction
ypred = mdl.predict(xtest)
ytest2 = np.concatenate((ytest2, ytest), axis=0)
ypred2 = np.concatenate((ypred2, ypred), axis=0)
if t == 0:
xtest2 = xtest
else:
xtest2 = np.concatenate((xtest2, xtest), axis=0)
t += 1
# mean square error
mse = np.mean((ytest2 - ypred2) ** 2)
# r2 score
r2 = r2_score(ytest2, ypred2)
print("Mean Square Error (LR_K-fold):", mse)
print("R Square Score (LR_K-fold):", r2)
lr = {'mse': mse, 'r2': r2, 'xtest': xtest2, 'ytest': ytest2, 'ypred': ypred2}
return lr
def jloo(feat, label, opts):
# number of instances
num_data = np.size(feat, 0)
# define selected features
x_data = feat
y_data = label.reshape(num_data) # Solve bug
loo = LeaveOneOut()
loo.get_n_splits(x_data)
ytest2 = []
ypred2 = []
t = 0
for train_idx, test_idx in loo.split(x_data):
xtrain = x_data[train_idx,:]
ytrain = y_data[train_idx]
xtest = x_data[test_idx,:]
ytest = y_data[test_idx]
# train model
mdl = LinearRegression()
mdl.fit(xtrain, ytrain)
# prediction
ypred = mdl.predict(xtest)
ytest2 = np.concatenate((ytest2, ytest), axis=0)
ypred2 = np.concatenate((ypred2, ypred), axis=0)
if t == 0:
xtest2 = xtest
else:
xtest2 = np.concatenate((xtest2, xtest), axis=0)
t += 1
# mean square error
mse = np.mean((ytest2 - ypred2) ** 2)
# r2 score
r2 = r2_score(ytest2, ypred2)
print("Mean Square Error (LR_LOO):", mse)
print("R Square Score (LR_LOO):", r2)
lr = {'mse': mse, 'r2': r2, 'xtest': xtest2, 'ytest': ytest2, 'ypred': ypred2}
return lr
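# Illustrative usage sketch (hypothetical data; any regression dataset with shape (n_samples, n_features) works):
#   from sklearn.datasets import load_diabetes
#   X, y = load_diabetes(return_X_y=True)
#   res_ho = jho(X, y, {'ho': 0.2})      # hold-out split with 20% test data
#   res_kf = jkfold(X, y, {'kfold': 5})  # 5-fold cross-validation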
``` |
{
"source": "jingweiz/baselines-rudder",
"score": 3
} |
#### File: baselines/ppo2_rudder/reward_redistribution.py
```python
import sys
import time
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from TeLL.layers import (DenseLayer, LSTMLayer, RNNInputLayer, ConcatLayer, MultiplyFactorLayer,
ReshapeLayer, SumLayer, LSTMLayerGetNetInput, LSTMLayerSetNetInput, StopGradientLayer)
from TeLL.initializations import constant
from TeLL.utility.misc_tensorflow import layers_from_specs, tensor_shape_with_flexible_dim, TriangularValueEncoding
from TeLL.regularization import regularize
def observation_network(single_frame, delta_frame, additional_inputs, observation_network_config):
"""Frame processing for LSTM-based network
Frame processing for LSTM-based network; single_frame and delta_frame are processed by convolutional layers,
flattened, and concatenated with the features in additional_inputs;
Parameters
-------
single_frame : TeLL layer or tensor
Single input frame of shape (batchsize, timesteps or 1, x, y, c)
delta_frame : TeLL layer or tensor
Pixel-wise delta of input frame of shape (batchsize, timesteps or 1, x, y, c)
additional_inputs : list of TeLL layers or tensors
List of additional inputs of shape (batchsize, timesteps or 1, f)
observation_network_config : dict
Dictionary containing config for observation network that processes observations and feeds them to LSTM
network:
-----
show_states : bool
Show frames to network?
show_statedeltas : bool
Show frame deltas to network?
prepoc_states : list of dicts
Network config to preprocess frames
prepoc_deltas : list of dicts
Network config to preprocess frame deltas
prepoc_observations : list of dicts
Network config to preprocess features from frame and frame-delta preprocessing networks
Returns
-------
observation_layers : list of TeLL layers
Layers in observation network
visual_features : TeLL layer
Features created from visual input without additional_inputs
"""
print("Building observation network...")
observation_layers = list()
#
# Preprocessing of single input frames
#
if observation_network_config['show_states']:
print("\tSingle frame preprocessing...")
lstm_states_prepoc_layers = []
# Normalize states to [-1, 1]
state_scaling_layer = MultiplyFactorLayer(single_frame, factor=tf.constant(2 / 255., dtype=tf.float32))
lstm_states_prepoc_layers.append(SumLayer([state_scaling_layer, tf.constant([-1.],
dtype=tf.float32)]))
lstm_states_prepoc_layers += layers_from_specs(incoming=lstm_states_prepoc_layers[-1],
layerspecs=observation_network_config['prepoc_states'])
observation_layers.append(lstm_states_prepoc_layers[-1])
#
# Preprocessing of delta input frames
#
if observation_network_config['show_statedeltas']:
print("\tDelta frame preprocessing...")
lstm_deltas_prepoc_layers = []
# Normalize state deltas to [-1, 1]
lstm_deltas_prepoc_layers.append(MultiplyFactorLayer(delta_frame, factor=tf.constant(1 / 255.,
dtype=tf.float32)))
lstm_deltas_prepoc_layers += layers_from_specs(incoming=lstm_deltas_prepoc_layers[-1],
layerspecs=observation_network_config['prepoc_deltas'])
observation_layers.append(lstm_deltas_prepoc_layers[-1])
#
# Further preprocessing of visual observations (concatenated frame- and delta frame features)
#
if len(observation_layers) > 1:
observation_layers.append(ConcatLayer(observation_layers, name="ObservationsConcatLayer"))
if observation_network_config['show_states'] or observation_network_config['show_statedeltas']:
print("\tObservations preprocessing...")
observation_layers += layers_from_specs(incoming=observation_layers[-1],
layerspecs=observation_network_config['prepoc_observations'])
print("\t\tbuilding {}...".format('ReshapeLayer'), end='')
observation_layers.append(ReshapeLayer(observation_layers[-1],
shape=(observation_layers[-1].get_output_shape()[:2]
+ [np.prod(observation_layers[-1].get_output_shape()[2:])]),
name='ObservationsFlattenLayer'))
print(" in {} / out {}".format(observation_layers[-2].get_output_shape(),
observation_layers[-1].get_output_shape()))
visual_features = observation_layers[-1]
#
# Concatenate observations with additional input features
#
if len(additional_inputs):
print("\tAppending additional inputs...")
observation_layers.append(ConcatLayer([observation_layers[-1]] + additional_inputs,
name="ConcatObservationsAdditionalFeatures"))
print("Observation network output shape: {}".format(observation_layers[-1].get_output_shape()))
if len(observation_layers) == 0:
raise ValueError("Observation network empty! Please set show_environment or show_sctaions to True or "
"specify additional_inputs!")
return observation_layers, visual_features
class RewardRedistributionModel(object):
def __init__(self, reward_redistribution_config, observation_network_config, lstm_network_config, training_config,
scopename="RR", loop_parallel_iterations=200, aux_target_horizont=10,
write_histograms=False):
"""LSTM based network for decomposing return and performing reward redistribution via `integrated gradients`_
as described in RUDDER paper
batchsize != 1 currently not supported; Variable sizes for n_timesteps may be used; Example configurations can
be found in folder ppo2_rudder/configs;
Parameters
----------
reward_redistribution_config : dict
Dictionary containing config for reward redistribution:
-----
lambda_eligibility_trace : float
Eligibility trace value for redistributed reward
vf_contrib : float
Weighting of original value function (vf) vs. redistributed reward (rr), s.t.
:math:`reward = vf \cdot vf\_contrib + rr \cdot (1-vf\_contrib)`
use_reward_redistribution_quality_threshold : float
Quality of reward redistribution has to exceed use_reward_redistribution_quality_threshold to be used;
use_reward_redistribution_quality_threshold range is [0,1]; Quality measure is the squared prediction
error, as described in RUDDER paper;
use_reward_redistribution : bool
Use reward redistribution?
rr_junksize : int
                Junk (chunk) size for reward redistribution; junks overlap by one half each
cont_pred_w : float
                Weighting of continuous prediction loss vs. prediction loss of final return at last timestep
intgrd_steps : int
                Number of steps used for the integrated gradients approximation
intgrd_batchsize : int
Integrated gradients is computed batch-wise if intgrd_batchsize > 1
observation_network_config : dict
Dictionary containing config for observation network that processes observations and feeds them to LSTM
network:
-----
show_states : bool
Show frames to network?
show_statedeltas : bool
Show frame deltas to network?
prepoc_states : list of dicts
Network config to preprocess frames
prepoc_deltas : list of dicts
Network config to preprocess frame deltas
prepoc_observations : list of dicts
Network config to preprocess features from frame and frame-delta preprocessing networks
lstm_network_config : dict
Dictionary containing config for LSTM network:
-----
show_actions : bool
Show taken actions to LSTM?
reversed : bool
Process game sequence in reversed order?
layers : list of dicts
Network config for LSTM network and optional additional dense layers
initializations : dict
Initialization config for LSTM network
timestep_encoding : dict
Set "max_value" and "triangle_span" for TeLL.utiltiy.misc_tensorflow.TriangularValueEncoding class
training_config : dict
Dictionary containing config for training and update procedure:
-----
n_no_rr_updates : int
Number of updates to perform without training or using reward redistribution network
n_pretrain_games : int
Number of games to pretrain the reward redistribution network without using it;
downscale_lr_policylag : bool
                Downscale learning rate permanently if policy lag gets too large?
optimizer : tf.train optimizer
Optimizer in tf.train, e.g. "AdamOptimizer"
optimizer_params : dict
Kwargs for optimizer
l1 : float
Weighting for l1 weight regularization
l2 : float
Weighting for l2 weight regularization
clip_gradients : float
Threshold for clipping gradients (clipping by norm)
scopename : str
Name for tensorflow variable scope
loop_parallel_iterations : int
Number of max. parallel loop computations in tf.while loop
aux_target_horizont : int
Number of timesteps to predict ahead in one of the auxiliary tasks (task is to predict the accumulated
reward in the next aux_target_horizont timesteps).
write_histograms : bool
Write histograms of weights and activations to tensorboad for debugging?
"""
self.scopename = scopename
self.placeholders = None
self.data_tensors = None
self.operation_tensors = None
self.summaries = None
self.loop_parallel_iterations = loop_parallel_iterations
self.aux_target_pad = np.zeros((aux_target_horizont - 1,), dtype=np.float32)
self.aux_target_filter = np.ones((aux_target_horizont,), dtype=np.float32) / aux_target_horizont
self.reward_redistribution_config = reward_redistribution_config
self.observation_network_config = observation_network_config
self.lstm_network_config = lstm_network_config
self.training_config = training_config
self.reward_redistribution_config = reward_redistribution_config
self.ingametime = TriangularValueEncoding(**self.lstm_network_config['timestep_encoding'])
self.write_histograms = write_histograms
def get_visual_features(self, single_frame, delta_frame, additional_inputs):
"""Get output features of observation network only"""
print("Building RR visual observation network...")
with tf.variable_scope(self.scopename, reuse=tf.AUTO_REUSE):
with tf.variable_scope("rr_visionsystem", reuse=tf.AUTO_REUSE):
# Create first part of observation network and get visual_features to feed to A2C
_, visual_features = observation_network(single_frame=single_frame,
delta_frame=delta_frame,
additional_inputs=additional_inputs,
observation_network_config=self.observation_network_config)
return visual_features
def build_model(self, state_shape, policy_model, n_actions):
"""Build reward redistribution network, including observation network and LSTM network"""
ingametime = self.ingametime
n_batch = 1 # Currently only works for 1 sequence at a time
print("Building RR observation network...")
# --------------------------------------------------------------------------------------------------------------
# Shapes
# --------------------------------------------------------------------------------------------------------------
states_shape = [n_batch, None] + list(state_shape)
state_shape_rr = [n_batch, 1] + list(state_shape[:-1]) + [1]
actions_shape = [n_batch, None]
action_shape = [n_batch, 1, n_actions]
intgrd_batchsize = self.reward_redistribution_config['intgrd_batchsize']
# --------------------------------------------------------------------------------------------------------------
# Placeholders
# --------------------------------------------------------------------------------------------------------------
game_frames_placeholder = tf.placeholder(shape=states_shape, dtype=tf.uint8)
game_actions_placeholder = tf.placeholder(shape=actions_shape, dtype=tf.int32)
game_rewards_placeholder = tf.placeholder(shape=(None,), dtype=tf.float32)
aux_target_placeholder = tf.placeholder(shape=(None, 2), dtype=tf.float32)
game_length_placeholder = tf.placeholder(shape=(), dtype=tf.int32)
# --------------------------------------------------------------------------------------------------------------
# Input to LSTM network
# --------------------------------------------------------------------------------------------------------------
# Create input layers (these will be set dynamically in LSTM loop)
state_input_layer = RNNInputLayer(tf.zeros(state_shape_rr, dtype=tf.float32))
statedelta_input_layer = RNNInputLayer(tf.zeros(state_shape_rr, dtype=tf.float32))
rr_action_input_layer = RNNInputLayer(tf.zeros(action_shape, dtype=tf.float32))
rr_time_input_layer = RNNInputLayer(ingametime.encode_value(tf.constant(0, shape=(n_batch,), dtype=tf.int32)))
h_actor_input_layer = RNNInputLayer(tf.zeros(shape=policy_model.observation_features_shape))
with tf.variable_scope(self.scopename, reuse=tf.AUTO_REUSE):
with tf.variable_scope("rr_visionsystem", reuse=tf.AUTO_REUSE):
# RR observations will be single-/delta frames and features from A2C, actions, and ingame-time
additional_inputs = [StopGradientLayer(h_actor_input_layer), rr_action_input_layer,
ReshapeLayer(rr_time_input_layer, (n_batch, 1, ingametime.n_nodes_python))]
lstm_prepoc_layers, _ = observation_network(single_frame=state_input_layer,
delta_frame=statedelta_input_layer,
additional_inputs=additional_inputs,
observation_network_config=self.observation_network_config)
rr_input = lstm_prepoc_layers[-1]
rr_input_layer = RNNInputLayer(rr_input)
# ----------------------------------------------------------------------------------------------------------
# LSTM network
# ----------------------------------------------------------------------------------------------------------
print("Building RR LSTM network...")
# 1 node for predicting the return at the last timestep and 3 for auxiliary tasks
n_rr_output_units = 1 + 3
#
# Initialization/activation functions
#
layerspecs = self.lstm_network_config['layers']
init_specs = self.lstm_network_config['initializations']
lstm_layer_index = [i for i, l in enumerate(layerspecs) if l['type'] == 'LSTMLayer'][0]
lstm_specs = layerspecs[lstm_layer_index]
n_lstm = lstm_specs['n_units']
lstm_w_init = lambda scale: lambda *args, **kwargs: tf.truncated_normal(*args, **kwargs) * scale
truncated_normal_init = lambda mean, stddev: \
lambda *args, **kwargs: tf.truncated_normal(mean=mean, stddev=stddev, *args, **kwargs)
if lstm_specs['a_out'] == 'linear':
lstm_a_out = tf.identity
elif lstm_specs['a_out'] == 'tanh':
lstm_a_out = tf.tanh
og_bias = truncated_normal_init(mean=init_specs['og_bias'], stddev=0.1)
ig_bias = truncated_normal_init(mean=init_specs['ig_bias'], stddev=0.1)
ci_bias = truncated_normal_init(mean=init_specs['ci_bias'], stddev=0.1)
fg_bias = truncated_normal_init(mean=init_specs['fg_bias'], stddev=0.1)
#
# Layers setup
#
print("\tLSTM network for RR...")
with tf.variable_scope('lstmnet', reuse=tf.AUTO_REUSE):
# Store all layers in a list
rr_layers = [rr_input_layer]
#
# Create layers before LSTM
#
rr_layers += layers_from_specs(incoming=rr_input_layer, layerspecs=layerspecs[:lstm_layer_index])
rr_lstm_input_layer = rr_layers[-1]
#
# Create LSTM layer
#
w_ig = [lstm_w_init(init_specs['w_ig'][0]), lstm_w_init(init_specs['w_ig'][1])]
w_og = [lstm_w_init(init_specs['w_og'][0]), lstm_w_init(init_specs['w_og'][1])]
w_ci = [lstm_w_init(init_specs['w_ci'][0]), lstm_w_init(init_specs['w_ci'][1])]
w_fg = [lstm_w_init(init_specs['w_fg'][0]), lstm_w_init(init_specs['w_fg'][1])]
rr_lstm_layer = LSTMLayerGetNetInput(incoming=rr_lstm_input_layer, n_units=n_lstm,
name='LSTMCreditAssignment',
W_ci=w_ci, W_ig=w_ig, W_og=w_og, W_fg=w_fg,
b_ci=ci_bias([n_lstm]), b_ig=ig_bias([n_lstm]),
b_og=og_bias([n_lstm]), b_fg=fg_bias([n_lstm]),
a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid,
a_out=lstm_a_out,
c_init=tf.zeros, h_init=tf.zeros, forgetgate=True,
precomp_fwds=False, store_states=True, return_states=False)
rr_layers.append(rr_lstm_layer)
#
# Create layers after LSTM
#
if lstm_layer_index + 1 < len(layerspecs):
rr_layers += layers_from_specs(incoming=rr_layers[-1], layerspecs=layerspecs[lstm_layer_index + 1:])
#
# Create output layer
#
rr_layers.append(DenseLayer(incoming=rr_layers[-1], n_units=n_rr_output_units, a=tf.identity,
W=lstm_w_init(1), b=constant([n_rr_output_units], 0.),
name='DenseCAout'))
rr_output_layer = rr_layers[-1]
#
# LSTM for integrated gradients
# batched input; reduced LSTM layer for faster integrated gradient computation (we store input-activations
# and don't have to recompute convolutions etc.);
#
with tf.variable_scope('lstmnet', reuse=tf.AUTO_REUSE):
intgrd_layers = []
# Placeholder for precomputed inputs to LSTM
intgrd_input_shape = ([intgrd_batchsize, None] + rr_lstm_layer.cur_net_fwd.shape.as_list()[1:])
intgrd_input_placeholder = tf.placeholder(dtype=tf.float32, shape=[1] + intgrd_input_shape[1:])
intgrd_input_layer = RNNInputLayer(tf.zeros((intgrd_input_shape[0], 1, intgrd_input_shape[2])))
#
# Create part of LSTM layer
#
# Reuse weights, activations and such from trained LSTM layer
w_ig = rr_lstm_layer.W_bwd['ig']
w_og = rr_lstm_layer.W_bwd['og']
w_ci = rr_lstm_layer.W_bwd['ci']
w_fg = rr_lstm_layer.W_bwd['fg']
intgrd_lstm_layer = LSTMLayerSetNetInput(incoming=intgrd_input_layer, n_units=rr_lstm_layer.n_units,
name=rr_lstm_layer.name,
W_ci=w_ci, W_ig=w_ig, W_og=w_og, W_fg=w_fg,
b_ci=rr_lstm_layer.b['ci'], b_ig=rr_lstm_layer.b['ig'],
b_og=rr_lstm_layer.b['og'], b_fg=rr_lstm_layer.b['fg'],
a_ci=rr_lstm_layer.a['ci'], a_ig=rr_lstm_layer.a['ig'],
a_og=rr_lstm_layer.a['og'], a_fg=rr_lstm_layer.a['fg'],
a_out=rr_lstm_layer.a['out'],
c_init=tf.zeros, h_init=tf.zeros, forgetgate=True,
precomp_fwds=False, store_states=True, return_states=False)
intgrd_layers.append(intgrd_lstm_layer)
#
# Create layers after LSTM
#
if lstm_layer_index + 1 < len(layerspecs):
intgrd_layers += layers_from_specs(incoming=intgrd_layers[-1],
layerspecs=layerspecs[lstm_layer_index + 1:])
intgrd_layers.append(DenseLayer(incoming=intgrd_layers[-1], n_units=n_rr_output_units, a=tf.identity,
W=lstm_w_init(1), b=constant([n_rr_output_units], 0.),
name='DenseCAout'))
intgrd_output_layer = intgrd_layers[-1]
# --------------------------------------------------------------------------------------------------------------
# LSTM network loop
# --------------------------------------------------------------------------------------------------------------
#
# Layers that require sequential computation (i.e. after LSTM incl. LSTM) will be computed in a tf.while loop
#
print("\tSetting up LSTM loop...")
g = tf.get_default_graph()
#
# Get layers that require sequential computation (i.e. after LSTM incl. LSTM but excluding output layer)
#
rr_lstm_layer_position = [i for i, l in enumerate(rr_layers)
if isinstance(l, LSTMLayer) or isinstance(l, LSTMLayerGetNetInput)][0]
rr_layers_head = rr_layers[rr_lstm_layer_position + 1:-1]
n_timesteps = game_length_placeholder - 1
with tf.name_scope("RNNLoopLSTM"):
#
# Ending condition
#
def cond(time, *args):
"""Break if game is over by looking at n_timesteps"""
return ~tf.greater(time, n_timesteps)
#
# Loop body
#
# Create initial tensors
init_tensors = OrderedDict([
('time', tf.constant(0, dtype=tf.int32)),
('rr_net_fwd', tf.zeros([n_batch, 1] + rr_lstm_layer.cur_net_fwd.shape.as_list()[1:],
dtype=tf.float32)),
('rr_lstm_internals', tf.expand_dims(tf.stack([rr_lstm_layer.c[-1], rr_lstm_layer.c[-1],
rr_lstm_layer.c[-1], rr_lstm_layer.c[-1],
rr_lstm_layer.c[-1]], axis=-1), axis=1)),
('rr_lstm_h', tf.expand_dims(rr_lstm_layer.h[-1], axis=1)),
('rr_pred_reward', tf.zeros([s if s >= 0 else 1 for s in rr_output_layer.get_output_shape()]))
])
if len(rr_layers_head) > 0:
init_tensors.update(OrderedDict(
[('dense_layer_{}'.format(i),
tf.zeros([s for s in l.get_output_shape() if s >= 0], dtype=tf.float32))
for i, l in enumerate(rr_layers_head)]))
# Get initial tensor shapes in tf format
init_shapes = OrderedDict([
('time', init_tensors['time'].get_shape()),
('rr_net_fwd', tensor_shape_with_flexible_dim(init_tensors['rr_net_fwd'], dim=1)),
('rr_lstm_internals', tensor_shape_with_flexible_dim(init_tensors['rr_lstm_internals'], dim=1)),
('rr_lstm_h', tensor_shape_with_flexible_dim(init_tensors['rr_lstm_h'], dim=1)),
('rr_pred_reward', tensor_shape_with_flexible_dim(init_tensors['rr_pred_reward'], dim=1)),
])
if len(rr_layers_head) > 0:
init_shapes.update(OrderedDict(
[('dense_layer_{}'.format(i), init_tensors['dense_layer_{}'.format(i)].get_shape())
for i, l in enumerate(rr_layers_head)]))
def body_rr(time, rr_net_fwd, rr_lstm_internals, rr_lstm_h, rr_pred_reward, *args):
"""Loop over frames and additional inputs, compute network outputs and store hidden states and
activations for debugging/plotting and integrated gradients calculation"""
if self.lstm_network_config['reversed']:
time_index = n_timesteps - time
else:
time_index = time
#
# Set state and state-deltas as network input
#
if self.observation_network_config['show_states']:
state_input_layer.update(tf.cast(tf.expand_dims(game_frames_placeholder[:, time_index, ..., -1:],
axis=1), dtype=tf.float32))
if self.observation_network_config['show_statedeltas']:
# Set the delta at timestep 0 to 0
delta_state = tf.cond(tf.equal(time_index, tf.constant(0, dtype=tf.int32)),
lambda: tf.zeros_like(tf.cast(game_frames_placeholder[:, 0, ..., -1:],
dtype=tf.float32)),
lambda: (tf.cast(game_frames_placeholder[:, time_index, ..., -1:],
dtype=tf.float32)
- tf.cast(game_frames_placeholder[:, time_index - 1, ..., -1:],
dtype=tf.float32)))
statedelta_input_layer.update(tf.expand_dims(delta_state, axis=1))
#
# Set policy model input
#
h_actor_input_layer.update(policy_model.get_observation_features(
frame=tf.cast(tf.expand_dims(game_frames_placeholder[:, time_index], axis=1), dtype=tf.float32),
delta=statedelta_input_layer.out))
#
# Set time and actions as network input
#
rr_time_input_layer.update(self.ingametime.encode_value(time_index))
if self.lstm_network_config['show_actions']:
curr_action = tf.expand_dims(game_actions_placeholder[:, time_index], axis=1)
curr_action = tf.cast(tf.one_hot(curr_action, depth=n_actions, axis=-1), dtype=tf.float32)
rr_action_input_layer.update(curr_action)
#
# Update and compute LSTM inputs
#
curr_rr_input = rr_input.get_output()
rr_input_layer.update(curr_rr_input)
#
# Update LSTM cell-state and output with states from last timestep
#
rr_lstm_layer.c[-1], rr_lstm_layer.h[-1] = rr_lstm_internals[:, -1, :, -1], rr_lstm_h[:, -1, :]
#
# Calculate reward redistribution network output and append it to last timestep
#
rr_pred_reward = tf.concat([rr_pred_reward,
rr_output_layer.get_output(prev_layers=[rr_input_layer])], axis=1)
#
# Store LSTM states for all timesteps for visualization
#
rr_lstm_internals = tf.concat([rr_lstm_internals,
tf.expand_dims(
tf.stack([rr_lstm_layer.ig[-1], rr_lstm_layer.og[-1],
rr_lstm_layer.ci[-1], rr_lstm_layer.fg[-1],
rr_lstm_layer.c[-1]], axis=-1),
axis=1)],
axis=1)
#
# Store LSTM output and forward-part of input activation for integrated gradients
#
rr_lstm_h = tf.concat([rr_lstm_h, tf.expand_dims(rr_lstm_layer.h[-1], axis=1)], axis=1)
rr_net_fwd = tf.concat([rr_net_fwd, tf.expand_dims(rr_lstm_layer.cur_net_fwd, axis=1)], axis=1)
#
# Store output of optional layers above LSTM for debugging
#
rr_layers_head_activations = [l.out for l in rr_layers_head]
#
# Increment time
#
time += tf.constant(1, dtype=tf.int32)
return [time, rr_net_fwd, rr_lstm_internals, rr_lstm_h, rr_pred_reward, *rr_layers_head_activations]
wl_ret = tf.while_loop(cond=cond, body=body_rr, loop_vars=tuple(init_tensors.values()),
shape_invariants=tuple(init_shapes.values()),
parallel_iterations=self.loop_parallel_iterations, back_prop=True, swap_memory=True)
# Re-Associate returned tensors with keys
rr_returns = OrderedDict(zip(init_tensors.keys(), wl_ret))
# Remove initialization timestep
rr_returns['rr_net_fwd'] = rr_returns['rr_net_fwd'][:, 1:]
rr_returns['rr_lstm_internals'] = rr_returns['rr_lstm_internals'][:, 1:]
rr_returns['rr_lstm_h'] = rr_returns['rr_lstm_h'][:, 1:]
rr_returns['rr_pred_reward'] = rr_returns['rr_pred_reward'][:, 1:]
if len(rr_layers_head) > 0:
for i, l in enumerate(rr_layers_head):
rr_returns['dense_layer_{}'.format(i)] = rr_returns['dense_layer_{}'.format(i)][:, 1:]
accumulated_reward = tf.reduce_sum(game_rewards_placeholder)
predicted_reward = rr_returns['rr_pred_reward']
#
# Track exponential mean of reward
#
with tf.variable_scope(self.scopename, reuse=tf.AUTO_REUSE):
emean_reward = tf.get_variable('emean_reward', initializer=tf.constant(0., dtype=tf.float32),
trainable=False)
d = 0.99
emean_reward = tf.assign(emean_reward, d * emean_reward + (1. - d) * accumulated_reward)
#
# Error for reward prediction
#
with g.control_dependencies([emean_reward]):
reward_prediction_error = predicted_reward[0, -1, 0] - accumulated_reward
# --------------------------------------------------------------------------------------------------------------
# Loss and Update Steps for reward redistribution network
# --------------------------------------------------------------------------------------------------------------
print("\tSetting up RR updates...")
layers_to_train = rr_layers + lstm_prepoc_layers
rr_trainables = [t for t in tf.trainable_variables() if t.name.find('RR/lstmnet') != -1]
rr_trainables_vs = [t for t in tf.trainable_variables() if t.name.find('RR/rr_visionsystem') != -1]
rr_trainables = rr_trainables + rr_trainables_vs
#
# RR Update
#
# Main loss target
rr_loss_last_timestep = tf.square(reward_prediction_error)
# Add regularization penalty
rr_reg_penalty = regularize(layers=layers_to_train, l1=self.training_config['l1'],
l2=self.training_config['l2'], regularize_weights=True, regularize_biases=True)
rr_loss = rr_loss_last_timestep + rr_reg_penalty
# Auxiliary losses
cumsum_rewards = tf.cumsum(game_rewards_placeholder)
target_all_ts = tf.constant(0., dtype=tf.float32) # avoid errors if aux. losses aren't used
if self.reward_redistribution_config['cont_pred_w']:
# Calculate mean over aux. losses and add them to main loss
target_all_ts = aux_target_placeholder
rr_loss_all_timesteps = (tf.reduce_sum(tf.reduce_mean(tf.square(aux_target_placeholder
- predicted_reward[0, :, 1:-1]), axis=0))
+ tf.reduce_mean(tf.square(cumsum_rewards - predicted_reward[0, :, -1]))) / 3.
rr_loss += rr_loss_all_timesteps * self.reward_redistribution_config['cont_pred_w']
# Get gradients
rr_grads = tf.gradients(rr_loss, rr_trainables)
if self.training_config['clip_gradients']:
rr_grads, _ = tf.clip_by_global_norm(rr_grads, self.training_config['clip_gradients'])
# Set up optimizer
rr_update = tf.constant(0)
if self.training_config['optimizer_params']['learning_rate'] != 0:
with tf.variable_scope('rr_update', tf.AUTO_REUSE):
optimizer = getattr(tf.train,
self.training_config['optimizer'])(**self.training_config['optimizer_params'])
rr_update = optimizer.apply_gradients(zip(rr_grads, rr_trainables))
# --------------------------------------------------------------------------------------------------------------
# Integrated Gradients
# --------------------------------------------------------------------------------------------------------------
print("\tSetting up Integrated Gradients...")
# Create input that interpolates between zeroed- and full input sequence
intgrd_w_placeholder = tf.placeholder(dtype=tf.float32, shape=(intgrd_batchsize,))
intgrd_input = tf.concat(
[intgrd_input_placeholder * intgrd_w_placeholder[s] for s in range(intgrd_batchsize)],
axis=0)
#
# Ending condition
#
def cond(time, *args):
"""Break if game is over"""
return ~tf.greater(time, n_timesteps)
#
# Loop body
#
# Create initial tensors
init_tensors = OrderedDict([
('time', tf.constant(0, dtype=tf.int32)),
('lstm_c', tf.zeros((intgrd_batchsize, n_lstm), dtype=tf.float32)),
('lstm_h', tf.zeros((intgrd_batchsize, n_lstm), dtype=tf.float32)),
('pred', tf.zeros(intgrd_output_layer.get_output_shape()))
])
# Get initial tensor shapes in tf format
init_shapes = OrderedDict([
('time', init_tensors['time'].get_shape()),
('lstm_c', init_tensors['lstm_c'].get_shape()),
('lstm_h', init_tensors['lstm_h'].get_shape()),
('pred', init_tensors['pred'].get_shape()),
])
def body_intgrd(time, lstm_c, lstm_h, pred):
"""Loop body for integrated gradients"""
if self.lstm_network_config['reversed']:
time_index = n_timesteps - time
else:
time_index = time
#
# Update layer with precomputed forward input activations for LSTM
#
intgrd_input_layer.update(intgrd_input[:, time_index:time_index + 1, :])
#
# Update LSTM states
#
intgrd_lstm_layer.c[-1], intgrd_lstm_layer.h[-1] = lstm_c, lstm_h
#
# Calculate output
#
pred = intgrd_output_layer.get_output()
lstm_c = intgrd_lstm_layer.c[-1]
lstm_h = intgrd_lstm_layer.h[-1]
# Increment time
time += tf.constant(1, dtype=tf.int32)
return [time, lstm_c, lstm_h, pred]
wl_ret = tf.while_loop(cond=cond, body=body_intgrd, loop_vars=tuple(init_tensors.values()),
shape_invariants=tuple(init_shapes.values()),
parallel_iterations=self.loop_parallel_iterations, back_prop=True, swap_memory=True)
intgrd_pred = wl_ret[-1]
# For reward redistribution, use only main task and aux. task for accumulated reward prediction
intgrd_pred = intgrd_pred[..., 0] + intgrd_pred[..., -1]
# Get gradients, set NaNs to 0
grads = tf.gradients(intgrd_pred, intgrd_input)[0]
grads = tf.where(tf.is_nan(grads), tf.zeros_like(grads), grads)
# Get prediction at first sample as we need zero-sample prediction for quality check of integrated gradients
intgrd_pred_first_sample = intgrd_pred[0]
# Calc gradients, sum over batch dimension
intgrd_grads = tf.reduce_sum(grads, axis=0)
# Scale by original input
intgrd_grads *= intgrd_input_placeholder[0]
# Sum over features=lstm units
intgrd_grads = tf.reduce_sum(intgrd_grads, axis=-1)
# --------------------------------------------------------------------------------------------------------------
# TF-summaries
# --------------------------------------------------------------------------------------------------------------
tf.summary.scalar("Environment/reward", accumulated_reward)
tf.summary.scalar("Environment/emean_reward", emean_reward)
tf.summary.scalar("Environment/n_timesteps", n_timesteps)
tf.summary.scalar("RR/rr_loss_last_timestep", rr_loss_last_timestep)
if self.reward_redistribution_config['cont_pred_w']:
tf.summary.scalar("RR/rr_loss_all_timesteps", rr_loss_all_timesteps)
tf.summary.scalar("RR/rr_reg_penalty", rr_reg_penalty)
tf.summary.scalar("RR/rr_loss", rr_loss)
tf.summary.scalar("RR/predicted_reward", predicted_reward[0, -1, 0])
if self.write_histograms:
[tf.summary.histogram("activations/RR/{}".format(n), values=rr_returns['rr_lstm_internals'][0, -1, 0, i])
for i, n in enumerate(['rr_lstm_ig', 'rr_lstm_og', 'rr_lstm_ci', 'rr_lstm_fg'])]
tf.summary.histogram("activations/RR/lstm_h", rr_returns['rr_lstm_h'][0, -1, :])
[tf.summary.histogram("gradients/RR/{}".format(t.name), values=g) for g, t in zip(rr_grads, rr_trainables)]
[tf.summary.histogram("weights/RR/{}".format(t.name), values=t) for t in rr_trainables]
# --------------------------------------------------------------------------------------------------------------
# Publish
# --------------------------------------------------------------------------------------------------------------
# Placeholders
placeholders = OrderedDict(
game_frames_placeholder=game_frames_placeholder,
game_actions_placeholder=game_actions_placeholder,
game_rewards_placeholder=game_rewards_placeholder,
game_length_placeholder=game_length_placeholder,
aux_target_placeholder=aux_target_placeholder,
intgrd_input_placeholder=intgrd_input_placeholder,
intgrd_w_placeholder=intgrd_w_placeholder
)
# Data
data_tensors = OrderedDict(
lstm_internals=rr_returns['rr_lstm_internals'][0],
lstm_h=rr_returns['rr_lstm_h'],
intgrd_gradients=intgrd_grads,
intgrd_lstm_net_fwd=rr_returns['rr_net_fwd'],
intgrd_pred_last_sample=intgrd_pred[-1],
intgrd_pred_first_sample=intgrd_pred_first_sample,
rr_loss_last_timestep=rr_loss_last_timestep,
rr_loss=rr_loss,
predictions=predicted_reward[0, :, :],
aux_target_all_ts=target_all_ts,
pred_return=predicted_reward[0, -1, 0]
)
# Operations
operation_tensors = OrderedDict(
rr_update=rr_update
)
# Summaries
summaries = OrderedDict(
all_summaries=tf.summary.merge_all()
)
self.placeholders = placeholders
self.data_tensors = data_tensors
self.operation_tensors = operation_tensors
self.summaries = summaries
def reward_redistribution(self, tf_session, states, actions, rewards, aux_target, avg_reward,
redistribute_reward=False, details=False, use_reward_redistribution=True,
summaries=False, update=True, verbose=True):
"""Perform reward redistribution without junking
batchsize != 1 currently not supported; see reward_redistribution_junked() for more information;
"""
if verbose:
print(" started loop with {} timesteps...".format(len(rewards)), end="")
starttime = time.time()
sys.stdout.flush()
#
# Set placeholder values
#
placeholder_values = OrderedDict(
game_frames_placeholder=states,
game_actions_placeholder=actions,
game_rewards_placeholder=rewards,
game_length_placeholder=len(rewards),
aux_target_placeholder=aux_target
)
feed_dict = dict(((self.placeholders[k], placeholder_values[k]) for k in placeholder_values.keys()))
#
# Decide which tensors to compute
#
data_keys = ['rr_loss', 'rr_loss_last_timestep', 'pred_return']
if redistribute_reward:
data_keys += ['intgrd_lstm_net_fwd', 'predictions']
if details:
data_keys += ['lstm_internals', 'lstm_h', 'predictions', 'aux_target_all_ts']
data_tensors = [self.data_tensors[k] for k in data_keys]
operation_keys = []
if update:
operation_keys += ['rr_update']
operation_tensors = [self.operation_tensors[k] for k in operation_keys]
summary_keys = []
if summaries:
summary_keys += ['all_summaries']
summary_tensors = [self.summaries[k] for k in summary_keys]
#
# Run graph and re-associate return values with keys in dictionary
#
ret = tf_session.run(data_tensors + summary_tensors + operation_tensors, feed_dict)
ret_dict = OrderedDict(((k, ret[i]) for i, k in enumerate(data_keys)))
del ret[:len(data_keys)]
ret_dict.update(OrderedDict(((k, ret[i]) for i, k in enumerate(summary_keys))))
#
# Check reward redistribution and integrated gradients quality
#
ret_dict['rel_error'] = 1
ret_dict['rr_quality'] = 0
if redistribute_reward:
#
# Calculate squared percentage prediction error to scale reward mixing (ignore redistribution if error>20%)
#
use_reward_redistribution_quality_threshold = \
self.reward_redistribution_config['use_reward_redistribution_quality_threshold']
target = np.sum(rewards)
epsilon_sqd = np.sqrt(np.clip(np.abs(avg_reward), a_min=1e-5, a_max=None))
prediction = ret_dict['pred_return']
sqd_perd_pred_err = ((target - prediction) ** 2) / (target**2 + epsilon_sqd)
if verbose:
print("\tsqd_perd_pred_err: {} (t:{}, p:{}".format(sqd_perd_pred_err, target, prediction))
ret_dict['rel_error'] = sqd_perd_pred_err
# Don't compute integrated gradients if error is too high
if sqd_perd_pred_err < use_reward_redistribution_quality_threshold:
intgrds, intgrdperc, zero_sample_pred = self.integrated_gradients(
tf_session=tf_session, lstm_inputs=ret_dict['intgrd_lstm_net_fwd'],
game_len=len(rewards), intgrd_steps=self.reward_redistribution_config['intgrd_steps'],
intgrd_batchsize=self.reward_redistribution_config['intgrd_batchsize'], verbose=verbose)
# Quality of reward redistribution is only > 0 if integrated gradients is good enough
ret_dict['rr_quality'] = 1 - sqd_perd_pred_err
# Check if integrated gradients signal is within +/- 20% error range
if (80. <= intgrdperc <= 120.) and use_reward_redistribution:
# Correct for integrated gradients error
intgrds += zero_sample_pred / len(rewards)
intgrdssum = np.sum(intgrds)
error = prediction - intgrdssum
intgrds += error / len(rewards)
# Correct for return prediction error
intgrds[:] *= np.clip(rewards.sum() / (prediction +
(np.sign(prediction) * (np.sqrt(epsilon_sqd) / 5))),
a_min=1e-5, a_max=1.5)
ret_dict['redistributed_reward'] = intgrds
ret_dict['intgrd_from_lstm'] = intgrds
else:
ret_dict['redistributed_reward'] = rewards
ret_dict['intgrd_from_lstm'] = intgrds
else:
ret_dict['redistributed_reward'] = rewards
ret_dict['intgrd_from_lstm'] = np.zeros_like(rewards)
else:
ret_dict['redistributed_reward'] = rewards
ret_dict['intgrd_from_lstm'] = np.zeros_like(rewards)
if verbose:
print("done! ({}sec)".format(time.time() - starttime))
return ret_dict
def reward_redistribution_junked(self, tf_session, states, actions, rewards, avg_reward, redistribute_reward=True,
use_reward_redistribution=True, update=True, details=False, summaries=False,
junksize=500, verbose=True):
"""Perform reward redistribution on longer sequences junk-wise
batchsize != 1 currently not supported; Variable sizes for n_timesteps may be used;
Parameters
----------
tf_session : tensorflow session
tensorflow session to compute the graph in
states : numpy array
Game frames of shape (batchsize, n_timesteps, x, y, c)
actions : numpy array
Taken actions of shape (batchsize, n_timesteps, 1)
rewards : numpy array
Reward from environment of shape (n_timesteps,)
avg_reward : float
Average reward used to compute reward redistribution quality measure
redistribute_reward : bool
Compute reward redistribution?
use_reward_redistribution : bool
Use reward redistribution?
update : bool
Update weights of reward redistribution model?
details : bool
Enable computation and logging of debugging details?
summaries : bool
Enable computation of tensorboard summaries?
junksize : int
Number of timesteps per sequence junk
verbose : bool
Enable printing to console?
Returns
----------
dict
Dictionary containing evaluated tensors; Non-optional key/values are:
----------
'redistributed_reward' : numpy array
Final redistributed reward in array of shape (n_timesteps,)
'intgrd_from_lstm' : numpy array
Integrated gradients signal of shape (n_timesteps,)
'rr_loss' : float
                Mean loss for all targets of reward redistribution model over all junks
'rr_loss_last_timestep' : float
Mean loss for main target (=return prediction) of reward redistribution model over all junks
'rel_error' : float
Mean relative error of reward redistribution model over all junks
'rr_quality' : float
Quality measure of reward redistribution model for last junk
"""
seq_len = len(rewards)
n_junks = int(np.ceil(seq_len / junksize))
# Overlap junks
n_junks += n_junks - 1
halved_junksize = int(junksize / 2)
redistributed_reward = np.copy(rewards)
intgrd_from_lstm = np.zeros_like(rewards)
rr_quality = np.empty_like(rewards)
# Prepare auxiliary tasks
aux_target = np.empty((rewards.shape[0], 2))
aux_target_temp = np.concatenate([rewards, self.aux_target_pad], axis=0)
aux_target[:, 0] = np.convolve(aux_target_temp, self.aux_target_filter, 'valid')
aux_target[:, 1] = np.sum(rewards) - np.cumsum(rewards)
if verbose:
print(" started loop with {} steps in {} junks...".format(len(rewards), n_junks))
starttime = time.time()
#
# Loop over junks and redistribute reward; Only store first half of overlapping junks and overwrite rest;
#
junk_ret_dicts = []
for junk in range(n_junks):
junkslice = slice(junk*halved_junksize, junk*halved_junksize+junksize)
junk_ret_dict = self.reward_redistribution(tf_session,
states[:, junkslice], actions[:, junkslice],
rewards[junkslice],
aux_target[junkslice], avg_reward=avg_reward,
redistribute_reward=redistribute_reward, details=details,
use_reward_redistribution=use_reward_redistribution,
summaries=summaries, update=update, verbose=False)
redistributed_reward[junkslice] = junk_ret_dict['redistributed_reward']
intgrd_from_lstm[junkslice] = junk_ret_dict['intgrd_from_lstm']
rr_quality[junkslice] = junk_ret_dict['rr_quality']
junk_ret_dicts.append(junk_ret_dict)
#
# If multiple junks were used, concatenate sequences for plotting etc. accordingly
#
if n_junks > 1:
ret_dict = dict(((k, np.concatenate([jrd[k][:halved_junksize] for jrd in junk_ret_dicts], axis=0))
if k == 'lstm_internals' or k == 'predictions' or k == 'aux_target_all_ts'
else (k, np.concatenate([jrd[k][:, :halved_junksize] for jrd in junk_ret_dicts], axis=1))
if k == 'lstm_h'
else (k, junk_ret_dicts[-1][k])
if k == 'all_summaries'
else (k, None)
if (k == 'intgrd_lstm_net_fwd' or k == 'redistributed_reward'
or k == 'intgrd_from_lstm' or k == 'rr_quality')
else (k, [jrd[k] for jrd in junk_ret_dicts])
for k in junk_ret_dicts[0].keys()))
else:
ret_dict = junk_ret_dicts[0]
#
# Apply eligibility trace to redistributed reward
#
et_redistributed_reward = np.zeros_like(redistributed_reward)
et_redistributed_reward[-1] = redistributed_reward[-1]
for t in reversed(range(0, len(redistributed_reward) - 1)):
et_redistributed_reward[t] = self.reward_redistribution_config['lambda_eligibility_trace'] * \
et_redistributed_reward[t + 1] + redistributed_reward[t]
#
# Add mandatory fields to return dictionary
#
ret_dict['redistributed_reward'] = et_redistributed_reward
ret_dict['intgrd_from_lstm'] = intgrd_from_lstm
ret_dict['rr_loss'] = np.mean(ret_dict['rr_loss'])
ret_dict['rr_loss_last_timestep'] = np.mean(ret_dict['rr_loss_last_timestep'])
ret_dict['rel_error'] = np.mean(ret_dict['rel_error'])
# rr_quality taken from the last junk. (final return)
ret_dict['rr_quality'] = rr_quality
if verbose:
print("\t...done! ({}sec)".format(time.time() - starttime))
return ret_dict
def integrated_gradients(self, tf_session, lstm_inputs, game_len, intgrd_steps, intgrd_batchsize, verbose=True):
"""Compute integrated gradients
batchsize != 1 currently not supported; Variable sizes for n_timesteps may be used; intgrd_steps must be
        divisible by intgrd_batchsize;
Parameters
----------
tf_session : tensorflow session
tensorflow session to compute the graph in
lstm_inputs : numpy array
Pre-computed input activations for reward redistribution LSTM network of shape (batchsize, n_timesteps, f)
game_len : int
Number of timesteps in game
intgrd_steps : int
Number of steps/interpolations to use in integrated gradients
intgrd_batchsize : int
Batchsize to use when parallelizing integrated gradients computation
verbose : bool
Enable printing to console?
Returns
----------
numpy array
            Numpy array with integrated gradients signal of shape (n_timesteps,)
float
Quality measure for integrated gradients in percent (see RUDDER paper)
float
Reward redistribution model output for zeroed input sequence for rescaling of signal
"""
if verbose:
print(" started integrated gradients with {} intgrd-steps...".format(intgrd_steps), end="")
starttime = time.time()
sys.stdout.flush()
# Create multiplier for interpolating between full and zeroed input sequence
intgrd_w = np.linspace(0, 1, num=intgrd_steps, dtype=np.float32)
intgrds = None
zero_sample_pred = None
full_sample_pred = None
# Set up minibatches
n_mbs = int(intgrd_steps / intgrd_batchsize)
if (intgrd_steps % intgrd_batchsize) != 0:
raise ValueError("intgrd stepsize not dividable by intgrd batchsize!")
#
# Loop over minibatches and compute integrated gradients
#
for mb in range(n_mbs):
if verbose:
print(".", end="")
curr_intgrd_w = intgrd_w[mb * intgrd_batchsize:(mb + 1) * intgrd_batchsize]
placeholder_values = OrderedDict(
intgrd_input_placeholder=lstm_inputs,
intgrd_w_placeholder=curr_intgrd_w,
game_length_placeholder=game_len
)
feed_dict = dict(((self.placeholders[k], placeholder_values[k]) for k in placeholder_values.keys()))
data_keys = ['intgrd_gradients']
if mb == 0:
# get prediction for 0-sample
data_keys += ['intgrd_pred_first_sample']
if mb == (n_mbs - 1):
# get prediction for full sample
data_keys += ['intgrd_pred_last_sample']
data_tensors = [self.data_tensors[k] for k in data_keys]
ret = tf_session.run(data_tensors, feed_dict)
ret_dict = OrderedDict(((k, ret[i]) for i, k in enumerate(data_keys)))
if intgrds is None:
intgrds = ret_dict['intgrd_gradients']
else:
intgrds += ret_dict['intgrd_gradients']
if mb == 0:
zero_sample_pred = ret_dict['intgrd_pred_first_sample']
if mb == (n_mbs - 1):
full_sample_pred = ret_dict['intgrd_pred_last_sample']
intgrds /= intgrd_steps
#
# Compute percentage of integrated gradients reconstruction quality
#
intgrdssum = intgrds.sum()
diff = full_sample_pred - zero_sample_pred
if diff != 0:
intgrdperc = 100. / diff * intgrdssum
else:
# in case 0-sequence and full-sequence are equal, take a heuristic to decide on intgrdperc
intgrdperc = 100. + intgrdssum
        # Integrated gradients shows instabilities at the last timesteps, so we zero the last 10 entries
intgrds[-10:] = 0
if verbose:
print("pred", full_sample_pred, "0pred", zero_sample_pred, "diff", diff, "intgrd", intgrdssum,
"perc", intgrdperc)
print("done! ({}sec)".format(time.time() - starttime))
return intgrds, intgrdperc, zero_sample_pred
``` |
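Two numerical steps in the reward-redistribution code above are easy to get wrong: the integrated-gradients quality check (the summed attributions should reconstruct the difference between the full-sequence and zero-sequence predictions, accepted within 80-120%) and the backward eligibility trace applied to the redistributed reward. A minimal NumPy sketch of both with made-up numbers; `lam` stands in for `reward_redistribution_config['lambda_eligibility_trace']`:
```python
import numpy as np

# Hypothetical per-timestep integrated-gradients signal and model predictions
# for the zeroed and the full input sequence.
intgrds = np.array([0.05, 0.30, 0.10, 0.40, 0.15])
zero_sample_pred, full_sample_pred = 0.10, 1.05

# Quality check: the summed attributions should explain ~100% of the prediction difference.
diff = full_sample_pred - zero_sample_pred
intgrdperc = 100. / diff * intgrds.sum() if diff != 0 else 100. + intgrds.sum()
print(intgrdperc, 80. <= intgrdperc <= 120.)   # ~105.3 True

# Eligibility trace over the redistributed reward, computed backwards in time:
# et[t] = lam * et[t + 1] + r[t]
def eligibility_trace(redistributed_reward, lam=0.8):
    et = np.zeros_like(redistributed_reward)
    et[-1] = redistributed_reward[-1]
    for t in reversed(range(len(redistributed_reward) - 1)):
        et[t] = lam * et[t + 1] + redistributed_reward[t]
    return et

print(eligibility_trace(intgrds))
```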
{
"source": "jingweiz/pytorch-dnc",
"score": 2
} |
#### File: core/heads/dynamic_write_head.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.fake_ops import fake_cumprod
from core.heads.dynamic_head import DynamicHead
class DynamicWriteHead(DynamicHead):
def __init__(self, args):
super(DynamicWriteHead, self).__init__(args)
# params
if self.visualize:
self.win_head = "win_write_head"
        # build model: add additional outs for write
# for location focus: dynamic allocation
self.hid_2_alloc_gate = nn.Linear(self.hidden_dim, self.num_heads * 1)
self.hid_2_write_gate = nn.Linear(self.hidden_dim, self.num_heads * 1)
# for access
self.hid_2_erase = nn.Linear(self.hidden_dim, self.num_heads * self.mem_wid)
self.hid_2_add = nn.Linear(self.hidden_dim, self.num_heads * self.mem_wid) # the write vector in dnc, called "add" in ntm
# logging
self.logger.warning("<-----------------------------------> WriteHeads: {" + str(self.num_heads) + " heads}")
self.logger.warning(self)
# reset
self._reset()
def visual(self):
if self.visualize: # here we visualize the wl_curr of the first batch
self.win_head = self.vis.heatmap(self.wl_curr_vb.data[0].clone().cpu().transpose(0, 1).numpy(), env=self.refs, win=self.win_head, opts=dict(title="write_head"))
def _update_usage(self, prev_usage_vb):
"""
calculates the new usage after writing to memory
variables needed:
prev_usage_vb: [batch_size x mem_hei]
wl_prev_vb: [batch_size x num_write_heads x mem_hei]
returns:
usage_vb: [batch_size x mem_hei]
"""
# calculate the aggregated effect of all write heads
        # NOTE: how multiple write heads are dealt with is not discussed in the paper
# NOTE: this part is only shown in the source code
write_weights_vb = 1. - torch.prod(1. - self.wl_prev_vb, 1)
return prev_usage_vb + (1. - prev_usage_vb) * write_weights_vb
def _allocation(self, usage_vb, epsilon=1e-6):
"""
computes allocation by sorting usage, a = a_t[\phi_t[j]]
variables needed:
usage_vb: [batch_size x mem_hei]
-> indicating current memory usage, this is equal to u_t in
the paper when we only have one write head, but for
multiple write heads, one should update the usage while
iterating through the write heads to take into account the
allocation returned by this function
returns:
alloc_vb: [batch_size x num_write_heads x mem_hei]
"""
# ensure values are not too small prior to cumprod
usage_vb = epsilon + (1 - epsilon) * usage_vb
# NOTE: we sort usage in ascending order
sorted_usage_vb, indices_vb = torch.topk(usage_vb, k=self.mem_hei, dim=1, largest=False)
        # to imitate tf.cumprod(exclusive=True) https://discuss.pytorch.org/t/cumprod-exclusive-true-equivalences/2614/8
cat_sorted_usage_vb = torch.cat((Variable(torch.ones(self.batch_size, 1)).type(self.dtype), sorted_usage_vb), 1)[:, :-1]
# TODO: seems we have to wait for this PR: https://github.com/pytorch/pytorch/pull/1439
prod_sorted_usage_vb = fake_cumprod(cat_sorted_usage_vb)
# prod_sorted_usage_vb = torch.cumprod(cat_sorted_usage_vb, dim=1) # TODO: use this once the PR is ready
# alloc_weight_vb = (1 - sorted_usage_vb) * prod_sorted_usage_vb # equ. (1) # 0.1.12
alloc_weight_vb = (1 - sorted_usage_vb) * prod_sorted_usage_vb.squeeze() # equ. (1) # 0.2.0
_, indices_vb = torch.topk(indices_vb, k=self.mem_hei, dim=1, largest=False)
alloc_weight_vb = alloc_weight_vb.gather(1, indices_vb)
return alloc_weight_vb
def _location_focus(self, usage_vb):
"""
Calculates freeness-based locations for writing to.
This finds unused memory by ranking the memory locations by usage, for
each write head. (For more than one write head, we use a "simulated new
usage" which takes into account the fact that the previous write head
will increase the usage in that area of the memory.)
variables needed:
usage_vb: [batch_size x mem_hei]
-> representing current memory usage
write_gate_vb: [batch_size x num_write_heads x 1]
-> /in [0, 1] indicating how much each write head
does writing based on the address returned here
(and hence how much usage increases)
returns:
alloc_weights_vb: [batch_size x num_write_heads x mem_hei]
-> containing the freeness-based write locations
Note that this isn't scaled by `write_gate`;
this scaling must be applied externally.
"""
alloc_weights_vb = []
for i in range(self.num_heads):
alloc_weights_vb.append(self._allocation(usage_vb))
# update usage to take into account writing to this new allocation
# NOTE: checked: if not operate directly on _vb.data, then the _vb
# NOTE: outside of this func will not change
usage_vb += (1 - usage_vb) * self.write_gate_vb[:, i, :].expand_as(usage_vb) * alloc_weights_vb[i]
# pack the allocation weights for write heads into one tensor
alloc_weight_vb = torch.stack(alloc_weights_vb, dim=1)
self.wl_curr_vb = self.write_gate_vb.expand_as(alloc_weight_vb) * (self.alloc_gate_vb.expand_as(self.wc_vb) * alloc_weight_vb + \
(1. - self.alloc_gate_vb.expand_as(self.wc_vb)) * self.wc_vb)
def _access(self, memory_vb): # write
"""
variables needed:
wl_curr_vb: [batch_size x num_heads x mem_hei]
erase_vb: [batch_size x num_heads x mem_wid]
-> /in (0, 1)
add_vb: [batch_size x num_heads x mem_wid]
-> w/ no restrictions in range
memory_vb: [batch_size x mem_hei x mem_wid]
returns:
memory_vb: [batch_size x mem_hei x mem_wid]
NOTE: IMPORTANT: https://github.com/deepmind/dnc/issues/10
"""
        # first let's do the erase step
weighted_erase_vb = torch.bmm(self.wl_curr_vb.contiguous().view(-1, self.mem_hei, 1),
self.erase_vb.contiguous().view(-1, 1, self.mem_wid)).view(-1, self.num_heads, self.mem_hei, self.mem_wid)
keep_vb = torch.prod(1. - weighted_erase_vb, dim=1)
memory_vb = memory_vb * keep_vb
# finally let's write (do addition)
return memory_vb + torch.bmm(self.wl_curr_vb.transpose(1, 2), self.add_vb)
def forward(self, hidden_vb, memory_vb, usage_vb):
# content focus
super(DynamicWriteHead, self).forward(hidden_vb, memory_vb)
# location focus
self.alloc_gate_vb = F.sigmoid(self.hid_2_alloc_gate(hidden_vb)).view(-1, self.num_heads, 1)
self.write_gate_vb = F.sigmoid(self.hid_2_write_gate(hidden_vb)).view(-1, self.num_heads, 1)
self._location_focus(usage_vb)
self.wl_prev_vb = self.wl_curr_vb
# access
self.erase_vb = F.sigmoid(self.hid_2_erase(hidden_vb)).view(-1, self.num_heads, self.mem_wid)
self.add_vb = F.tanh(self.hid_2_add(hidden_vb)).view(-1, self.num_heads, self.mem_wid)
return self._access(memory_vb)
def _update_link(self, prev_link_vb, prev_preced_vb):
"""
calculates the new link graphs
For each write head, the link is a directed graph (represented by a
matrix with entries in range [0, 1]) whose vertices are the memory
locations, and an edge indicates temporal ordering of writes.
variables needed:
            prev_link_vb: [batch_size x num_heads x mem_hei x mem_hei]
-> {L_t-1}, previous link graphs
prev_preced_vb: [batch_size x num_heads x mem_hei]
-> {p_t}, the previous aggregated precedence
-> weights for each write head
wl_curr_vb: [batch_size x num_heads x mem_hei]
-> location focus of {t}
returns:
link_vb: [batch_size x num_heads x mem_hei x mem_hei]
-> {L_t}, current link graph
"""
write_weights_i_vb = self.wl_curr_vb.unsqueeze(3).expand_as(prev_link_vb)
write_weights_j_vb = self.wl_curr_vb.unsqueeze(2).expand_as(prev_link_vb)
prev_preced_j_vb = prev_preced_vb.unsqueeze(2).expand_as(prev_link_vb)
prev_link_scale_vb = 1 - write_weights_i_vb - write_weights_j_vb
new_link_vb = write_weights_i_vb * prev_preced_j_vb
link_vb = prev_link_scale_vb * prev_link_vb + new_link_vb
# Return the link with the diagonal set to zero, to remove self-looping edges.
# TODO: set diag as 0 if there's a specific method to do that w/ later releases
diag_mask_vb = Variable(1 - torch.eye(self.mem_hei).unsqueeze(0).unsqueeze(0).expand_as(link_vb)).type(self.dtype)
link_vb = link_vb * diag_mask_vb
return link_vb
def _update_precedence_weights(self, prev_preced_vb):
"""
calculates the new precedence weights given the current write weights
the precedence weights are the "aggregated write weights" for each write
head, where write weights with sum close to zero will leave the
precedence weights unchanged, but with sum close to one will replace the
precedence weights.
variables needed:
prev_preced_vb: [batch_size x num_write_heads x mem_hei]
wl_curr_vb: [batch_size x num_write_heads x mem_hei]
returns:
preced_vb: [batch_size x num_write_heads x mem_hei]
"""
# write_sum_vb = torch.sum(self.wl_curr_vb, 2) # 0.1.12
write_sum_vb = torch.sum(self.wl_curr_vb, 2, keepdim=True) # 0.2.0
return (1 - write_sum_vb).expand_as(prev_preced_vb) * prev_preced_vb + self.wl_curr_vb
def _temporal_link(self, prev_link_vb, prev_preced_vb):
link_vb = self._update_link(prev_link_vb, prev_preced_vb)
preced_vb = self._update_precedence_weights(prev_preced_vb)
return link_vb, preced_vb
``` |
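The trickiest part of the write head above is `_allocation`: usage is nudged away from zero, sorted ascending, turned into weights through an exclusive cumulative product, and then un-sorted back to the original memory order. A small NumPy sketch of the same computation for a single batch element (the usage values are made up):
```python
import numpy as np

def allocation_weights(usage, epsilon=1e-6):
    """Freeness-based allocation weights for one batch element (shape: (mem_hei,))."""
    usage = epsilon + (1 - epsilon) * usage           # keep the cumprod away from zero
    order = np.argsort(usage)                         # ascending usage, like topk(largest=False)
    sorted_usage = usage[order]
    # exclusive cumulative product: product of all *previous* sorted usages
    excl_cumprod = np.concatenate(([1.0], np.cumprod(sorted_usage)[:-1]))
    sorted_alloc = (1 - sorted_usage) * excl_cumprod  # equ. (1) in the DNC paper
    alloc = np.empty_like(sorted_alloc)
    alloc[order] = sorted_alloc                       # undo the sort
    return alloc

usage = np.array([0.9, 0.1, 0.5, 0.0])
print(allocation_weights(usage))   # the free slot (index 3) receives almost all of the weight
```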
{
"source": "JingweiZuo/SE2TeC",
"score": 3
} |
#### File: SE2TeC/Algo_centralized/timeseries.py
```python
import numpy as np
from collections import defaultdict
import sys
from typing import Any
class TimeSeries(object):
def __init__(self):
self.class_timeseries = ''
self.dimension_name = ''
self.timeseries = None
self.matched = False
self.name = ''
def __repr__(self):
representation = "Timeseries with dimension: " + self.dimension_name
representation += " with class: " + str(self.class_timeseries)
representation += " with series: " + str(self.timeseries)
return representation
def __str__(self):
representation = "Timeseries with dimension: " + self.dimension_name
representation += " with class: " + str(self.class_timeseries)
representation += " with series: " + str(self.timeseries)
return representation
@staticmethod
def groupByClass_timeseries(list_timeseries):
#'list_timeseries': {ts_name1:ts1, ts_name2:ts2, ...}
dict_ts = {} # type: dict
for ts in list_timeseries.values():
ts_class = ts.class_timeseries
if ts_class in dict_ts.keys():
dict_ts[ts.class_timeseries].append(ts)
else:
dict_ts[ts.class_timeseries] = [ts]
return dict_ts
@staticmethod
def generate_timeseries(unid):
unid_timeseries = []
for cuts in unid:
target_class = cuts[-1]
ts_str = cuts[:-1]
ts = [float(element) for element in ts_str]
timeseries = TimeSeries()
timeseries.class_timeseries = target_class
timeseries.timeseries = np.array(ts)
unid_timeseries.append(timeseries)
return unid_timeseries
```
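A short usage sketch for the two static helpers above, assuming rows in the `value, ..., class_label` layout that `generate_timeseries` expects (the import path is an assumption):
```python
from timeseries import TimeSeries   # module path is an assumption

# Hypothetical raw rows: feature values followed by the class label in the last column.
unid = [
    ["0.1", "0.4", "0.3", "A"],
    ["0.9", "0.8", "0.7", "B"],
    ["0.2", "0.5", "0.1", "A"],
]
ts_list = TimeSeries.generate_timeseries(unid)
# groupByClass_timeseries expects a dict {name: TimeSeries}, so key the list by position.
by_class = TimeSeries.groupByClass_timeseries({i: ts for i, ts in enumerate(ts_list)})
print(sorted(by_class.keys()))   # ['A', 'B']
print(len(by_class['A']))        # 2
```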
#### File: Algo_centralized/utils/old_Utils.py
```python
import os
import numpy as np
import collections
import pickle
import sys
import psutil as ps
import random
import csv
import json
from USE.timeseries import TimeSeries
class old_Utils(object):
SHAPELET_DIRNAME = "/UniShapelets/"
SHAPELET_EXT = ".shapelet"
SEQUENCE_DIRNAME = "/Sequences/"
SEQUENCE_EXT = ".sequence"
CSV_DIRNAME = "/csv_shapelet/"
CSV_EXT = ".csv"
JSON = SEQUENCE_DIRNAME + "json/"
@staticmethod
def convert_csv_to_multivariate_timeseries(directory):
files_list = [f for f in os.listdir(directory) if f.lower().endswith('.csv')]
list_multivariate_timeseries = []
for file in files_list:
multivariate_timeseries = TimeSeries.generate_from_file(directory, file)
list_multivariate_timeseries.append(multivariate_timeseries)
return list_multivariate_timeseries
# @staticmethod
# def save_shapelets(directory, list_shapelets):
# shapelet_dir = directory + old_Utils.DIRNAME
# if not os.path.exists(shapelet_dir):
# os.makedirs(shapelet_dir)
# for aShapelet in list_shapelets:
# file_name = aShapelet.name + aShapelet.dimension_name + old_Utils.EXTENSION
# path = shapelet_dir + file_name
# pickle.dump(aShapelet, open(path, "wb"))
@staticmethod
def save_json(directory, list_sequences):
folder = directory + old_Utils.JSON
if not os.path.exists(folder):
os.makedirs(folder)
else:
files_list = [f for f in os.listdir(folder) if f.lower().endswith('.json')]
for file in files_list:
path = folder + file
os.remove(path)
for aSequence in list_sequences:
file_name = str(aSequence.name) + aSequence.dimension_name + '.json'
path = folder + file_name
with open(path, 'w') as outfile:
outfile.write(aSequence.__repr__())
# j = jsonpickle.encode(aSequence.sequence[0], make_refs=False)
# outfile.write(j)
# outfile.flush()
# outfile.close()
@staticmethod
def json_list(representation, name, listos):
representation += '"' + name + '":['
for s in listos:
representation += '' + str(s) + ','
if len(listos):
representation = representation[:-1]
representation += ']'
return representation
@staticmethod
def json_dict(representation, name, dictos, key='list'):
representation += '"' + name + '":{'
for k, i in dictos.items():
if key == 'list':
representation = old_Utils.json_list(representation, k, i)
representation += ','
else:
representation += '"' + k + '": '
representation += '' + str(i) + ','
representation = representation[:-1]
representation += '}'
return representation
@staticmethod
def save(directory, list_objects, option):
if option.lower() == 'shapelet':
dirname = old_Utils.SHAPELET_DIRNAME
extension = old_Utils.SHAPELET_EXT
elif option.lower() == 'sequence':
dirname = old_Utils.SEQUENCE_DIRNAME
extension = old_Utils.SEQUENCE_EXT
elif option.lower() == 'csv':
dirname = old_Utils.CSV_DIRNAME
extension = old_Utils.CSV_EXT
folder = directory + dirname
if not os.path.exists(folder):
os.makedirs(folder)
else:
##clean the historical files
files_list = [f for f in os.listdir(directory + dirname) if f.lower().endswith(extension)]
for file in files_list:
path = directory + dirname + file
os.remove(path)
if option.lower() != 'csv':
for anObject in list_objects:
file_name = str(anObject.name) + anObject.dimension_name + extension
path = folder + file_name
pickle.dump(anObject, open(path, "wb"))
else:
file_name = "shapelet_test" + extension
path = folder + file_name
i = 0
with open(path, 'w') as f:
writer = csv.writer(f, lineterminator='\n', delimiter=';',)
for anObject in list_objects:
i+=1
for key in anObject.matching_indices:
writer.writerow(["shapelet" + str(i), anObject.name, anObject.dimension_name, anObject.class_shapelet, key, anObject.matching_indices[key], anObject.gain, anObject.subsequence.tolist(), anObject.min_distance] )
@staticmethod
def load(directory, option):
if option.lower() == 'shapelet':
dirname = old_Utils.SHAPELET_DIRNAME
extension = old_Utils.SHAPELET_EXT
elif option.lower() == 'sequence':
dirname = old_Utils.SEQUENCE_DIRNAME
extension = old_Utils.SEQUENCE_EXT
files_list = [f for f in os.listdir(directory + dirname) if f.lower().endswith(extension)]
list_objects = []
for file in files_list:
path = directory + dirname + file
an_object = pickle.load(open(path, "rb"))
list_objects.append(an_object)
return list_objects
# @staticmethod
# def load_shapelets(directory):
# files_list = [f for f in os.listdir(directory + old_Utils.DIRNAME) if f.lower().endswith(old_Utils.EXTENSION)]
# list_shapelets = []
# for file in files_list:
# path = directory + old_Utils.DIRNAME + file
# shapelet = pickle.load(open(path, "rb"))
# list_shapelets.append(shapelet)
# return list_shapelets
# A helper generator method to create a sliding window over a list and return all the sub-lists
@staticmethod
def sliding_window(sequence, win_size, step=1):
# Verify the inputs
try:
it = iter(sequence)
except TypeError:
raise Exception("**ERROR** sequence must be iterable.")
if not ((type(win_size) == type(0)) and (type(step) == type(0))):
raise Exception("**ERROR** type(winSize) and type(step) must be int.")
if step > win_size:
raise Exception("**ERROR** step must not be larger than winSize.")
if win_size > len(sequence):
raise Exception("**ERROR** winSize must not be larger than sequence length.")
# Pre-compute number of chunks to emit
        num_chunks = ((len(sequence) - win_size) // step) + 1
# Do the work
for i in range(0, int(num_chunks * step), step):
yield sequence[i:i + win_size]
# A helper method that helps to calculate the entropy
@staticmethod
def entropy_helper(n):
return - (n * np.log2(n))
# A helper method to calculate the entropy of the data set
@staticmethod
def dataset_entropy(unid):
classes_dict = collections.defaultdict(list)
for timeseries in unid:
classes_dict[timeseries.class_timeseries].append(timeseries)
entropy = 0.0
for key in classes_dict.keys():
#print("key class is ", str(key))
p = float(len(classes_dict[key])) / len(unid)
#print("p is ", str(p))
entropy += old_Utils.entropy_helper(p)
#print("entropy is ", entropy)
return entropy
@staticmethod
def entropy_after_split(unid1, unid2):
f1 = float(len(unid1)) / (len(unid1) + len(unid2))
f2 = float(len(unid2)) / (len(unid1) + len(unid2))
#print("f1 is ", str(f1), "f2 is", str(f2))
entropy1 = f1 * old_Utils.dataset_entropy(unid1)
#e1class = [ts.class_timeseries for ts in unid1]
#e2class = [ts.class_timeseries for ts in unid2]
#print("unid1.class is: ", e1class)
#print("unid2.class is: ", e2class)
entropy2 = f2 * old_Utils.dataset_entropy(unid2)
#print("entropy1 + entropy2 is ", entropy1 + entropy2)
return entropy1 + entropy2
@staticmethod
def print_progress(iteration, total, prefix='Progress:', suffix='Complete', decimals=1, barLength=70):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int)
"""
format_str = "{0:." + str(decimals) + "f}"
percent = format_str.format(100 * (iteration / float(total)))
filled_length = int(round(barLength * iteration / float(total)))
bar = '█' * filled_length + '-' * (barLength - filled_length)
        sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix))
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
@staticmethod
def min_length_dataset(list_timeseries):
min_l = sys.maxsize
for mts in list_timeseries:
if len(mts.timeseries) < min_l:
min_l = len(mts.timeseries)
return min_l
@staticmethod
def max_length_dataset(list_multivariate_timeseries):
max_l = 0
for mts in list_multivariate_timeseries:
if mts.length() > max_l:
max_l = mts.length()
return max_l
@staticmethod
def check_memory(perc=90):
mem = ps.virtual_memory()
if mem.percent >= perc:
return False
return True
@staticmethod
def get_random_color(pastel_factor=0.5):
return [(x + pastel_factor) / (1.0 + pastel_factor) for x in [random.uniform(0, 1.0) for i in [1, 2, 3]]]
@staticmethod
def color_distance(c1, c2):
return sum([abs(x[0] - x[1]) for x in zip(c1, c2)])
@staticmethod
def generate_new_color(existing_colors, pastel_factor=0.5):
max_distance = None
best_color = None
for i in range(0, 100):
color = old_Utils.get_random_color(pastel_factor=pastel_factor)
if not existing_colors:
return color
best_distance = min([old_Utils.color_distance(color, c) for c in existing_colors])
if not max_distance or best_distance > max_distance:
max_distance = best_distance
best_color = color
return best_color
@staticmethod
def generate_new_colors(k=10):
colors = []
for i in range(k):
colors.append(old_Utils.generate_new_color(colors, pastel_factor=0.9))
return colors
``` |
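`dataset_entropy` and `entropy_after_split` above provide the two halves of the information-gain criterion behind the `gain` field written to the shapelet CSV: the gain of a split is the entropy before the split minus the weighted entropy after it. A self-contained sketch of that computation on plain label lists (the real code works on `TimeSeries` objects):
```python
import numpy as np
from collections import Counter

def entropy(labels):
    """Shannon entropy of a list of class labels."""
    n = len(labels)
    return -sum((c / n) * np.log2(c / n) for c in Counter(labels).values())

def information_gain(left, right):
    """Entropy reduction achieved by splitting `left + right` into the two halves."""
    n = len(left) + len(right)
    before = entropy(left + right)
    after = (len(left) / n) * entropy(left) + (len(right) / n) * entropy(right)
    return before - after

# A perfectly separating split has maximal gain, a useless one has zero gain.
print(information_gain(['a', 'a', 'a'], ['b', 'b', 'b']))   # 1.0
print(information_gain(['a', 'b'], ['a', 'b']))             # 0.0
```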
{
"source": "JingweiZuo/SMATE",
"score": 2
} |
#### File: mtsc_ca_sfcn/src/fcn_ca_main.py
```python
import sys
import numpy as np
from sklearn.metrics import accuracy_score
from fileio.data_io import train_test_file_reading, data_group_processing, list_files, init_folder
from fileio.log_io import init_logging
from fileio.parameter_proc import read_all_feature_classification
from tensor_model.model_cnn import run_cnn
from tensor_model.model_setting import return_cnn_setting_from_file
from utils.classification_results import averaged_class_based_accuracy
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"]="-1"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
# from classification_results import predict_matrix_with_prob_to_predict_accuracy
# from classification_results import f1_value_precision_recall_accuracy
# This is multi-class classification using a CNN model, with accuracy instead of F1 as the evaluation measure
# Just classification, no need to store the output objects
def cnn_classification_main(parameter_file, file_keyword, attention_type, function_keyword="fcn_classification"):
data_keyword, data_folder, attr_num, attr_len, num_classes, start_class, class_column, class_id, obj_folder, method, log_folder, out_obj_folder, out_model_folder, cnn_setting_file, learning_rate = read_all_feature_classification(parameter_file, function_keyword)
print(data_keyword, data_folder, attr_num, attr_len, num_classes, start_class, class_column, class_id, obj_folder, method, log_folder, out_obj_folder, out_model_folder, cnn_setting_file)
log_folder = init_folder(log_folder)
out_obj_folder = init_folder(out_obj_folder)
out_model_folder = init_folder(out_model_folder)
file_list = list_files(data_folder)
file_count = 0
class_column = 0
header = True
cnn_setting = return_cnn_setting_from_file(cnn_setting_file)
conv_num = len(cnn_setting.conv_kernel_list)
cnn_setting.out_obj_folder = out_obj_folder
cnn_setting.out_model_folder = out_model_folder
cnn_setting.learning_rate = learning_rate
#cnn_setting.attention_type = 0 # 0: apply ra then sa attentions
# -1: No attentions
#cnn_setting.attention_type = -1
#cnn_setting.attention_type = 1 # Using the global attention mechnizm
#cnn_setting.attention_type = 2 # Using the input attention from https://arxiv.org/pdf/1704.02971.pdf
cnn_setting.attention_type = attention_type
cnn_setting.cross_entropy_type = -1 # 0: apply the class-based cross-entropy
# -1: apply the normal cross-entropy
#cnn_setting.cross_entropy_type = 0
init_folder(out_obj_folder)
init_folder(out_model_folder)
result_obj_folder = obj_folder + method +"_result_folder"
result_obj_folder = init_folder(result_obj_folder)
delimiter = ' '
loop_count = -1
saver_file_profix = ""
trans_bool = False # True: means ins * attr_len * 1 * attr_num
# False: means ins * attr_len * attr_num * 1
for train_file in file_list:
if file_keyword not in train_file:
continue
loop_count = loop_count + 1
file_key = train_file.replace('.txt', '')
saver_file_profix = file_key + "_atten" + str(cnn_setting.attention_type)
valid_file = data_folder + train_file.replace('train', 'valid')
if os.path.isfile(valid_file) is False:
valid_file = ''
test_file = data_folder + train_file.replace('train', 'test')
if os.path.isfile(test_file) is False:
test_file = ''
data_group, attr_num = train_test_file_reading(data_folder + train_file, test_file, valid_file, class_column, delimiter, header)
data_group_processing(data_group, attr_num, trans_bool)
data_stru = data_group.gene_data_stru()
data_group.data_check(data_stru.num_classes, data_stru.min_class)
cnn_eval_key = cnn_setting.eval_method
if cnn_eval_key == "f1":
if num_classes > 2:
cnn_eval_key = "acc"
log_file = log_folder + data_keyword + '_' + file_key + '_' + function_keyword + "_act" + str(cnn_setting.activation_fun) + "_" + cnn_eval_key + "_attention" + str(cnn_setting.attention_type) + "_conv" + str(conv_num) + '.log'
print("log file: " + log_file)
logger = init_logging(log_file)
logger.info('\nlog file: ' + log_file)
logger.info(train_file)
logger.info('cnn setting:\n ' + cnn_setting.to_string())
logger.info('method: ' + method)
logger.info('============')
        if file_count == 0:
            logger.info('train matrix shape: ' + str(data_group.train_x_matrix.shape))
            logger.info('train label shape: ' + str(data_group.train_y_vector.shape))
            logger.info(data_group.train_x_matrix[0, 0:3, 0:2, 0])
        file_count = file_count + 1
eval_value, train_run_time, test_run_time, cnn_model = run_cnn(cnn_setting, data_group, saver_file_profix, logger)
#pred_y_vector = np.argmax(pred_y_prob, axis=1)
#avg_acc, ret_str = averaged_class_based_accuracy(pred_y_vector, data_group.test_y_vector)
#acc_value = accuracy_score(data_group.test_y_vector, pred_y_vector, True)
#logger.info("Averaged acc: " + str(acc_value))
#logger.info(ret_str)
logger.info("Fold accuracy: " + str(eval_value))
logger.info(method + ' fold training time (sec):' + str(train_run_time))
logger.info(method + ' fold testing time (sec):' + str(test_run_time))
#logger.info("save obj to " + cnn_model.saver_file)
if __name__ == '__main__':
argv_array = sys.argv
run_stdout = sys.stdout
file_keyword = 'train'
projected = True
len_argv_array = len(argv_array)
if len_argv_array == 3:
try:
data_key = argv_array[1]
attention_type = int(argv_array[2])
    except ValueError:
        print("attention_type must be an integer!")
        sys.exit(1)
    else:
        raise Exception("Unknown parameter detected! Please follow the format #python fcn_ca_main.py <DATA_NAME> <ATTENTION_TYPE>")
print("dataset: " + data_key)
print("attention type: " + str(attention_type))
parameter_file = '../parameters/all_feature_classification_' + data_key.lower() + ".txt"
print(parameter_file)
cnn_classification_main(parameter_file, file_keyword, attention_type)
```
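The training loop above relies purely on file-name conventions to find the companion validation/test splits and to name checkpoints. A small sketch of those conventions with hypothetical names (the real folders and keywords come from the parameter file):
```python
import os

data_folder = "../data/dsa/"                 # hypothetical folder
train_file = "train_0.txt"                   # must contain the keyword 'train'

file_key = train_file.replace('.txt', '')
valid_file = data_folder + train_file.replace('train', 'valid')  # set to '' if the file is missing
test_file = data_folder + train_file.replace('train', 'test')    # set to '' if the file is missing

attention_type = 0   # 0: ra then sa, 1: global attention, 2: input attention, -1: none
saver_prefix = file_key + "_atten" + str(attention_type)
print(saver_prefix)                                               # train_0_atten0
print(os.path.basename(valid_file), os.path.basename(test_file))  # valid_0.txt test_0.txt
```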
#### File: src/fileio/data_checking.py
```python
import logging
import os
from os.path import isfile, join
import numpy as np
from data_io import file_reading
from data_io import x_y_spliting
import matplotlib.pyplot as plt  # needed by data_plot() below
def data_plot(data_file, class_column=0, delimiter=' '):
x_matrix, attr_num = file_reading(data_file, delimiter, True)
x_matrix, y_vector = x_y_spliting(x_matrix, class_column)
y_min = min(y_vector)
y_max = max(y_vector)
x_row, x_col = x_matrix.shape
    attr_len = x_col // attr_num
    x_matrix = x_matrix.reshape(x_row, attr_num, attr_len)
    for label in range(y_min, y_max + 1):
out_pdf = "asl_class_" + str(label) + ".pdf"
fig = plt.figure()
label_index = np.where(y_vector==label)[0]
label_row = x_matrix[label_index[0], :, :]
for attr in range(0, attr_num):
plot_series = label_row[attr, :]
plot_len = len(plot_series)
stop_i = plot_len
for i in range(0, plot_len):
re_i = plot_len - i - 1
if plot_series[re_i] == 0:
stop_i = stop_i - 1
else:
break
plt.plot(plot_series[0:stop_i])
fig.savefig(out_pdf, dpi=fig.dpi)
def data_checking(data_file, class_column=0, delimiter=' '):
ret_str = ""
x_matrix, attr_num = file_reading(data_file, delimiter, True)
x_matrix, y_vector = x_y_spliting(x_matrix, class_column)
ret_str = 'x_matrix shape: ' + str(x_matrix.shape)
y_min = min(y_vector)
y_max = max(y_vector)
ret_str = ret_str + "\nclass labels from " + str(y_min) + " to " + str(y_max)
#for i in range(y_min, y_max+1):
# ret_str = ret_str + '\nclass '+ str(i) + ': '+str(y_vector.count(i))
unique, counts = np.unique(y_vector, return_counts=True)
ret_str = ret_str +'\n'+ str(dict(zip(unique, counts)))
return ret_str
def arc_reduce_null(fname, null_class=1, null_max=1000, class_column=0, delimiter=' ', header = True):
num = 0
data_matrix = []
null_count = 0
with open(fname) as f:
data_row = []
for line in f:
if header == True:
attr_num = int(line.strip())
header = False
continue
data_row = line.split(delimiter)
if int(data_row[class_column]) == null_class:
null_count = null_count + 1
if null_count < null_max:
data_matrix.append(data_row)
else:
data_matrix.append(data_row)
row_num = len(data_matrix)
col_num = len(data_matrix[0])
data_matrix = np.array(data_matrix, dtype=float).reshape(row_num, col_num)
data_matrix.astype(float)
y_vector = data_matrix[:, class_column].astype(int)
return np.delete(data_matrix, class_column, 1), y_vector
if __name__ == '__main__':
#data_file = '../../data/gesture_data/processed_data/data.txt_trainTest10/train_0.txt'
#data_file = '../../data/arc_activity_recognition/s1_ijcal/train.txt'
#class_column = 0
#delimiter = ' '
#ret_str = data_checking(data_file, class_column, delimiter)
#print ret_str
#data_file = '../../data/arc_activity_recognition/s1_ijcal/test.txt'
#class_column = 0
#delimiter = ' '
#ret_str = data_checking(data_file, class_column, delimiter)
#print ret_str
data_file = '../../data/evn/ds/DS_all_ready_to_model.csv_trainTest2_weekly_3attr/test_0.txt'
#data_file = '../../data/human/subject10_ideal.log'
#class_column = 119
#delimiter = '\t'
##null_class=1
##null_max=1000
##x_matrix, y_vector = readFile(data_file, null_class, null_max, class_column);
##print x_matrix.shape
##print y_vector.shape
#
#data_file = '../../data/human/processed/ready/data.txt'#_trainTest10/train_0.txt'
#class_column = 0
#delimiter = ' '
#ret_str = data_checking(data_file, class_column, delimiter)
#print ret_str
data_file = '../../data/dsa/train_test_10_fold/test_0.txt'
#data_file = '../../data/dsa/output.txt'
#data_file = '../../data/rar/train_test_10_fold_class_based/train_0.txt_class_0.txt'
#data_file = "../../data/arabic/train_test_1_fold/train_0.txt"
#data_file = "../../data/arabic/train_test_1_fold/test_0.txt"
#data_file = "../../data/asl/train_test_3_fold/train_0.txt"
#data_file = '../../data/rar/train_test_10_fold/test_0.txt'
#data_file = '../../data/arc/train_test_10_fold/test_0.txt'
#data_file = '../../data/fixed_arc/train_test_1_fold/test_0.txt'
data_key = "phs"
data_key = "eeg"
#data_key = "fad"
data_file = "../../data/" + data_key +"/train.txt"
class_column = 0
delimiter = ' '
#data_plot(data_file, class_column, delimiter)
ret_str = data_checking(data_file, class_column, delimiter)
print(ret_str)
```
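The inner backwards loop in `data_plot` above only strips the zero padding at the end of each series before plotting; `np.trim_zeros` does the same in one call, sketched here on a made-up series:
```python
import numpy as np

plot_series = np.array([0.3, 0.7, 0.2, 0.0, 0.0, 0.0])
# trim='b' removes trailing zeros only, matching the manual stop_i loop
trimmed = np.trim_zeros(plot_series, trim='b')
print(trimmed)   # [0.3 0.7 0.2]
```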
#### File: src/fileio/data_processing.py
```python
import numpy as np
import sys
import time
import os
#from generation_model import Generation_model
class data_collection:
train_x_matrix = None
train_y_vector = None
valid_x_matrix = None
valid_y_vector = None
test_x_matrix = None
test_y_vector = None
train_y_matrix = None
valid_y_matrix = None
test_y_matrix = None
num_classes = 0
min_class = 0
class_column = 0
def __init__(self, train_x_matrix, train_y_vector, class_column=0):
self.train_x_matrix = train_x_matrix
self.train_y_vector = train_y_vector
self.class_column = class_column
def gene_data_stru(self):
train_x_matrix = self.train_x_matrix
if train_x_matrix is None:
raise Exception("Missing training data")
train_y_vector = self.train_y_vector
train_shape_len = len(train_x_matrix.shape)
if train_shape_len == 3:
train_ins, attr_len, attr_num = train_x_matrix.shape
input_map = 1
train_x_matrix = train_x_matrix.reshape(train_ins, attr_len, attr_num, input_map)
elif train_shape_len == 4:
train_ins, attr_len, attr_num, input_map = train_x_matrix.shape
else:
raise Exception("Input x matrix invalid shape!!!")
min_class = min(train_y_vector)
max_class = max(train_y_vector)
num_classes = max_class - min_class + 1
return return_data_stru(num_classes, min_class, attr_num, attr_len, self.class_column, train_ins)
def data_check(self, num_classes, min_class):
if self.train_y_vector is not None:
self.train_y_matrix = y_vector_to_matrix(self.train_y_vector, num_classes, min_class)
if self.test_y_vector is not None:
self.test_y_matrix = y_vector_to_matrix(self.test_y_vector, num_classes, min_class)
if self.valid_y_vector is not None:
self.valid_y_matrix = y_vector_to_matrix(self.valid_y_vector, num_classes, min_class)
elif self.test_y_vector is not None:
self.valid_x_matrix = self.test_x_matrix
self.valid_y_vector = self.test_y_vector
self.valid_y_matrix = self.test_y_matrix
########################################################################################
## data structure part
# starting from min_class, max_class = min_class + num_classes - 1
# It means that all numbers between min_class to max_class should be used as a label
class data_structure:
def __init__(self, num_classes, min_class, attr_num, attr_len, class_c=0, train_ins=-1):
self.num_classes = num_classes
self.attr_num = attr_num
self.attr_len = attr_len
self.class_column = class_c
self.train_ins = train_ins
self.min_class = min_class
def print_to_string(self):
ret_str = 'num of classes: ' + str(self.num_classes) +'\nattribute number: ' + str(self.attr_num) +'\nattribute length: ' + str(self.attr_len) +'\nclass column: ' + str(self.class_column) +'\ntrain_ins: ' + str(self.train_ins)
return ret_str
def return_data_stru(num_classes, min_class, attr_num, attr_len, class_column, train_ins=-1):
return data_structure(num_classes, min_class, attr_num, attr_len, class_column, train_ins)
def data_stru_gene(train_y_vector, class_column=0):
    min_class = min(train_y_vector)
    max_class = max(train_y_vector)
    num_classes = max_class - min_class + 1
    # NOTE: attr_num/attr_len are not available here, so no data_structure is returned
def copy_data_stru(in_data_stru):
    return data_structure(in_data_stru.num_classes, in_data_stru.min_class, in_data_stru.attr_num, in_data_stru.attr_len)
## end of data structure part
########################################################################################
def train_test_transpose(data_matrix, attr_num, attr_len, trans=True):
data_row, data_col = data_matrix.shape
data_matrix = data_matrix.reshape(data_row, attr_num, attr_len, 1)
if trans == True:
data_matrix = np.transpose(data_matrix, (0, 2, 3, 1))
else:
data_matrix = np.transpose(data_matrix, (0, 2, 1, 3))
#data_matrix = data_matrix.reshape(data_row, data_col)
return data_matrix
def y_vector_to_matrix(y_vector, num_classes, start_class=0):
vector_len = len(y_vector)
# print y_vector
# print vector_len
# print num_classes
# print "========"
y_matrix = np.zeros((vector_len, num_classes))
count = 0
for item in y_vector:
y_matrix[count, int(item)-start_class] = int(1)
count = count + 1
return y_matrix
def class_label_vector_checking(y_vector):
min_class = min(y_vector)
max_class = max(y_vector)
class_index_dict = {}
min_length = -1
max_length = -1
for c in range(min_class, max_class+1):
c_index = np.where(y_vector==c)[0]
class_index_dict[c] = c_index
if min_length == -1:
min_length = len(c_index)
elif len(c_index) < min_length:
min_length = len(c_index)
if max_length == -1:
max_length = len(c_index)
elif len(c_index) > max_length:
max_length = len(c_index)
return class_index_dict, min_length, max_length
def feature_data_generation_4d(data_matrix, feature_index_list):
row_n, attr_len, num_map, attr_num = data_matrix.shape
ret_matrix = []
new_row_col = 0
new_attr = len(feature_index_list)
new_row_col = new_attr * attr_len
for i in range(0, row_n):
ori_matrix = data_matrix[i].reshape(attr_len, attr_num)
matrix = ori_matrix[:, feature_index_list]
ret_matrix.append(matrix.reshape(new_row_col))
data_matrix = np.array(ret_matrix).reshape(row_n, new_row_col)
return np.array(ret_matrix).reshape(row_n, new_row_col), new_attr
def feature_data_generation(data_matrix, attr_len, attr_num, feature_index_list):
row_n, col_n = data_matrix.shape
ret_matrix = []
new_row_col = 0
new_attr = len(feature_index_list)
new_row_col = new_attr * attr_len
for i in range(0, row_n):
ori_matrix = data_matrix[i].reshape(attr_len, attr_num)
matrix = ori_matrix[:, feature_index_list]
ret_matrix.append(matrix.reshape(new_row_col))
data_matrix = np.array(ret_matrix).reshape(row_n, new_row_col)
return np.array(ret_matrix).reshape(row_n, new_row_col), new_attr
def feature_data_generation_v1(data_matrix, attr_num, feature_index_list, group_list=[]):
row_n, col_n = data_matrix.shape
    attr_len = col_n // attr_num
ret_matrix = []
new_row_col = 0
new_attr = len(feature_index_list)
if len(group_list) > 0:
for group in group_list:
new_attr = new_attr + len(group)
new_row_col = new_attr * attr_len
for i in range(0, row_n):
ori_matrix = data_matrix[i].reshape(attr_num, attr_len)
if len(group_list) > 0:
group_count = 0
for group in group_list:
if group_count == 0:
matrix = ori_matrix[group, :]
else:
matrix = np.append(matrix, ori_matrix[group, :])
group_count = group_count + 1
matrix = np.append(matrix, ori_matrix[feature_index_list, :])
else:
matrix = ori_matrix[feature_index_list, :]
ret_matrix.append(matrix.reshape(new_row_col))
data_matrix = np.array(ret_matrix).reshape(row_n, new_row_col)
return np.array(ret_matrix).reshape(row_n, new_row_col), new_attr, attr_len
def z_normlization(time_series):
mean = np.mean(time_series)
dev = np.std(time_series)
return (time_series - mean)/dev
if __name__ == '__main__':
series1 = [2.02, 2.33, 2.99, 6.85, 9.20, 8.80, 7.50, 6.00, 5.85, 3.85, 4.85, 3.85, 2.22, 1.45, 1.34]
series2 = [-0.12, -0.16, -0.13, 0.28, 0.37, 0.39, 0.18, 0.09, 0.15, -0.06, 0.06, -0.07, -0.13, -0.18, -0.26]
norm_1 = z_normlization(series1)
norm_2 = z_normlization(series2)
x = range(0, len(series1))
#data_str = 'uci'
#uci_data_stru = return_data_stru(data_str)
##uci_data_stru.num_classes = 3
##uci_data_stru.start_class = 11
#uci_data_stru.print_to_string()
#
row_num = 1
data_matrix = np.random.rand(row_num, 24)
max_gap = 0
attr_num=6
feature_index_list = np.array([row_num, 3])
print (data_matrix.reshape(row_num, attr_num, 4))
model = Generation_model(attr_num, 3, [], 2)
matrix, attr_num, attr_len = feature_data_generation(data_matrix, attr_num, model.selected_list, model.groups)
print ("===")
print (model.selected_list)
print (model.groups)
print (matrix.reshape(row_num, attr_num, attr_len))
###########################################################################
## Duplication part, used for multiple time series data. Do duplication on attributes dimension in order to run CNN
## duplicate rows
#def row_duplication(data_matrix, max_gap):
# ret_matrix = data_matrix.copy()
# row_n, col_n = ret_matrix.shape
# for i in range(2, max_gap+1):
# ret_matrix = np.append(ret_matrix, data_matrix[::i], axis=0)
# ret_matrix = np.append(ret_matrix, data_matrix[1::i], axis=0)
# return ret_matrix
#
#
## duplicate cols
#def col_duplication(data_matrix, max_gap):
# ret_matrix = data_matrix.copy()
# row_n, col_n = ret_matrix.shape
# for i in range(2, max_gap+1):
# ret_matrix = np.append(ret_matrix, data_matrix[:, ::i], axis=1)
# ret_matrix = np.append(ret_matrix, data_matrix[:, 1::i], axis=1)
# return ret_matrix
#
#
## Use to update data matrix in order to generate features based on time dimension
## data_matrix: A * T: A is number of attributes, T is the length of time dimension
## max_gap: In order to run cnn feature detection, we would like to do duplication on attribute dimension. No duplication if map_gap ==0
## data_stru: information for data_matrix
## Logic: 1, do matrix transpose to get data_matrix T * A
## 2, in order to get rid of the effect from attribute order, we do duplication on attribute dimension
## result from step 2 is T * (A + A/2 + A/3 + ... until max_gap)
## return updated data_matrix and updated data_stru
#def time_as_feature_transpose(data_matrix, max_gap):
# data_matrix = np.transpose(data_matrix) # T * A
# if max_gap == 0:
# return data_matrix
#
# data_matrix = col_duplication(data_matrix, max_gap) # data_matrix with duplication on attribute dimension (column dimension)
# return data_matrix
#
#
## We need d3_time_as_feature_transpose because the original data matrix is N * (A * T), N is number of instances, and column lenght is (A * T) which is the number of attributes times attribute length
#def d3_time_as_feature_transpose(d3_data_matrix, max_gap, data_stru):
# attr_num = data_stru.attr_num
# attr_len = data_stru.attr_len # which is the length of time dimension now
#
# row_n, col_n = d3_data_matrix.shape
# ret_data_matrix = list()
# data_row_matrix = d3_data_matrix[0].reshape(attr_num, attr_len)
# data_row_matrix = time_as_feature_transpose(data_row_matrix, max_gap)
# new_row, new_col = data_row_matrix.shape
# new_row_col_all = new_row * new_col
# ret_data_matrix.append(data_row_matrix.reshape(new_row_col_all))
# for i in range(1, row_n):
# data_row_matrix = d3_data_matrix[i].reshape(attr_num, attr_len)
# data_row_matrix = time_as_feature_transpose(data_row_matrix, max_gap)
# ret_data_matrix.append(data_row_matrix.reshape(new_row_col_all))
#
# ret_data_matrix = np.array(ret_data_matrix).reshape(row_n, new_row_col_all)
# ret_data_stru = data_structure(data_stru.num_classes, data_stru.start_class, new_row, new_col, data_stru.class_column)
#
# return ret_data_matrix, ret_data_stru
#
##End of Duplication part
###########################################################################
#
#
###########################################################################
##cross validataion part
#
##Given data matrix (x_matrix) and correspsonding class label vector (y_vector), do cross validation
##Need num_classes to make sure the validataion is balanced for all classes
##ratio: the ratio of testing data
#def cross_validation(x_matrix, y_vector, num_classes, ratio=0.1):
# instance_count = len(y_vector)
# one_class_count = instance_count/num_classes
# start = 0;
# end = start + one_class_count
# train_x_matrix, test_x_matrix, train_y_vector, test_y_vector = train_test_split(x_matrix[start:end, :], y_vector[start:end], test_size=ratio, random_state=0)
# start = end
# end = end + one_class_count
# while(end<=instance_count):
# sub_train_x, sub_test_x, sub_train_y, sub_test_y = train_test_split(x_matrix[start:end, :], y_vector[start:end], test_size=ratio, random_state=0)
# train_x_matrix = np.concatenate((train_x_matrix, sub_train_x), axis = 0)
# test_x_matrix = np.concatenate((test_x_matrix, sub_test_x), axis=0)
# train_y_vector.extend(sub_train_y)
# test_y_vector.extend(sub_test_y)
# start = end
# end = end + one_class_count
# return train_x_matrix, train_y_vector, test_x_matrix, test_y_vector
#
##End of cross validation part
###########################################################################
#
#
###########################################################################
##Using feature to generate partial data
#
##For multiple time series data matrix
##data_matrix: N * M: N is the number of instances, M is a vector to represnet the attr * time matrix
##Need attr_num to reshape the 1*M vector to attr * time matrix
##attr_index_list: numpy array for the key attribute indexes
##time_index_list: numpy array for the key time indexes
#def old_feature_data_generation(data_matrix, attr_num, attr_index_list = None, method='attribute', time_index_list = None):
# row_n, col_n = data_matrix.shape
# time_len = col_n/attr_num
# ret_matrix = []
# new_row_col = 0
# if method == 'attribute':
# new_row = len(attr_index_list)
# new_row_col = new_row * time_len
# for i in range(0, row_n):
# matrix = data_matrix[i].reshape(attr_num, time_len)
# matrix = matrix[attr_index_list, :]
# ret_matrix.append(matrix.reshape(new_row_col))
# attr_num = new_row
# elif method == 'time':
# new_col = len(time_index_list)
# new_row_col = attr_num * new_col
# for i in range(0, row_n):
# matrix = data_matrix[i].reshape(attr_num, time_len)
# matrix = matrix[:, time_index_list]
# ret_matrix.append(matrix.reshape(new_row_col))
# time_len = new_col
# return np.array(ret_matrix).reshape(row_n, new_row_col), attr_num, time_len
#
## input data_matrix: 2d matrix with r * (a * l).
## r number of instances and each instance has a attributes and each attribute has length l
## atr_num: attribute number
## fature_index_list: a list contains the index of picked attributes
## Rturn: return a data matrix only contains the attributes from feature_index_list
#def feature_data_generation(data_matrix, attr_num, feature_index_list, feature_col_update=True):
# row_n, col_n = data_matrix.shape
# attr_len = col_n/attr_num
# ret_matrix = []
# new_row_col = 0
#
# if feature_col_update == True:
# new_row = len(feature_index_list)
# new_row_col = new_row * attr_len
# else:
# new_row = attr_num
# new_row_col = col_n
#
# for i in range(0, row_n):
# matrix = data_matrix[i].reshape(attr_num, attr_len)
# if feature_col_update == True:
# matrix = matrix[feature_index_list, :]
# ret_matrix.append(matrix.reshape(new_row_col))
# else:
# a = range(0, attr_num)
# remove_index = [x for x in a if x not in feature_index_list]
# matrix[remove_index, :] = 0
# ret_matrix.append(matrix.reshape(new_row_col))
#
# attr_num = new_row
# return np.array(ret_matrix).reshape(row_n, new_row_col), attr_num, attr_len
#
#
#
#
#
#
#
#def class_label_vector_checking(y_vector):
# min_class = min(y_vector)
# max_class = max(y_vector)
# class_index_dict = {}
# min_length = -1
# max_length = -1
# for c in range(min_class, max_class+1):
# c_index = np.where(y_vector==c)[0]
# class_index_dict[c] = c_index
# if min_length == -1:
# min_length = len(c_index)
# elif len(c_index) < min_length:
# min_length = len(c_index)
# if max_length == -1:
# max_length = len(c_index)
# elif len(c_index) > max_length:
# max_length = len(c_index)
#
# return class_index_dict, min_length, max_length
```
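`y_vector_to_matrix` and `feature_data_generation` above are the two reshaping helpers the classification code relies on: one-hot encoding labels that start at `min_class`, and keeping only the selected attribute columns of each flattened instance. A compact NumPy sketch of both on toy shapes:
```python
import numpy as np

# One-hot encoding with labels starting at min_class (equivalent to y_vector_to_matrix).
y_vector = np.array([2, 3, 2, 4])
min_class, num_classes = 2, 3
y_matrix = np.eye(num_classes)[y_vector - min_class]
print(y_matrix.shape)            # (4, 3)

# Keeping selected attributes (equivalent to feature_data_generation):
# each row is a flattened (attr_len x attr_num) matrix.
n_ins, attr_len, attr_num = 5, 4, 3
data_matrix = np.arange(n_ins * attr_len * attr_num, dtype=float).reshape(n_ins, attr_len * attr_num)
feature_index_list = [0, 2]      # keep attributes 0 and 2
selected = data_matrix.reshape(n_ins, attr_len, attr_num)[:, :, feature_index_list]
selected = selected.reshape(n_ins, attr_len * len(feature_index_list))
print(selected.shape)            # (5, 8)
```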
#### File: src/fileio/log_io.py
```python
import logging
import sys
from datetime import datetime
import os
#import tensorflow as tf
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
#logging.getLogger("tensorflow").setLevel(logging.INFO)
##
# Try not to initialize many loggers at the same run
# def init_logging(log_file=''):
# if log_file != '':
# log_file = datetime.now().strftime(log_file + '_%Y_%m_%d_%H_%M.log')
# log_format = "%(levelname)s %(asctime)-15s [%(lineno)d] %(funcName)s: %(message)s"
# if log_file == '':
# logging.basicConfig(format=log_format, level=logging.INFO, stream=sys.stdout)
# else:
# logging.basicConfig(filename=log_file, filemode='w', format=log_format, level=logging.INFO)
# logger = logging.getLogger()
# #logger = tf.get_logger()
# return logger
def init_logging(log_file='', name='log_name', level=logging.DEBUG):
    """Set up as many named loggers as you want; logs to a timestamped file, or to stdout if no file is given."""
    formatter = logging.Formatter('%(levelname)s %(asctime)-15s [%(lineno)d] %(funcName)s: %(message)s')
    if log_file != '':
        log_file = datetime.now().strftime(log_file + '_%Y_%m_%d_%H_%M.log')
        handler = logging.FileHandler(log_file, 'w')
    else:
        # No file given: fall back to a console logger instead of recursing forever
        handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    logger = logging.getLogger(name)
    #logger = tf.get_logger(name)
    logger.setLevel(level)
    logger.addHandler(handler)
    return logger
if __name__ == '__main__':
#log_file = "/home/ivan/Research/projects/yhao_cnn_varying/src/python/test"
#init_logging(log_file)
#setup_logger(log_file)
#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
logger = setup_logger('')
#logging.debug('This message should appear on the console')
logger.info('So should this')
#logging.warning('And this, too')
```
#### File: src/fileio/object_io.py
```python
import pickle
from collections import Counter
import numpy as np
from data_io import list_files
#python 2 version
def save_obj_2(obj_list, out_file):
with open(out_file, 'w') as f:
pickle.dump(obj_list, f)
def load_obj_2(load_file):
with open(load_file) as f:
obj_list = pickle.load(f)
return obj_list
#python 3 version
def save_obj(obj_list, out_file):
    with open(out_file, 'wb') as s:
        pickle.dump(obj_list, s)
def load_obj(load_file):
    with open(load_file, 'rb') as s:
        return pickle.load(s)
if __name__ == '__main__':
obj_file = "../../object/dsa/incre_genetic_projected_cnn_out_pckl/train_0_incre_genetic_projected_fit0_class0_result.pckl"
obj_file = "../../object/dsa/incre_genetic_projected_cnn_out_pckl/train_5_incre_genetic_projected_fit0_class1_result.pckl"
obj_file = "../../object/dsa/train_test_3_fold/incre_genetic_projected_cnn_out_pckl/train_2_incre_genetic_projected_fit0_class0_result.pckl"
obj_file = "../../object/dsa/train_test_3_fold/projected_generation/genetic_cnn_projected_class_based_class6.pckl"
obj_file = "../../object/dsa/incre_genetic_projected_cnn_temp_saver/train_5.txt_class8_fit_type_0_gene_3__c5_1_p2_1_c-1_1_p-1_-1.ckpt_last_conv_layer_output.pckl"
obj_file = "../../object/dsa/train_test_3_fold/projected_load_generation/genetic_cnn_projected_class_based_class6.pckl"
obj_file = "../../object/dsa/train_test_3_fold/incre_genetic_projected_9_cnn_out_pckl/train_0_incre_genetic_projected_9_fit9_class6_result.pckl"
obj_file = "../../object/dsa/train_test_3_fold/incre_genetic_projected_9_cnn_out_pckl_all_fold_knn/incre_genetic_projected_9_fit9_class1_result.pckl"
obj_file = "../../object/dsa/train_test_3_fold/incre_genetic_projected_9_cnn_out_pckl/train_1_incre_genetic_projected_9_fit9_class6_result.pckl"
obj_file = "../../object/dsa/train_test_3_fold/incre_genetic_covered_fit9_fold_feature/train_2_incre_genetic_covered_fit9_fit9_class9_result.pckl"
obj_file = '../../object/dsa/tkde_2005_dcpc_score/train_0_dcpc.obj'
obj_file = "../../object/rar/all_feature_classification/fcn_obj_folder_rf_lda/train_6_rf_lda_min0_max33.obj"
obj_file = "../../object/arabic/arxiv_2017_mask/train_0_mask_gene_shapNum8_shapMin10_shapMax20_class0.obj"
obj_file = "../../object/dsa/all_feature_classification/cnn_result_folder/train_0_all_feature_cnn_result.ckpt"
obj_file = "../../object/dsa/all_feature_classification/cnn_obj_folder/train_9_count0_cnn_class0_c5_1_p2_1_c5_1_p2_1_c115_1_p-1_-1.ckpt"
obj_file = "../../object/dsa/all_feature_classification/fcn_obj_folder/train_9_count0_fcn_class0_c8_1_c5_1_c3_1global_p112_1.ckpt"
obj_file = "../../object/ara/arxiv_2017_mask/train_0_mask_gene_shapNum8_shapMin10_shapMax20_class1.obj"
obj_file = "../../object/ara/arxiv_2017_mask/train_0_mask_gene_shapNum10_shapMin3_shapMax5_class0.obj"
obj_file = "../../object/ara/pure_feature_generation/train_0_rf_lda_min0_max10pure_projected.obj"
obj_file = "../../object/dsa/cnn_classification/cnn_obj_folder/train_0_act3_c5_1_p2_1_c5_1_p2_1_c115_1_p-1_-1.ckpt"
#obj_folder = '../../object/dsa/tkde_2005_dcpc_score_libsvm_out/'
#obj_folder = '../../object/dsa/all_feature_classification/fcn_obj_folder/'
#obj_file = 'train_0_count0_fcn_class0_c8_1_c5_1_c3_1global_p112_1.ckpt'
#obj_vector = load_obj(obj_folder + obj_file)
obj_vector = load_obj(obj_file)[1]
print (obj_vector.shape)
#print (obj_vector)
```
#### File: src/tensor_model/example_cnn.py
```python
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
pickle_file = '../../data/notMNIST/notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
num_channels = 1  # grayscale
patch_size = 5
# Assumption: the pickled arrays are (N, 28, 28) float images with integer labels, as in the
# original notMNIST tutorial; reshape them into 4-D tensors and one-hot labels for the conv net.
def reformat(dataset, labels):
    dataset = dataset.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)
    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
# Percentage of predictions whose argmax matches the one-hot labels
def accuracy(predictions, labels):
    return 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]
# Compute the output image size from the input size, filter sizes, padding and strides,
# for a network with 2 convolution layers and 2 pooling layers
def output_size_pool(input_size, conv_filter_size, pool_filter_size, padding, conv_stride, pool_stride):
if padding == 'same':
padding = -1.00
elif padding == 'valid':
padding = 0.00
else:
return None
# After convolution 1
output_1 = (((input_size - conv_filter_size - 2*padding) / conv_stride) + 1.00)
# After pool 1
output_2 = (((output_1 - pool_filter_size - 2*padding) / pool_stride) + 1.00)
# After convolution 2
output_3 = (((output_2 - conv_filter_size - 2*padding) / conv_stride) + 1.00)
# After pool 2
output_4 = (((output_3 - pool_filter_size - 2*padding) / pool_stride) + 1.00)
return int(output_4)
final_image_size = output_size_pool(input_size=image_size, conv_filter_size=5, pool_filter_size=2, padding='valid', conv_stride=1, pool_stride=2)
print(final_image_size)
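# Worked example for the call above: with a 28x28 input, 5x5 'valid' convolutions (stride 1)
# and 2x2 pooling (stride 2), the size shrinks 28 -> 24 -> 12 -> 8 -> 4, so final_image_size is 4.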
batch_size = 16
# Depth is the number of output channels
# On the other hand, num_channels is the number of input channels set at 1 previously
depth = 32
num_hidden = 64
beta = 0.001
graph = tf.Graph()
with graph.as_default():
'''Input data'''
tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
'''Variables'''
# Convolution 1 Layer
# Input channels: num_channels = 1
    # Output channels: depth = 32
layer1_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, num_channels, depth], stddev=0.1))
layer1_biases = tf.Variable(tf.zeros([depth]))
# Convolution 2 Layer
    # Input channels: depth = 32
    # Output channels: depth = 32
layer2_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, depth, depth], stddev=0.1))
layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
# First Fully Connected Layer (Densely Connected Layer)
# Use neurons to allow processing of entire image
final_image_size = output_size_pool(input_size=image_size, conv_filter_size=5, pool_filter_size=2, padding='valid', conv_stride=1, pool_stride=2)
layer3_weights = tf.Variable(tf.truncated_normal([final_image_size * final_image_size * depth, num_hidden], stddev=0.1))
layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
# Second Fully Connected Layer
layer4_weights = tf.Variable(tf.truncated_normal([num_hidden, num_hidden], stddev=0.1))
layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
# Readout layer: Softmax Layer
layer5_weights = tf.Variable(tf.truncated_normal([num_hidden, num_labels], stddev=0.1))
layer5_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))
'''Model'''
def model(data):
# First Convolutional Layer with Pooling
conv_1 = tf.nn.conv2d(data, layer1_weights, strides=[1, 1, 1, 1], padding='VALID')
hidden_1 = tf.nn.relu(conv_1 + layer1_biases)
pool_1 = tf.nn.avg_pool(hidden_1, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
# Second Convolutional Layer with Pooling
conv_2 = tf.nn.conv2d(pool_1, layer2_weights, strides=[1, 1, 1, 1], padding='VALID')
hidden_2 = tf.nn.relu(conv_2 + layer2_biases)
pool_2 = tf.nn.avg_pool(hidden_2, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
# First Fully Connected Layer
shape = pool_2.get_shape().as_list()
reshape = tf.reshape(pool_2, [shape[0], shape[1] * shape[2] * shape[3]])
hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
keep_prob = 0.5
hidden_drop = tf.nn.dropout(hidden, keep_prob)
# Second Fully Connected Layer
hidden_2 = tf.nn.relu(tf.matmul(hidden_drop, layer4_weights) + layer4_biases)
hidden_2_drop = tf.nn.dropout(hidden_2, keep_prob)
# Readout Layer: Softmax Layer
return tf.matmul(hidden_2_drop, layer5_weights) + layer5_biases
'''Training computation'''
logits = model(tf_train_dataset)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Loss function with L2 Regularization
# regularizers = tf.nn.l2_loss(layer4_weights) + \
# tf.nn.l2_loss(layer5_weights)
# loss = tf.reduce_mean(loss + beta * regularizers)
'''Optimizer'''
# Decaying learning rate
global_step = tf.Variable(0) # count the number of steps taken.
start_learning_rate = 0.05
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, 100000, 0.96, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
'''Predictions for the training, validation, and test data'''
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 30000
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 5000 == 0):
print('Minibatch loss at step %d: %f' % (step, l))
print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
print('Validation accuracy: %.1f%%' % accuracy(valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
```
#### File: src/tensor_model/model_nn.py
```python
import tensorflow as tf
import numpy as np
import time
import sys
import os
from model_setting import nn_parameters
from model_cnn import cross_entropy_setup
from model_cnn import batch_control
sys.path.insert(1, os.path.join(os.path.dirname(sys.path[0]), 'data_processing/'))
from data_processing import class_label_vector_checking
sys.path.insert(1, os.path.join(os.path.dirname(sys.path[0]), 'fileio/'))
from log_io import setup_logger
# Import MNIST data
#from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
def run_nn(train_x_matrix, train_y_matrix, test_x_matrix, test_y_matrix, nn_setting, logger=None):
if logger is None:
logger = setup_logger('')
x_row, x_col = train_x_matrix.shape
y_row, y_col = train_y_matrix.shape
num_classes = y_col
train_x_placeholder, train_y_placeholder, logits_out, keep_prob_placeholder = configure_nn(x_col, num_classes, nn_setting)
best_eval_value, train_run_time, test_run_time, nn_predict_proba = nn_train(train_x_matrix, train_y_matrix, test_x_matrix, test_y_matrix, train_x_placeholder, train_y_placeholder, logits_out, keep_prob_placeholder, nn_setting, logger)
return best_eval_value, train_run_time, test_run_time, nn_predict_proba
##
def conf_nn_layers(train_x_col, input_placeholder, nn_setting, logger=None):
if logger is None:
logger = setup_logger('')
layer_list = nn_setting.layer_list
std_value = nn_setting.std_value
layer_out = input_placeholder
layer_iter = 0
layer_input = train_x_col
keep_prob_placeholder = tf.placeholder(tf.float32)
for neurons in layer_list:
weight_name = "weight_" + str(layer_iter)
bias_name = "bias_" + str(layer_iter)
weight = tf.Variable(tf.random_normal([layer_input, neurons], stddev=std_value, seed=layer_iter), name=weight_name)
bias = tf.Variable(tf.zeros([neurons]), name=bias_name)
layer_input = neurons
hidden_out = tf.add(tf.matmul(layer_out, weight), bias)
layer_out = tf.nn.relu(hidden_out)
layer_out = tf.nn.dropout(layer_out, keep_prob_placeholder)
layer_iter = layer_iter + 1
return layer_out, layer_iter, keep_prob_placeholder
def conf_nn_out(input_matrix, num_classes, std_value, layer):
layer_input = int(input_matrix.get_shape()[1])
weight = tf.Variable(tf.random_normal([layer_input, num_classes], stddev=std_value, seed=layer), name="out_weight")
bias = tf.Variable(tf.random_normal([num_classes], stddev=std_value, seed=layer), name="out_bias")
return tf.add(tf.matmul(input_matrix, weight), bias)
# Both train_x and train_y are 2-d matrixes
def configure_nn(train_x_col, num_classes, nn_setting, logger=None):
if logger is None:
logger = setup_logger('')
std_value = nn_setting.std_value
tf.reset_default_graph()
tf.random.set_random_seed(0)
train_x_placeholder = tf.placeholder(tf.float32, [None, train_x_col])
train_y_placeholder = tf.placeholder(tf.float32, [None, num_classes])
layer_out_matrix, layer_iter, keep_prob_placeholder = conf_nn_layers(train_x_col, train_x_placeholder, nn_setting, logger)
logits_out = conf_nn_out(layer_out_matrix, num_classes, std_value, layer_iter)
return train_x_placeholder, train_y_placeholder, logits_out, keep_prob_placeholder
def nn_train(train_x_matrix, train_y_matrix, test_x_matrix, test_y_matrix, train_x_placeholder, train_y_placeholder, logits_out, keep_prob_placeholder, nn_setting, logger):
if logger is None:
logger = setup_logger('')
(overall_len, x_col) = train_x_matrix.shape
(y_row, num_classes) = train_y_matrix.shape
predict_y_proba = tf.nn.softmax(logits_out)
train_y_vector = np.argmax(train_y_matrix, axis=1)
max_class = max(train_y_vector)
min_class = min(train_y_vector)
eval_method = nn_setting.eval_method
batch_size = nn_setting.batch_size
stop_threshold = nn_setting.stop_threshold
max_iter = nn_setting.max_epoch
saver_file = nn_setting.save_file
cross_entropy, eval_method_value, eval_method_keyword, coefficient_placeholder = cross_entropy_setup(eval_method, num_classes, logits_out, train_y_placeholder)
beta = 0.001
full_weight = tf.get_default_graph().get_tensor_by_name("weight_0:0")
regularizers = tf.nn.l2_loss(full_weight)
cross_entropy = tf.reduce_mean(cross_entropy + regularizers * beta)
train_class_index_dict, train_min_length, train_max_length = class_label_vector_checking(train_y_vector)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
nn_session = tf.InteractiveSession()
nn_session.run(tf.global_variables_initializer())
test_eval_value = 0
best_eval_value = 0
i = 0
start = 0
epoch = 0
end = batch_size
batch_each_class = int(batch_size/num_classes)
saver = tf.train.Saver()
train_run_time = 0
np.random.seed(epoch)
batch_index = np.random.permutation(overall_len)
logger.info("Random Epoch: " + str(epoch) + str(batch_index[0:5]))
keep_prob_val = 0.5
while(test_eval_value < stop_threshold):
if start >= overall_len:
start = 0
end = start + batch_size
epoch = epoch + 1
            np.random.seed(epoch)
            batch_index = np.random.permutation(overall_len)
            logger.info("Random Epoch: " + str(epoch) + str(batch_index[0:5]))
            print("Random Epoch: " + str(epoch) + str(batch_index[0:5]))
elif end > overall_len:
end = overall_len
batch_x_matrix = train_x_matrix[batch_index[start:end], :]
batch_y_matrix = train_y_matrix[batch_index[start:end], :]
if eval_method == 'f1' or eval_method == "acc":
if i == 0:
logger.info("Batch controlled")
print("Batch controled")
batch_x_matrix, batch_y_matrix, coefficients_vector = batch_control(batch_x_matrix, batch_y_matrix, train_x_matrix, train_y_matrix, i, batch_each_class, min_class, max_class, train_class_index_dict, logger)
batch_max_len = float(max(coefficients_vector))
coefficients_vector = batch_max_len/coefficients_vector
start_time = time.time()
train_step.run(feed_dict={train_x_placeholder: batch_x_matrix, train_y_placeholder: batch_y_matrix, coefficient_placeholder: coefficients_vector, keep_prob_placeholder: keep_prob_val})
train_run_time = train_run_time + time.time() - start_time
else:
start_time = time.time()
train_step.run(feed_dict={train_x_placeholder: batch_x_matrix, train_y_placeholder: batch_y_matrix, keep_prob_placeholder: keep_prob_val})
train_run_time = train_run_time + time.time() - start_time
if i % 100 == 0:
test_eval_value = eval_method_value.eval(feed_dict={
train_x_placeholder: test_x_matrix, train_y_placeholder: test_y_matrix, keep_prob_placeholder: 1.0})
if str(test_eval_value) == 'nan':
test_eval_value = 0
#print_str = "step " + str(i) + ", training " + eval_method_keyword + ": " + str(train_eval_value)
#logger.info(print_str)
print_str = "step " + str(i) + ", testing " + eval_method_keyword + ": " + str(test_eval_value)
logger.info(print_str)
print(print_str)
if best_eval_value < test_eval_value:
# Save the variables to disk.
best_eval_value = test_eval_value
save_path = saver.save(nn_session, saver_file)
print_str = "Model saved in file: " + save_path + ' at iteration: ' + str(i)
logger.info(print_str)
i = i + 1
start = end
end = end + batch_size
if epoch > max_iter:
logger.info("best eval value at epoch: " + str(epoch))
logger.info("best eval value to break")
logger.info(best_eval_value)
break
start_time = time.time()
test_eval_value = eval_method_value.eval(feed_dict={train_x_placeholder: test_x_matrix, train_y_placeholder: test_y_matrix, keep_prob_placeholder: 1.0})
test_run_time = time.time() - start_time
if test_eval_value < best_eval_value:
nn_session.close()
nn_session = tf.InteractiveSession()
saver.restore(nn_session, saver_file)
else:
best_eval_value = test_eval_value
logger.info("Running iteration: %d" % (i))
logger.info("final best " + eval_method_keyword + ": " + str(best_eval_value))
logger.info("final test " + eval_method_keyword + ": " + str(test_eval_value))
print("final best " + eval_method_keyword + ": " + str(best_eval_value))
print("final test " + eval_method_keyword + ": " + str(test_eval_value))
nn_predict_proba = nn_session.run(predict_y_proba, feed_dict={train_x_placeholder: test_x_matrix, keep_prob_placeholder: 1.0})
logger.info("NN model saved: " + str(saver_file))
nn_session.close()
return best_eval_value, train_run_time, test_run_time, nn_predict_proba
def main():
train_y_vector = np.array([1,1,2,2,2,2,2,2,2,2, 3, 3, 3])
train_y_predict_vector = np.array([2,2,2,2,2,2,2,2,2,2, 2, 2,2])
min_class = min(train_y_vector)
max_class = max(train_y_vector)
num_classes = max_class - min_class + 1
train_class_index_dict, train_min_length, train_max_length = class_label_vector_checking(train_y_vector)
correct_prediction = tf.equal(train_y_vector, train_y_predict_vector)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
avg_accuracy = tf.Variable(0.0, tf.float32)
for c_label in range(min_class, max_class+1):
c_label_index = train_class_index_dict[c_label]
class_y_vector = train_y_vector[c_label_index]
        class_y_predict = train_y_predict_vector[c_label_index]
class_accuracy = tf.reduce_mean(tf.cast(tf.equal(class_y_vector, class_y_predict), tf.float32))
avg_accuracy = avg_accuracy + class_accuracy
avg_accuracy = (avg_accuracy)/num_classes
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print (sess.run(accuracy))
print (sess.run(avg_accuracy))
def main_test():
train_x_matrix = np.array([[1,1,1], [1,2,2], [1,2,3], [2,2,2], [3,3,3], [3,2,1], [1,1,1], [1,2,2], [1,2,3], [2,2,2], [3,3,3], [3,2,1], [1,1,1], [1,2,2], [1,2,3], [2,2,2], [3,3,3], [3,2,1], [1,1,1], [1,2,2], [1,2,3], [2,2,2], [3,3,3], [3,2,1]])
train_y_matrix = np.array([[0,1],[1,0], [1,0], [0,1], [0, 1], [1, 0], [0,1],[1,0], [1,0], [0,1], [0, 1], [1, 0], [0,1],[1,0], [1,0], [0,1], [0, 1], [1, 0], [0,1],[1,0], [1,0], [0,1], [0, 1], [1, 0]])
test_x_matrix = np.array([[1,1,1], [1,2,2], [1,2,3], [2,2,2], [3,3,3], [3,2,1]])
test_y_matrix = np.array([[0,1],[1,0], [1,0], [0,1], [0, 1], [1, 0]])
#train_x_matrix = mnist.train.images
#train_y_matrix = mnist.train.labels
#test_x_matrix = mnist.test.images
#test_y_matrix = mnist.test.labels
print (train_x_matrix.shape)
print (train_y_matrix.shape)
print (test_y_matrix.shape)
#train_x_matrix = test_x_matrix
#train_y_matrix = test_y_matrix
print(train_x_matrix.shape)
print(train_y_matrix.shape)
layer_list = np.array([300])
num_classes = 2
x_col = 3
batch_size = 100
learning_rate = 0.001
logger = None
saver_file = './test.save'
nn_setting = nn_parameters(layer_list, batch_size, 5, 0.9, 3, 0.02, 'f1', saver_file)
best_eval_value, train_run_time, test_run_time, nn_predict_proba = run_nn(train_x_matrix, train_y_matrix, test_x_matrix, test_y_matrix, nn_setting, logger)
print(best_eval_value)
print(train_run_time)
print(test_run_time)
print(nn_predict_proba)
if __name__ == '__main__':
# run_simple_graph()
# run_simple_graph_multiple()
# simple_with_tensor_board()
#nn_example()
main_test()
```
#### File: src/tensor_model/model_setting.py
```python
import numpy as np
class nn_parameters:
def __init__(self, layer_list, batch_size=100, max_epoch=5, stop_threshold=0.9, activation_fun=0, std_value=0, eval_method='accuracy', save_file='./default_nn.ckpt'):
self.layer_list = layer_list
self.batch_size = batch_size
self.max_epoch = max_epoch
self.stop_threshold = stop_threshold
self.activation_fun = activation_fun
self.std_value = std_value
self.eval_method = eval_method
self.save_file = save_file
def to_string(self):
ret_str = 'layer list: \n' + np.array_str(self.layer_list) +'\nbatch size: ' + str(self.batch_size) +'\nmax epoch: ' + str(self.max_epoch) +'\nstop threshold: ' + str(self.stop_threshold)
if self.activation_fun == 0:
ret_str = ret_str + '\nactivation function: RELU'
elif self.activation_fun == 1:
            ret_str = ret_str + '\nactivation function: Sigmoid'
else:
ret_str = ret_str + '\nactivation function: Tanh'
ret_str = ret_str +'\ninitial std value: ' + str(self.std_value)
ret_str = ret_str + '\neval method: ' + self.eval_method
ret_str = ret_str + '\nsave obj file: ' + self.save_file
return ret_str
class cnn_parameters:
    # If feature_method == 'none', no feature detection is needed
    # conv_kernel_list: [[r1, c1], [r2, c2], [r3, c3]]: the first convolutional kernel is r1 * c1, and so on
    # pool_rate_list: [[r1, c1], [r2, c2], [r3, c3]]: the first pooling kernel is r1 * c1, and so on
    # feature_num_list: [a, b]: a feature maps after the first convolution and b feature maps after the second
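    # Example (illustrative values): conv_kernel_list=[[5, 5], [5, 5]], pool_rate_list=[[2, 2], [2, 2]]
    # and feature_num_list=[16, 32] describe two 5x5 convolution layers, each followed by 2x2 pooling,
    # with 16 feature maps after the first convolution and 32 after the second.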
def __init__(self, conv_kernel_list, pool_rate_list, feature_num_list, batch_size=100, max_iter=900, stop_threshold=0.9, activation_fun=0, std_value=0, same_size=False, feature_method='vote', eval_method='accuracy', out_obj_folder='./', out_model_folder='./', group_list = [], input_map=1, full_feature_num=-1, l2_bool=False, keep_prob=0.5):
self.conv_kernel_list = conv_kernel_list
self.pool_rate_list = pool_rate_list
self.feature_num_list = feature_num_list
self.batch_size = batch_size
self.max_iter = max_iter
self.stop_threshold = stop_threshold
self.activation_fun = activation_fun
self.std_value = std_value
self.same_size = same_size
self.feature_method = feature_method
self.out_obj_folder = out_obj_folder
self.out_model_folder = out_model_folder
self.eval_method = eval_method
self.input_map = input_map
self.full_feature_num = full_feature_num
self.l2_bool = l2_bool
self.keep_prob_val = keep_prob
def to_string(self):
ret_str = 'conv kernel list: \n' + np.array_str(self.conv_kernel_list) +'\npool rate list: \n' + np.array_str(self.pool_rate_list) +'\nfeature map num list: \n' + np.array_str(self.feature_num_list) +'\nbatch size: ' + str(self.batch_size) +'\nmax iteration: ' + str(self.max_iter) +'\nstop threshold: ' + str(self.stop_threshold)
if self.activation_fun == 0:
ret_str = ret_str + '\nactivation function: RELU with count'
elif self.activation_fun == 1:
            ret_str = ret_str + '\nactivation function: Sigmoid'
elif self.activation_fun == 2:
ret_str = ret_str + '\nactivation function: Tanh'
else:
ret_str = ret_str + '\nactivation function: RELU'
ret_str = ret_str +'\ninitial std value: ' + str(self.std_value) +'\ncnn same size or not: ' + str(self.same_size) +'\nfeature method: ' + str(self.feature_method)
ret_str = ret_str + '\neval method: ' + self.eval_method
ret_str = ret_str + '\nsave obj folder: ' + self.out_obj_folder + '\ntemp obj folder: ' + self.out_model_folder
#ret_str = ret_str + "\ngroup list: " + str(self.group_list)
ret_str = ret_str + "\nkeep prob: " + str(self.keep_prob_val)
return ret_str
def cnn_setting_clone(cnn_setting):
return cnn_parameters(cnn_setting.conv_kernel_list, cnn_setting.pool_rate_list, cnn_setting.feature_num_list, cnn_setting.batch_size, cnn_setting.max_iter, cnn_setting.stop_threshold, cnn_setting.activation_fun, cnn_setting.std_value, cnn_setting.same_size, cnn_setting.feature_method, cnn_setting.eval_method, cnn_setting.out_obj_folder, cnn_setting.out_model_folder)
#This function is used to convert an input string array to a numpy array, used for conv layers and pooling layers
#The output array column number needs to be given: col_num=2 for conv_layers and pooling_layers, col_num=1 for feature_num_list
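# Example (for illustration): string_array_to_numpy_array(['5 5', '2 2'], ' ', 2) returns np.array([[5, 5], [2, 2]]),
# while a col_num=1 input such as ['16', '32'] is flattened to np.array([16, 32])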
def string_array_to_numpy_array(input_str_array, delimiter=' ', col_num=2):
array_len = len(input_str_array)
return_array = []
for i in range(0, array_len):
return_element = []
element = input_str_array[i].strip()
element = element.split(delimiter)
if len(element) != col_num:
raise Exception("The column number should be " + str(col_num) + " , please check your cnn setting parameter file")
for item in element:
element_str = str(item)
if element_str.startswith('['):
element_str = element_str.replace('[', '').replace(']','')
return_element.append(element_str.split(','))
else:
return_element.append(int(element_str))
return_array.append(return_element)
return_array = np.array(return_array)
if col_num == 1:
return_array = return_array.reshape(array_len)
return return_array
###############################################################
# CNN function
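# The parameter file read by return_cnn_setting_from_file is keyword driven; a minimal sketch with
# illustrative values (each keyword appears on its own line, followed by its value on the next line):
#   #kernel_list
#   5 5, 5 5
#   #pooling_list
#   2 2, 2 2
#   #feature_num_list
#   16, 32
#   #batch
#   100
# and similarly for #max_iter, #stop_threshold, #activation_func, #std_value, #same_size,
# #feature_method and #eval_method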
def return_cnn_setting_from_file(cnn_setting_file):
keyword_list = ['#kernel_list', '#pooling_list', '#feature_num_list', '#batch', '#max_iter', '#stop_threshold', '#activation_func', '#std_value', '#same_size', '#feature_method', '#eval_method']
first_delimiter = ', '
second_delimiter = ' '
keyword = ''
conv_kernel_list = ''
pool_rate_list = ''
feature_num_list = ''
batch_size = -1
max_iter = -1
stop_threshold = -1
activation_func = -1
std_value = -1
same_size = ''
feature_method = ''
eval_method = ''
full_feature_num = 400
lines = open(cnn_setting_file).readlines()
for line in lines:
if line.startswith('#'):
temp = line.strip()
if temp in keyword_list:
keyword = line.strip()
continue
if keyword == '#kernel_list':
conv_kernel_list = line.strip()
elif keyword == '#pooling_list':
pool_rate_list = line.strip()
elif keyword == '#feature_num_list':
feature_num_list = line.strip()
elif keyword == '#batch':
batch_size = int(line.strip())
elif keyword == '#max_iter':
max_iter = int(line.strip())
elif keyword == '#stop_threshold':
stop_threshold = float(line.strip())
elif keyword == '#activation_func':
activation_func = int(line.strip())
elif keyword == '#std_value':
std_value = float(line.strip())
elif keyword == '#same_size':
same_size = line.strip()
elif keyword == '#feature_method':
feature_method = line.strip()
elif keyword == '#eval_method':
eval_method = line.strip()
if batch_size<0 or max_iter<0 or stop_threshold<0 or activation_func<0 or std_value<0:
raise Exception("Wrong data paramters, please check the parameter file " + parameter_file)
conv_kernel_list = conv_kernel_list.split(first_delimiter)
pool_rate_list = pool_rate_list.split(first_delimiter)
feature_num_list = feature_num_list.split(first_delimiter)
if len(conv_kernel_list) != len(pool_rate_list) or len(pool_rate_list) != len(feature_num_list):
raise Exception("Conv layers and pooling layers need to have the same number")
conv_kernel_array = string_array_to_numpy_array(conv_kernel_list, second_delimiter, 2)
pool_rate_array = string_array_to_numpy_array(pool_rate_list, second_delimiter, 2)
feature_num_array = string_array_to_numpy_array(feature_num_list, second_delimiter, 1)
return cnn_parameters(conv_kernel_array, pool_rate_array, feature_num_array, batch_size, max_iter, stop_threshold, activation_func, std_value, same_size, feature_method, eval_method)
def return_cnn_keyword(cnn_setting):
conv_kernel_list = cnn_setting.conv_kernel_list
pool_rate_list = cnn_setting.pool_rate_list
feature_num_list = cnn_setting.feature_num_list
cnn_keyword = ""
feature_count = 0
for item in conv_kernel_list:
cnn_keyword = cnn_keyword + '_c' + str(item[0]) + '-' + str(item[1]) + '_' + str(feature_num_list[feature_count])
feature_count = feature_count + 1
for item in pool_rate_list:
cnn_keyword = cnn_keyword + '_p' + str(item[0]) + '-' + str(item[1])
cnn_keyword = cnn_keyword + "_a" + str(cnn_setting.activation_fun)
return cnn_keyword
def return_cnn_default_setting(conv_kernel_array=np.array([[1, 2], [1, 2]]), pool_rate_array=np.array([[1, 2], [1, 2]]), feature_num_array=np.array([2, 2]), batch_size=100, max_iter=200, stop_threshold=0.9, activation_func=0, std_value=0.02, same_size=False, feature_method='none', eval_method='acc'):
cnn_setting = cnn_parameters(conv_kernel_array, pool_rate_array, feature_num_array, batch_size, max_iter, stop_threshold, activation_func, std_value, same_size, feature_method, eval_method)
cnn_keyword = return_cnn_keyword(cnn_setting)
return cnn_setting, cnn_keyword
# End of CNN function
###############################################################
###############################################################
# NN function
def return_nn_keyword(nn_setting):
layer_list = nn_setting.layer_list
nn_keyword = ""
feature_count = 0
for item in layer_list:
nn_keyword = nn_keyword + '_l' + str(item)
feature_count = feature_count + 1
return nn_keyword
def return_nn_setting_from_file(nn_setting_file):
keyword_list = ['#layer_list', '#batch_size', '#max_epoch', '#stop_threshold', '#activation_fun', '#std_value', '#eval_method', '#obj_folder']
first_delimiter = ','
second_delimiter = ' '
keyword = ''
layer_list = ''
batch_size = -1
max_epoch = -1
stop_threshold = -1
activation_fun = -1
std_value = -1
eval_method = ''
lines = open(nn_setting_file).readlines()
for line in lines:
if line.startswith('#'):
temp = line.strip()
if temp in keyword_list:
keyword = line.strip()
continue
if keyword == '#layer_list':
layer_list = line.strip()
elif keyword == '#batch_size':
batch_size = int(line.strip())
elif keyword == '#max_epoch':
max_epoch = int(line.strip())
elif keyword == '#stop_threshold':
stop_threshold = float(line.strip())
elif keyword == '#activation_fun':
activation_fun = int(line.strip())
elif keyword == '#std_value':
std_value = float(line.strip())
elif keyword == '#eval_method':
eval_method = line.strip()
if batch_size<0 or max_epoch<0 or stop_threshold<0 or activation_fun<0 or std_value<0:
raise Exception("Wrong data paramters, please check the parameter file " + parameter_file)
layer_list = layer_list.split(first_delimiter)
layer_list = string_array_to_numpy_array(layer_list, second_delimiter, 1)
nn_setting = nn_parameters(layer_list, batch_size, max_epoch, stop_threshold, activation_fun, std_value, eval_method)
nn_keyword = return_nn_keyword(nn_setting)
nn_setting.save_file = nn_keyword+'.ckpt'
return nn_setting, nn_keyword
# End of NN function
###############################################################
if __name__ == '__main__':
#parameter_file = '../../parameters/global_feature_generation.txt'
#read_global_feature_generation_parameter(parameter_file)
#parameter_file = '../../parameters/cnn_model_parameter.txt'
#cnn_p = return_cnn_setting_from_file(parameter_file)
#print cnn_p.print_to_string()
#print return_cnn_keyword(cnn_p)
parameter_file = '../../parameters/cnn_model_parameter_varying.txt'
cnn_p = return_cnn_setting_from_file(parameter_file)
```
#### File: src/utils/sklearn_classification.py
```python
import numpy as np
import sys
import time
import gc
from sklearn import neighbors
from sklearn import svm
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
#from svmutil import *
from collections import Counter
from log_io import init_logging
###
def gene_projected_lda_feature(train_x_matrix, train_y_vector):
norm_time = 0
start_time = time.time()
train_norm_vector = np.linalg.norm(train_x_matrix, axis=0, ord=np.inf)[None, :]
    # out= keeps entries with a zero norm at 0 instead of leaving them uninitialized
    train_x_matrix = np.true_divide(train_x_matrix, train_norm_vector, out=np.zeros_like(train_x_matrix, dtype=np.float64), where=(train_norm_vector!=0))
norm_time = time.time() - start_time
train_x_matrix[np.isnan(train_x_matrix)] = 0
train_x_matrix[np.isinf(train_x_matrix)] = 1
min_class = min(train_y_vector)
max_class = max(train_y_vector)
ret_feature_matrix = []
lda_time = 0
start_time = time.time()
clf = LinearDiscriminantAnalysis()
lda_time = lda_time + time.time() - start_time
for i in range(min_class, max_class+1):
temp_y_vector = np.where(train_y_vector==i, 0, 1)
#print "FIT"
#print len(train_x_matrix)
#print len(temp_y_vector)
start_time = time.time()
clf.fit(train_x_matrix, temp_y_vector)
lda_time = lda_time + time.time() - start_time
ret_feature_matrix.append(clf.coef_)
ret_feature_matrix = np.squeeze(np.array(ret_feature_matrix))
ret_feature_matrix = np.absolute(ret_feature_matrix)
#print ret_feature_matrix
#print "Function end: gen_projected_lda_feature"
return ret_feature_matrix, norm_time, lda_time
def bi_gene_lda_model(train_x_matrix, train_y_vector):
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
#print train_x_matrix.shape
#print train_y_vector.shape
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
return clf, train_time
def gene_lda_model(train_x_matrix, train_y_vector):
clf = LinearDiscriminantAnalysis()
#print train_x_matrix.shape
#print train_y_vector.shape
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
return clf, train_time
def run_lda(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, proba=False):
clf, train_time = gene_lda_model(train_x_matrix, train_y_vector)
if proba == True:
predict_y = clf.predict(test_x_matrix)
start_time = time.time()
predict_y_proba = clf.predict_proba(test_x_matrix)
test_time = time.time() - start_time
else:
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
predict_y_proba = None
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
def run_rf(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, samples_leaf=20, proba=False):
np.random.seed(0)
#positive_index = np.where(train_y_vector==1)
#negative_index = np.where(train_y_vector==0)
#len_positive = len(np.where(train_y_vector == 1)[0])
#len_negative = len(train_y_vector) - len_positive
#logger.info("positive: " + str(len_positive))
#logger.info("negative: " + str(len_negative))
#if len_positive > len_negative:
# add_pare = '-w0 ' + str(len_positive/len_negative) + ' -w1 1'
#else:
# add_pare = '-w1 ' + str(len_negative/len_positive) + ' -w0 1'
clf = RandomForestClassifier(min_samples_leaf=samples_leaf, class_weight='balanced')
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
if proba is False:
predict_y_proba = None
else:
predict_y_proba = clf.predict_proba(test_x_matrix)
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
# train_x_matrix: row_num * col_num, train_y_vector: vector
def run_dt(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, n_neighbors, proba=False):
clf = DecisionTreeClassifier(random_state=0, class_weight='balanced')
#n_estimators = 10
#clf = OneVsRestClassifier(BaggingClassifier(neighbors.KNeighborsClassifier(n_neighbors, weights="distance"), max_samples=1.0 / n_estimators, n_estimators=n_estimators))
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
if proba == False:
predict_y_proba = None
else:
predict_y_proba = clf.predict_proba(test_x_matrix)
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
# train_x_matrix: row_num * col_num, train_y_vector: vector
def run_knn(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, n_neighbors, proba=False):
clf = neighbors.KNeighborsClassifier(n_neighbors, weights="distance")
#n_estimators = 10
#clf = OneVsRestClassifier(BaggingClassifier(neighbors.KNeighborsClassifier(n_neighbors, weights="distance"), max_samples=1.0 / n_estimators, n_estimators=n_estimators))
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
if proba == False:
predict_y_proba = None
else:
predict_y_proba = clf.predict_proba(test_x_matrix)
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
# run knn and also return per-class nearest-neighbour distances
# train_x_matrix: numpy matrix with N * A: N is the number of training instances, A is the number of attributes
# train_y_vector: numpy vector N * 1
# test_x_matrix: numpy matrix with N1 * A: N1 is the number of testing instances
# test_y_vector: numpy vector N1 * 1
# n_neighbors: top K value (the per-class reshape below assumes n_neighbors=1)
# it returns four values
# distance_matrix: a numpy matrix D with N1 * num_classes, D_ij is the distance from test instance i to its nearest training instance of class j
# predict_y_vector: the predictions of a knn model fit on the full training set, followed by the train and test times
# the distance calculation: from [A11, A12, A13] to [A21, A22, A23] is dist = sqrt((A11-A21)^2 + (A12-A22)^2 + (A13-A23)^2)
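# e.g. the distance from [1, 1, 1] to [1, 2, 3] is sqrt(0^2 + 1^2 + 2^2) = sqrt(5), roughly 2.236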
def run_knn_with_dist(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, n_neighbors=1):
clf = neighbors.KNeighborsClassifier(n_neighbors, weights="distance")
min_class = min(train_y_vector)
max_class = max(train_y_vector)
distance_matrix = []
for i in range(min_class, max_class+1):
train_index = np.where(train_y_vector==i)[0]
knn_model = clf.fit(train_x_matrix[train_index, :], train_y_vector[train_index])
distances, indexes = knn_model.kneighbors(test_x_matrix, n_neighbors, True)
distance_matrix.append(distances)
distance_matrix = np.array(distance_matrix).reshape(max_class-min_class+1, len(test_y_vector))
distance_matrix = distance_matrix.T
start_time = time.time()
knn_model = clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y_vector = knn_model.predict(test_x_matrix)
test_time = time.time() - start_time
return distance_matrix, predict_y_vector, train_time, test_time
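# Map each neighbour index in index_matrix to its training label, giving a matrix of predicted labels per neighbour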
def get_pred_matrix(train_y_vector, index_matrix):
x_row, x_col = index_matrix.shape
pred_matrix = np.zeros([x_row, x_col]).astype(int)
for i in range(0, x_row):
pred_matrix[i] = train_y_vector[index_matrix[i]]
return pred_matrix
# For the distance-based variant (kept commented out below): take the distance d1 to the nearest training
# instance of the current class and the distance d2 to the nearest instance of any other class; the probability
# of the current class is then 1 - (d1/(d1 + d2)) and the probability of not belonging to it is 1 - (d2/(d1 + d2))
# This function shifts all class labels so they form one continuous range starting from 0
# It returns a test_x_matrix_row * num_classes matrix with the class probability distribution for each test instance
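# e.g. with d1 = 1.0 and d2 = 3.0, the probability for the current class is 1 - 1.0/(1.0 + 3.0) = 0.75
# and the probability of not belonging to it is 1 - 3.0/(1.0 + 3.0) = 0.25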
def run_knn_with_proba(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, n_neighbors=1):
train_row, train_col = train_x_matrix.shape
test_row, test_col = test_x_matrix.shape
min_class = min(train_y_vector)
if min_class != 0:
train_y_vector = train_y_vector - min_class
min_class = 0
max_class = max(train_y_vector)
num_classes = max_class + 1
    # Per-class nearest-neighbour distances (unused here, kept for the commented-out distance-based scheme below)
    dist_matrix, predict_y_vector, train_time, test_time = run_knn_with_dist(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, n_neighbors)
    # Fit a single knn model on the full training set and use its class probabilities directly
    start_time = time.time()
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights="distance")
    knn_model = clf.fit(train_x_matrix, train_y_vector)
    predict_proba_matrix = knn_model.predict_proba(test_x_matrix)
#for i in range(0, test_row):
# instance_pred_vector = pred_matrix[i]
# pred_len = len(instance_pred_vector)
# for j in range(0, pred_len):
# c = instance_pred_vector[j]
# if predict_proba_matrix[i][c] != max_dist:
# continue
# predict_proba_matrix[i][c] = dist_matrix[i][j]
#predict_proba_matrix = predict_proba_matrix
#test_time = test_time + time.time() - start_time
#for i in range(0, test_row):
# proba_vector = predict_proba_matrix[i]
# vector_min = proba_vector.min()
# predict_proba_matrix[i] = 1- (predict_proba_matrix[i] - vector_min)/(max_dist - vector_min)
#predict_proba_matrix = (predict_proba_matrix - predict_proba_matrix.min(axis=0))/ (predict_proba_matrix.max(axis=0) - predict_proba_matrix.min(axis=0))
#print predict_proba_matrix
#for i in range(0, test_row):
# proba_vector = predict_proba_matrix[i]
# null_index = np.where(proba_vector==-1)
# not_null_index = np.where(proba_vector!=-1)[0]
# if len(not_null_index) == 1:
# predict_proba_matrix[i][not_null_index] = 1
# else:
# proba_vector = np.delete(proba_vector, null_index)
# sum_proba = sum(proba_vector)
# for j in not_null_index:
# predict_proba_matrix[i][j] = predict_proba_matrix[i][j]/sum_proba
# predict_proba_matrix[i][null_index] = 0
return predict_proba_matrix, train_time, test_time
# Libsvm
def run_sklearn_libsvm(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, proba=False):
train_y_vector = train_y_vector- min(train_y_vector)
test_y_vector = test_y_vector - min(test_y_vector)
train_x_matrix = train_x_matrix.astype(np.float64)
train_y_vector = train_y_vector.astype(np.float64)
test_x_matrix = test_x_matrix.astype(np.float64)
test_y_vector = test_y_vector.astype(np.float64)
weight_array = []
unique, counts = np.unique(train_y_vector, return_counts=True)
count_all = len(train_y_vector)
for i in counts:
weight_array.append(float(1)/i)
weight_array = np.array(weight_array)
start_time = time.time()
model = svm.libsvm.fit(train_x_matrix, train_y_vector, class_weight=weight_array)
train_time = time.time() - start_time
start_time = time.time()
predict_y = svm.libsvm.predict(test_x_matrix, *model)
test_time = time.time() - start_time
if proba is False:
predict_y_proba = None
else:
predict_y_proba = svm.libsvm.predict_proba(test_x_matrix, *model)
#predict_y_proba = None
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
#return accuracy_score(test_y_vector, predict_y), predict_y, train_time, test_time
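# Down-sample the majority class so that it is at most banlanced_ratio times the size of the minority class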
def banlanced_binary_processing(train_x_matrix, train_y_vector, banlanced_ratio=3):
positive_index = np.where(train_y_vector==0.0)[0]
negative_index = np.where(train_y_vector==1.0)[0]
positive_len = len(positive_index)
negative_len = len(negative_index)
if positive_len > negative_len:
select_len = banlanced_ratio * negative_len
if positive_len > select_len:
select_index = np.random.choice(positive_len, select_len, replace=False)
positive_index = positive_index[select_index]
all_index = np.append(positive_index, negative_index)
train_x_matrix = train_x_matrix[all_index, :]
train_y_vector = train_y_vector[all_index]
else:
select_len = banlanced_ratio * positive_len
if negative_len > select_len:
select_index = np.random.choice(negative_len, select_len, replace=False)
negative_index = negative_index[select_index]
all_index = np.append(negative_index, positive_index)
train_x_matrix = train_x_matrix[all_index, :]
train_y_vector = train_y_vector[all_index]
return train_x_matrix, train_y_vector
def libsvm_load_predict(test_x_matrix, test_y_vector, save_file):
model = svm_load_model(save_file)
predict_y, predict_acc, predict_y_proba = svm_predict(test_y_vector, test_x_matrix, model, '-b 1')
print(predict_acc, predict_y, predict_y_proba)
#libsvm from the author's website
def run_libsvm(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, logger, proba=False, save_file='', weight=True):
train_y_vector = train_y_vector- min(train_y_vector)
test_y_vector = test_y_vector - min(test_y_vector)
#train_x_matrix = train_x_matrix.astype(np.float64)
#train_y_vector = train_y_vector.astype(np.float64)
#test_x_matrix = test_x_matrix.astype(np.float64)
#test_y_vector = test_y_vector.astype(np.float64)
if weight == True:
positive_index = np.where(train_y_vector==1)
negative_index = np.where(train_y_vector==0)
len_positive = len(np.where(train_y_vector == 1)[0])
len_negative = len(train_y_vector) - len_positive
logger.info("positive: " + str(len_positive))
logger.info("negative: " + str(len_negative))
if len_positive > len_negative:
add_pare = '-w0 ' + str(len_positive/len_negative) + ' -w1 1'
else:
add_pare = '-w1 ' + str(len_negative/len_positive) + ' -w0 1'
else:
add_pare = ''
train_x_matrix = train_x_matrix.tolist()
train_y_vector = train_y_vector.astype(np.integer).tolist()
test_x_matrix = test_x_matrix.tolist()
test_y_vector = test_y_vector.astype(np.integer).tolist()
#svm_model.predict = lambda self, x: svm_predict([0], [x], self)[0][0]
#prob = svm_problem([1,-1], [[1,0,1], [-1,0,-1]])
prob = svm_problem(train_y_vector, train_x_matrix)
#logger.info("libsvm parameter: " + '-h 0 -s 0 -t 2 -c 0.03125 -g 0.0078125 -b 1 '+add_pare)
#param = svm_parameter('-h 0 -s 0 -t 2 -c 0.03125 -g 0.0078125 -b 1 '+add_pare)
logger.info("libsvm parameter: " + '-h 0 -s 0 -t 2 -b 1 -e 0.1 '+add_pare)
param = svm_parameter('-h 0 -s 0 -t 2 -b 1 -e 0.1 '+add_pare)
start_time = time.time()
model = svm_train(prob, param)
train_time = time.time() - start_time
if save_file != '':
logger.info("svm model saved to " + save_file)
svm_save_model(save_file, model)
start_time = time.time()
#predict_y, predict_acc, predict_val = svm_predict(test_y_vector, test_x_matrix, model, '-b 1')
predict_y, predict_acc, predict_val = svm_predict(test_y_vector, test_x_matrix, model)
test_time = time.time() - start_time
#predict_val = np.array(predict_val)
#predict_y = np.array(predict_y)
#print predict_val.shape
#print predict_y.shape
predict_y = np.array(predict_y)
predict_val = np.zeros([len(predict_y), 2])
predict_val[:, 0] = 1 - predict_y
predict_val[:, 1] = predict_y
return predict_acc[0], predict_y, predict_val, train_time, test_time
def run_svm_svc(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, proba=False):
clf = svm.SVC()
print("kernel function:")
print(clf.kernel)
print(clf.decision_function_shape)
print(clf.degree)
print(clf.C)
clf.probability = proba
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
if proba == False:
predict_y_proba = None
else:
predict_y_proba = clf.predict_proba(test_x_matrix)
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
def run_nn(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, proba=False):
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
start_time = time.time()
clf.fit(train_x_matrix, train_y_vector)
train_time = time.time() - start_time
start_time = time.time()
predict_y = clf.predict(test_x_matrix)
test_time = time.time() - start_time
if proba == False:
predict_y_proba = None
else:
predict_y_proba = clf.predict_proba(test_x_matrix)
return accuracy_score(test_y_vector, predict_y), predict_y, predict_y_proba, train_time, test_time
##############################
# we only consider KNN with K=1
def run_feature_knn_use_proba(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, feature_array, attr_num, n_neighbors, class_id=-1, logger=None):
if logger==None:
logger = init_logging("")
logger.info('no log file: ')
num_classes, num_features = feature_array.shape
test_row, test_col = test_x_matrix.shape
knn_predict_matrix = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
knn_predict_proba = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
knn_train_time = 0
knn_test_time = 0
knn_accuracy = 0
proba = True
if class_id == -1:
min_class = min(train_y_vector)
max_class = max(train_y_vector) + 1
else:
min_class = class_id
max_class = class_id + 1
#result_matrix = np.zeros((10, num_classes))
for i in range(min_class, max_class):
logger.info('class: ' +str(i))
logger.info(str(feature_array[i]))
#print 'class: ' + str(i)
#print feature_array[i]
temp_train_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(train_x_matrix, attr_num, feature_array[i])
temp_test_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(test_x_matrix, attr_num, feature_array[i])
#print 'class: ' + str(i)
temp_train_y_vector = np.where(train_y_vector==i, 1, 0)
temp_test_y_vector = np.where(test_y_vector==i, 1, 0)
if i==0:
logger.info('sub feature data shape: ')
logger.info(str(temp_train_x_matrix.shape))
logger.info(str(temp_test_x_matrix.shape))
#print 'sub feature data shape:'
#print temp_train_x_matrix.shape
#print temp_test_x_matrix.shape
temp_accuracy, temp_predict_y, temp_predict_y_proba, temp_train_time, temp_test_time = run_knn(temp_train_x_matrix, temp_train_y_vector, temp_test_x_matrix, temp_test_y_vector, n_neighbors, proba)
#temp_predict_y_proba, temp_predict_y, temp_train_time, temp_test_time = run_knn_with_dist(temp_train_x_matrix, temp_train_y_vector, temp_test_x_matrix, temp_test_y_vector)
#temp_accuracy_1, temp_precision, temp_recall, temp_f1_value = f1_value_precision_recall_accuracy(temp_predict_y, temp_test_y_vector)
temp_accuracy, temp_precision, temp_recall, temp_f1_value, temp_tp, temp_fp, temp_tn, temp_fn = f1_value_precision_recall_accuracy(temp_predict_y, temp_test_y_vector)
#if temp_accuracy != temp_accuracy_1:
# logger.info(str(temp_accuracy))
# logger.info(str(temp_accuracy_1))
# #print temp_accuracy
# #print temp_accuracy_1
# raise Exception("Two accuracy results are not the same")
#result_matrix[0, i] = temp_accuracy
#result_matrix[1, i] = temp_precision
#result_matrix[2, i] = temp_recall
#result_matrix[3, i] = temp_f1_value
#result_matrix[4, i] = temp_tp
#result_matrix[5, i] = temp_fp
#result_matrix[6, i] = temp_tn
#result_matrix[7, i] = temp_fn
#result_matrix[8, i] = temp_train_time
#result_matrix[9, i] = temp_test_time
logger.info("Accuracy for class " + str(i) + ": " + str(temp_accuracy))
logger.info("Recall for class " + str(i) + ": " + str(temp_recall))
logger.info("Precision for class " + str(i) + ": " + str(temp_precision))
logger.info("F1 Score for class " + str(i) + ": " + str(temp_f1_value))
logger.info("Prediction matrix:")
logger.info("TP=" + str(temp_tp) + " FP=" + str(temp_fp))
logger.info("TN=" + str(temp_tn) + " FN=" + str(temp_fn))
knn_train_time = knn_train_time + temp_train_time
knn_test_time = knn_test_time + temp_test_time
proba_row, proba_col = temp_predict_y_proba.shape
knn_predict_matrix[:, i] = temp_predict_y_proba[:, 1]
logger.info('=============')
#break
knn_accuracy, knn_predict_y = predict_matrix_with_proba_to_predict_accuracy(knn_predict_matrix, knn_predict_matrix, test_y_vector)
return knn_accuracy, knn_train_time, knn_test_time, knn_predict_y
def load_predict_svm_proba(test_x_matrix, test_y_vector, feature_array, attr_num, save_pre, logger=None):
if logger==None:
logger = init_logging("")
logger.info('no log file: ')
num_classes, num_features = feature_array.shape
test_row, test_col = test_x_matrix.shape
svm_predict_matrix = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
svm_predict_proba = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
svm_train_time = 0
svm_test_time = 0
svm_accuracy = 0
proba = True
banlanced_ratio = 5
for i in range(0, num_classes):
#print 'class: ' + str(i)
#print feature_array[i]
logger.info("class: " + str(i))
logger.info(str(feature_array[i]))
temp_test_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(test_x_matrix, attr_num, feature_array[i])
#print 'class: ' + str(i)
        if i==0:
            logger.info('sub feature data shape: ')
            logger.info(str(temp_test_x_matrix.shape))
        temp_test_y_vector = np.where(test_y_vector==i, 1, 0)
        save_file = save_pre + "_class" + str(i) + ".model"
        # Load the saved per-class svm model and predict, mirroring run_feature_svm_load_proba below
        start_time = time.time()
        svm_model = svm_load_model(save_file)
        temp_train_time = time.time() - start_time
        start_time = time.time()
        temp_predict_y, temp_predict_acc, temp_predict_y_proba = svm_predict(temp_test_y_vector.tolist(), temp_test_x_matrix.tolist(), svm_model, '-b 1')
        temp_test_time = time.time() - start_time
        temp_accuracy, temp_precision, temp_recall, temp_f1_value, temp_tp, temp_fp, temp_tn, temp_fn = f1_value_precision_recall_accuracy(temp_predict_y, temp_test_y_vector)
        temp_predict_y = np.array(temp_predict_y)
        temp_predict_y_proba = np.array(temp_predict_y_proba)
logger.info("Accuracy for class " + str(i) + ": " + str(temp_accuracy))
logger.info("Recall for class " + str(i) + ": " + str(temp_recall))
logger.info("Precision for class " + str(i) + ": " + str(temp_precision))
logger.info("F1 Score for class " + str(i) + ": " + str(temp_f1_value))
logger.info("Prediction matrix:")
logger.info("TP=" + str(temp_tp) + " FP=" + str(temp_fp))
logger.info("TN=" + str(temp_tn) + " FN=" + str(temp_fn))
svm_train_time = svm_train_time + temp_train_time
svm_test_time = svm_test_time + temp_test_time
proba_row, proba_col = temp_predict_y_proba.shape
svm_predict_matrix[:, i] = temp_predict_y
#svm_predict_proba[:, i] = temp_predict_y_proba[:, 1]
logger.info('=============')
#break
svm_accuracy, svm_predict_y = predict_matrix_with_proba_to_predict_accuracy(svm_predict_matrix, svm_predict_proba, test_y_vector)
return svm_accuracy, svm_train_time, svm_test_time, svm_predict_y
def run_feature_svm_use_proba(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, feature_array, attr_num, logger=None, save_pre=''):
if logger==None:
logger = init_logging("")
logger.info('no log file: ')
num_classes, num_features = feature_array.shape
test_row, test_col = test_x_matrix.shape
svm_predict_matrix = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
svm_predict_proba = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
svm_train_time = 0
svm_test_time = 0
svm_accuracy = 0
proba = True
banlanced_ratio = 5
for i in range(0, num_classes):
#print 'class: ' + str(i)
#print feature_array[i]
logger.info("class: " + str(i))
logger.info(str(feature_array[i]))
temp_train_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(train_x_matrix, attr_num, feature_array[i])
temp_test_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(test_x_matrix, attr_num, feature_array[i])
#print 'class: ' + str(i)
if i==0:
logger.info('sub feature data shape: ')
logger.info(str(temp_train_x_matrix.shape))
logger.info(str(temp_test_x_matrix.shape))
temp_train_y_vector = np.where(train_y_vector==i, 1, 0)
temp_test_y_vector = np.where(test_y_vector==i, 1, 0)
temp_train_x_matrix, temp_train_y_vector = banlanced_binary_processing(temp_train_x_matrix, temp_train_y_vector, banlanced_ratio)
save_file = save_pre + "_class" + str(i) + "_top" + str(temp_attr_num) + ".model"
logger.info('svm saved to ' + save_file)
temp_accuracy, temp_predict_y, temp_predict_y_proba, temp_train_time, temp_test_time = run_libsvm(temp_train_x_matrix, temp_train_y_vector, temp_test_x_matrix, temp_test_y_vector, logger, proba, save_file)
temp_accuracy, temp_precision, temp_recall, temp_f1_value, temp_tp, temp_fp, temp_tn, temp_fn = f1_value_precision_recall_accuracy(temp_predict_y, temp_test_y_vector)
temp_predict_y = np.array(temp_predict_y)
temp_predict_y_proba = np.array(temp_predict_y_proba)
logger.info("Accuracy for class " + str(i) + ": " + str(temp_accuracy))
logger.info("Recall for class " + str(i) + ": " + str(temp_recall))
logger.info("Precision for class " + str(i) + ": " + str(temp_precision))
logger.info("F1 Score for class " + str(i) + ": " + str(temp_f1_value))
logger.info("Prediction matrix:")
logger.info("TP=" + str(temp_tp) + " FP=" + str(temp_fp))
logger.info("TN=" + str(temp_tn) + " FN=" + str(temp_fn))
svm_train_time = svm_train_time + temp_train_time
svm_test_time = svm_test_time + temp_test_time
proba_row, proba_col = temp_predict_y_proba.shape
svm_predict_matrix[:, i] = temp_predict_y
svm_predict_proba[:, i] = temp_predict_y_proba[:, 1]
logger.info('=============')
#break
svm_accuracy, svm_predict_y = predict_matrix_with_proba_to_predict_accuracy(svm_predict_matrix, svm_predict_proba, test_y_vector)
return svm_accuracy, svm_train_time, svm_test_time, svm_predict_y
def run_feature_svm_load_proba(model_pre, test_x_matrix, test_y_vector, feature_array, attr_num, logger=None):
if logger==None:
logger = init_logging("")
logger.info('no log file: ')
num_classes, num_features = feature_array.shape
test_row, test_col = test_x_matrix.shape
svm_predict_matrix = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
svm_predict_proba = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
svm_train_time = 0
svm_test_time = 0
svm_accuracy = 0
proba = True
for i in range(0, num_classes):
#print 'class: ' + str(i)
#print feature_array[i]
logger.info("class: " + str(i))
logger.info(str(feature_array[i]))
#temp_train_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(train_x_matrix, attr_num, feature_array[i])
temp_test_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(test_x_matrix, attr_num, feature_array[i])
model_file = model_pre + '_class' + str(i) + "_top" + str(temp_attr_len) + ".model"
print(model_file)
logger.info('model file: ' + model_file)
start_time = time.time()
svm_model = svm_load_model(model_file)
temp_train_time = time.time() - start_time
svm_train_time = svm_train_time + temp_train_time
#print 'class: ' + str(i)
if i==0:
logger.info('sub feature data shape: ')
logger.info(str(temp_test_x_matrix.shape))
temp_test_y_vector = np.where(test_y_vector==i, 1, 0)
temp_test_x_matrix = temp_test_x_matrix.tolist()
temp_test_y_vector = temp_test_y_vector.astype(int).tolist()
###START FROM HERE
start_time = time.time()
temp_predict_y, temp_accuracy, temp_predict_y_proba = svm_predict(temp_test_y_vector, temp_test_x_matrix, svm_model, '-b 1')
temp_test_time = time.time() - start_time
svm_test_time = svm_test_time + temp_test_time
temp_accuracy, temp_precision, temp_recall, temp_f1_value, temp_tp, temp_fp, temp_tn, temp_fn = f1_value_precision_recall_accuracy(temp_predict_y, temp_test_y_vector)
temp_predict_y = np.array(temp_predict_y)
temp_predict_y_proba = np.array(temp_predict_y_proba)
logger.info("Accuracy for class " + str(i) + ": " + str(temp_accuracy))
logger.info("Recall for class " + str(i) + ": " + str(temp_recall))
logger.info("Precision for class " + str(i) + ": " + str(temp_precision))
logger.info("F1 Score for class " + str(i) + ": " + str(temp_f1_value))
logger.info("Prediction matrix:")
logger.info("TP=" + str(temp_tp) + " FP=" + str(temp_fp))
logger.info("TN=" + str(temp_tn) + " FN=" + str(temp_fn))
proba_row, proba_col = temp_predict_y_proba.shape
svm_predict_matrix[:, i] = temp_predict_y
svm_predict_proba[:, i] = temp_predict_y_proba[:, 1]
logger.info('=============')
#break
svm_accuracy, svm_predict_y = predict_matrix_with_proba_to_predict_accuracy(svm_predict_matrix, svm_predict_proba, test_y_vector)
return svm_accuracy, svm_train_time, svm_test_time, svm_predict_y
def run_feature_nn_use_proba(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, feature_array, attr_num, start_class=0):
num_classes, num_features = feature_array.shape
test_row, test_col = test_x_matrix.shape
nn_predict_matrix = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
nn_predict_proba = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
nn_train_time = 0
nn_test_time = 0
nn_accuracy = 0
proba = True
for i in range(0, num_classes):
print('class: ' + str(i))
print(feature_array[i])
temp_train_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(train_x_matrix, attr_num, feature_array[i])
temp_test_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(test_x_matrix, attr_num, feature_array[i])
#print 'class: ' + str(i)
if i==0:
print('sub feature data shape:')
print(temp_train_x_matrix.shape)
print(temp_test_x_matrix.shape)
temp_accuracy, temp_predict_y, temp_predict_y_proba, temp_train_time, temp_test_time = run_nn(temp_train_x_matrix, train_y_vector, temp_test_x_matrix, test_y_vector, proba)
nn_train_time = nn_train_time + temp_train_time
nn_test_time = nn_test_time + temp_test_time
#nn_predict_proba = np.add(nn_predict_proba, temp_predict_y_proba)
#print nn_predict_proba.shape
#print temp_predict_y_proba.shape
nn_predict_matrix[:, i] = temp_predict_y
nn_predict_proba[:, i] = temp_predict_y_proba[:, i]
#break
nn_accuracy, nn_predict_y = predict_matrix_with_proba_to_predict_accuracy(nn_predict_matrix, nn_predict_proba, test_y_vector)
return nn_accuracy, nn_train_time, nn_test_time, nn_predict_y
def run_feature_lda_use_proba(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, feature_array, attr_num, logger=None):
if logger is None:
logger = init_logging("")
logger.info('no log file: ')
num_classes, num_features = feature_array.shape
test_row, test_col = test_x_matrix.shape
lda_predict_matrix = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
lda_predict_proba = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
lda_train_time = 0
lda_test_time = 0
lda_accuracy = 0
proba = True
for i in range(0, num_classes):
logger.info("class: " + str(i))
logger.info(str(feature_array[i]))
temp_train_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(train_x_matrix, attr_num, feature_array[i])
temp_test_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(test_x_matrix, attr_num, feature_array[i])
if i==0:
logger.info('sub feature data shape: ')
logger.info(str(temp_train_x_matrix.shape))
logger.info(str(temp_test_x_matrix.shape))
temp_train_y_vector = np.where(train_y_vector==i, 1, 0)
temp_test_y_vector = np.where(test_y_vector==i, 1, 0)
temp_accuracy, temp_predict_y, temp_predict_y_proba, temp_train_time, temp_test_time = run_lda(temp_train_x_matrix, temp_train_y_vector, temp_test_x_matrix, temp_test_y_vector, proba)
temp_accuracy, temp_precision, temp_recall, temp_f1_value, temp_tp, temp_fp, temp_tn, temp_fn = f1_value_precision_recall_accuracy(temp_predict_y, temp_test_y_vector)
logger.info("Accuracy for class " + str(i) + ": " + str(temp_accuracy))
logger.info("Recall for class " + str(i) + ": " + str(temp_recall))
logger.info("Precision for class " + str(i) + ": " + str(temp_precision))
logger.info("F1 Score for class " + str(i) + ": " + str(temp_f1_value))
logger.info("Prediction matrix:")
logger.info("TP=" + str(temp_tp) + " FP=" + str(temp_fp))
logger.info("TN=" + str(temp_tn) + " FN=" + str(temp_fn))
lda_train_time = lda_train_time + temp_train_time
lda_test_time = lda_test_time + temp_test_time
proba_row, proba_col = temp_predict_y_proba.shape
lda_predict_matrix[:, i] = temp_predict_y
lda_predict_proba[:, i] = temp_predict_y_proba[:, 1]
logger.info('=============')
#break
lda_accuracy, lda_predict_y = predict_matrix_with_proba_to_predict_accuracy(lda_predict_matrix, lda_predict_proba, test_y_vector)
return lda_accuracy, lda_train_time, lda_test_time, lda_predict_y
def run_feature_rf_use_proba(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, feature_array, attr_num, logger=None):
if logger is None:
logger = init_logging("")
logger.info('no log file: ')
num_classes, num_features = feature_array.shape
test_row, test_col = test_x_matrix.shape
rf_predict_matrix = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
rf_predict_proba = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
rf_train_time = 0
rf_test_time = 0
rf_accuracy = 0
proba = True
for i in range(0, num_classes):
logger.info("class: " + str(i))
logger.info(str(feature_array[i]))
temp_train_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(train_x_matrix, attr_num, feature_array[i])
temp_test_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(test_x_matrix, attr_num, feature_array[i])
if i==0:
logger.info('sub feature data shape: ')
logger.info(str(temp_train_x_matrix.shape))
logger.info(str(temp_test_x_matrix.shape))
temp_train_y_vector = np.where(train_y_vector==i, 1, 0)
temp_test_y_vector = np.where(test_y_vector==i, 1, 0)
temp_accuracy, temp_predict_y, temp_predict_y_proba, temp_train_time, temp_test_time = run_rf(temp_train_x_matrix, temp_train_y_vector, temp_test_x_matrix, temp_test_y_vector, 20, True)
temp_accuracy, temp_precision, temp_recall, temp_f1_value, temp_tp, temp_fp, temp_tn, temp_fn = f1_value_precision_recall_accuracy(temp_predict_y, temp_test_y_vector)
logger.info("Accuracy for class " + str(i) + ": " + str(temp_accuracy))
logger.info("Recall for class " + str(i) + ": " + str(temp_recall))
logger.info("Precision for class " + str(i) + ": " + str(temp_precision))
logger.info("F1 Score for class " + str(i) + ": " + str(temp_f1_value))
logger.info("Prediction matrix:")
logger.info("TP=" + str(temp_tp) + " FP=" + str(temp_fp))
logger.info("TN=" + str(temp_tn) + " FN=" + str(temp_fn))
rf_train_time = rf_train_time + temp_train_time
rf_test_time = rf_test_time + temp_test_time
proba_row, proba_col = temp_predict_y_proba.shape
rf_predict_matrix[:, i] = temp_predict_y
rf_predict_proba[:, i] = temp_predict_y_proba[:, 1]
logger.info('=============')
#break
rf_accuracy, rf_predict_y = predict_matrix_with_proba_to_predict_accuracy(rf_predict_matrix, rf_predict_proba, test_y_vector)
return rf_accuracy, rf_train_time, rf_test_time, rf_predict_y
def run_feature_rf_use_proba_old(train_x_matrix, train_y_vector, test_x_matrix, test_y_vector, feature_array, attr_num, logger=None, start_class=0):
num_classes, num_features = feature_array.shape
test_row, test_col = test_x_matrix.shape
rf_predict_proba = np.zeros(test_row * num_classes).reshape(test_row, num_classes)
rf_train_time = 0
rf_test_time = 0
rf_accuracy = 0
for i in range(0, num_classes):
print('class: ' + str(i))
temp_train_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(train_x_matrix, attr_num, feature_array[i])
temp_test_x_matrix, temp_attr_num, temp_attr_len = feature_data_generation(test_x_matrix, attr_num, feature_array[i])
#print 'class: ' + str(i)
if i==0:
print('sub feature data shape:')
print(temp_train_x_matrix.shape)
print(temp_test_x_matrix.shape)
temp_accuracy, temp_predict_y, temp_predict_y_proba, temp_train_time, temp_test_time = run_rf(temp_train_x_matrix, train_y_vector, temp_test_x_matrix, test_y_vector)
rf_train_time = rf_train_time + temp_train_time
rf_test_time = rf_test_time + temp_test_time
if temp_predict_y_proba is not None:
temp_predict_y_proba[:, i] = temp_predict_y_proba[:, i]
print(temp_predict_y_proba)
rf_predict_proba = np.add(rf_predict_proba, temp_predict_y_proba)
#rf_predict_proba[:, i] = temp_predict_y_proba[:, i]
#break
rf_accuracy, rf_predict_y = predict_proba_to_predict_accuracy(rf_predict_proba, test_y_vector, start_class)
return rf_accuracy, rf_train_time, rf_test_time, rf_predict_y
if __name__ == '__main__':
train_x_matrix = np.array([[-1, -4, -7], [2, -1, 7], [-3, 2, 7], [1, 1, 7], [2, 1, 7], [3, 2, 7]]).astype(np.float64)
test_x_matrix = np.array([[12, 5, 7], [2, 1, -7], [-3, -2, -7], [-1, 1, -7]]).astype(np.float64)
train_y_vector = np.array([0, 1, 1, 1, 1, 1]).astype(np.float64)
test_y_vector = np.array([1, 0, 0, 0]).astype(np.float64)
```
#### File: Baselines/tsc_mtl/mtl_classifier.py
```python
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from collections import OrderedDict
from sklearn.metrics import mean_squared_error
import copy
import sys
from torchviz import make_dot
#The UEA MTS processing
from utils.UEA_utils import get_UEA_dataset
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--filename", type=str, default="WordsSynonyms")
parser.add_argument("--horizon", type=float, default=0.2)
parser.add_argument("--stride", type=float, default=0.2)
parser.add_argument("--seed1", type=int, default=17)
parser.add_argument("--seed2", type=int, default=10)
parser.add_argument("--alpha", type=float, default=0.1)
parser.add_argument("--sup_ratio", type=float, default=0.9)
args = parser.parse_args()
filename=args.filename
horizon=args.horizon
stride=args.stride
seed1=args.seed1
seed2=args.seed2
alpha=args.alpha
sup_ratio = args.sup_ratio
#sys.stdout=open("clf_mtl_"+str(seed1)+"_"+str(seed2)+"_"+filename+"_"+str(horizon)+"_"+str(stride)+"_"+str(alpha)+".log","w")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MTNet(torch.nn.Module):
def __init__(self, horizon):
super(MTNet, self).__init__()
self.conv1 = nn.Conv1d(x_train.shape[1], 128, 9, padding=(9 // 2))
self.bnorm1 = nn.BatchNorm1d(128)
self.conv2 = nn.Conv1d(128, 256, 5, padding=(5 // 2))
self.bnorm2 = nn.BatchNorm1d(256)
self.conv3 = nn.Conv1d(256, 128, 3, padding=(3 // 2))
self.bnorm3 = nn.BatchNorm1d(128)
self.classification_head = nn.Linear(128, nb_classes)
'''****** START: UTS data forecasting ******'''
#self.forecasting_head = nn.Linear(128, horizon)
'''****** END: UTS data forecasting ******'''
'''****** START: MTS data forecasting ******'''
self.forecasting_head = nn.Linear(128, x_train.shape[1])
'''****** END: MTS data forecasting ******'''
def forward(self, x_class, x_forecast):
b1_c = F.relu(self.bnorm1(self.conv1(x_class)))
b1_f = F.relu(self.bnorm1(self.conv1(x_forecast)))
b2_c = F.relu(self.bnorm2(self.conv2(b1_c)))
b2_f = F.relu(self.bnorm2(self.conv2(b1_f)))
b3_c = F.relu(self.bnorm3(self.conv3(b2_c)))
b3_f = F.relu(self.bnorm3(self.conv3(b2_f)))
classification_features = torch.mean(b3_c, 2)  # (batch, 128): global average pooling over time, one feature per conv channel
classification_out = self.classification_head(classification_features)
'''****** START: UTS data forecasting ******'''
'''forecasting_features = torch.mean(b3_f, 2)
forecasting_features = F.relu(self.bnorm4(self.conv4(b3_f)))
forecasting_out = self.forecasting_head(forecasting_features)'''
'''****** END: UTS data forecasting ******'''
'''****** START: MTS data forecasting ******'''
forecasting_mid = self.forecasting_head(b3_f.permute(0, 2, 1))
forecasting_out = forecasting_mid.permute(0, 2, 1)
'''****** END: MTS data forecasting ******'''
return classification_out, forecasting_out
def forward_test(self, x_class):
b1_c = F.relu(self.bnorm1(self.conv1(x_class)))
b2_c = F.relu(self.bnorm2(self.conv2(b1_c)))
b3_c = F.relu(self.bnorm3(self.conv3(b2_c)))
classification_features = torch.mean(b3_c, 2)  # (batch, 128): global average pooling over time, one feature per conv channel
classification_out=self.classification_head(classification_features)
return classification_out
def optimize_network(x_batch_class, y_batch_class, x_forecast, y_forecast):
y_hat_classification, y_hat_forecasting = mtnet(x_batch_class.float(), x_forecast.float())
loss_classification = criterion_classification(y_hat_classification, y_batch_class.long())
loss_forecasting = criterion_forecasting(y_hat_forecasting, y_forecast.float())
loss_mtl = loss_classification+alpha*loss_forecasting
optimizer.zero_grad()
loss_mtl.backward()
optimizer.step()
return loss_classification.item(), loss_forecasting.item()
'''
train =pd.read_csv("/Users/Jingwei/Downloads/"+filename+"/"+filename+"_TRAIN",sep=",",header=None)
test=pd.read_csv("/Users/Jingwei/Downloads/"+filename+"/"+filename+"_TEST",sep=",",header=None)
df = pd.concat((train,test))
y_s = df.values[:,0]
nb_classes = len(np.unique(y_s))
y_s = (y_s - y_s.min())/(y_s.max()-y_s.min())*(nb_classes-1)
df[df.columns[0]] = y_s
train, test = train_test_split(df, test_size=0.2, random_state=seed1)
train_labeled, train_unlabeled = train_test_split(train, test_size=1-0.1, random_state=seed2)
train_unlabeled[train_unlabeled.columns[0]]=-1#Explicitly set all the instance's labels to -1
train_1=pd.concat((train_labeled,train_unlabeled))
x_train=train_1.values[:,1:]
y_train=train_1.values[:,0]
x_test=test.values[:,1:]
y_test=test.values[:,0]
x_train_mean = x_train.mean()
x_train_std = x_train.std()
x_train = (x_train - x_train_mean)/(x_train_std)
x_test = (x_test - x_train_mean)/(x_train_std)
x_train=x_train[:,np.newaxis,:]
x_test=x_test[:,np.newaxis,:]
#x_train=x_train[:,:,np.newaxis]
#x_test=x_test[:,:,np.newaxis]
max_acc_possible = 1-(sum([list(y_test).count(x) for x in list(set(np.unique(y_test))-set(np.unique(y_train)))])/len(y_test))
'''
'''*************** Start of UEA MTS data preparation ***************'''
rep_main = "/Users/Jingwei/Downloads/"
rep_ds_train = rep_main + filename + "/output_train/"
rep_ds_test = rep_main + filename + "/output_test/"
meta_csv = "meta_data.csv" # the meta data of training
dataset = get_UEA_dataset(rep_ds_train, rep_ds_test, meta_csv, sup_ratio, mode='load')
#extract the labeled and unlabeled MTS samples
x_sup = dataset['X_sup'] # 3-D Array: N * L * D
x_unsup = dataset['X_unsup']
y_sup = dataset['Y_sup'] # 1-D Array
y_unsup = [-1] * len(dataset['Y_unsup'])
x_train = np.concatenate((x_sup, x_unsup), axis=0)
y_train = np.concatenate((y_sup, y_unsup), axis=0)
x_test = dataset['X_test']
y_test = dataset['Y_test']
nb_classes = dataset['n_classes']
max_acc_possible = 1-(sum([list(y_test).count(x) for x in list(set(np.unique(y_test))-set(np.unique(y_train)))])/len(y_test))
x_train = np.transpose(x_train, (0,2,1))
x_test = np.transpose(x_test, (0,2,1))
'''*************** End of UEA MTS data preparation ***************'''
x_train = torch.from_numpy(x_train).to(device)
y_train = torch.from_numpy(y_train).to(device)
x_test = torch.from_numpy(x_test).to(device)
y_test = torch.from_numpy(y_test).to(device)
mtnet = MTNet(int(x_train.shape[2]*horizon)).to(device)
print(mtnet)
criterion_classification = nn.CrossEntropyLoss()
criterion_forecasting = nn.MSELoss()
optimizer = torch.optim.Adam(mtnet.parameters(), lr=1e-4)
batch_size = 32
accuracies=[]
def return_sliding_windows(X):
xf=[]
yf=[]
for i in range(0,X.shape[2],int(stride*X.shape[2])):
horizon1=int(horizon*X.shape[2])
if(i+horizon1+horizon1<=X.shape[2]):
# print("X===>",i,i+horizon)
# print("Y===>",i+horizon,i+horizon+horizon)
xf.append(X[:,:,i:i+horizon1])
yf.append(X[:,:,i+horizon1:i+horizon1+horizon1])
xf=torch.cat(xf)
yf=torch.cat(yf)
return xf,yf
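# --- Illustrative sketch (not part of the original training code) ---
# return_sliding_windows turns each series into (input window -> next window) pairs for the
# forecasting task. A toy example with window length 2 (i.e. horizon = stride = 0.25 on a
# series of length 8); all numbers below are made up:
def _demo_sliding_windows():
    X = torch.arange(16.).reshape(1, 2, 8)   # (batch, channels, length)
    w = 2                                    # int(0.25 * 8)
    xf, yf = [], []
    for i in range(0, 8, w):
        if i + 2 * w <= 8:
            xf.append(X[:, :, i:i + w])              # inputs  [0:2], [2:4], [4:6]
            yf.append(X[:, :, i + w:i + 2 * w])      # targets [2:4], [4:6], [6:8]
    return torch.cat(xf).shape, torch.cat(yf).shape  # both torch.Size([3, 2, 2])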
x_sliding_window, y_sliding_window = return_sliding_windows(x_train)
def shuffler(x_train, y_train):
indexes=np.array(list(range(x_train.shape[0])))
np.random.shuffle(indexes)
x_train=x_train[indexes]
y_train=y_train[indexes]
return x_train, y_train
for t in range(5000):
losses=[]
x_train, y_train = shuffler(x_train, y_train)
x_train_batch=x_train[y_train!=-1]
y_train_batch=y_train[y_train!=-1]
x_sliding_window, y_sliding_window = shuffler(x_sliding_window, y_sliding_window)
for i in range(0,x_sliding_window.shape[0],batch_size):
if i+batch_size<=x_sliding_window.shape[0]:
closs,floss = optimize_network(x_train_batch, y_train_batch, x_sliding_window[i:i+batch_size], y_sliding_window[i:i+batch_size])
losses.append([closs,floss])
else:
closs,floss = optimize_network(x_train_batch, y_train_batch, x_sliding_window[i:], y_sliding_window[i:])
losses.append([closs,floss])
val_acc = accuracy_score(np.argmax(mtnet.forward_test(x_test.float()).cpu().detach().numpy(),1),y_test.long().cpu().numpy())
accuracies.append(val_acc)
print("Epoch: ",t,"| Accuracy: ",val_acc, "/",max(accuracies),"/",max_acc_possible, "| Avg. losses: ", np.mean([loss[0] for loss in losses]),np.mean([loss[1] for loss in losses]), flush=True)
if val_acc==1.0:
break
``` |
{
"source": "JingwenCao/CEBD1160-Final_Project",
"score": 3
} |
#### File: JingwenCao/CEBD1160-Final_Project/breastcancer_analysis.py
```python
import numpy as np
import matplotlib
import matplotlib.pyplot as ppt
from matplotlib.colors import ListedColormap
from sklearn.datasets import load_breast_cancer
from sklearn import metrics
from sklearn import model_selection
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from matplotlib.colors import ListedColormap
# 2. Load data
breastcancer = load_breast_cancer()
X, headers, y = breastcancer.data, breastcancer.feature_names, breastcancer.target
# 3. Standardize data
scaler = StandardScaler()
X = scaler.fit_transform(X)
# 3. Split data into training data and test data
X_train, X_test, Y_train, Y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
# 3a. Make data two dimensional
pca_model = PCA(n_components=2)
pca_model.fit(X_train)
X_train = pca_model.transform(X_train)
X_test = pca_model.transform(X_test)
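# Added note (illustrative, not in the original): only 2 principal components are kept,
# so it can be worth checking how much variance they retain, e.g.:
#   print(pca_model.explained_variance_ratio_.sum())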
print(X_train[:5])  # peek at the first few PCA-transformed rows
# 5. Perform all classifications using various models
# 5a. Define all models
cla_names = ["Logistic Regression", "SVM", "Stochastic Gradient Descent", "Nearest Neighbors", "Naive Bayes", "Decision Trees", "Bagging Meta-Estimator", "Random Forest"]
classifiers = [
LogisticRegression(random_state=0),
SVC(kernel='rbf', random_state=0),
SGDClassifier(loss="hinge", penalty='l2', max_iter=5),
KNeighborsClassifier(n_neighbors=7),
GaussianNB(),
DecisionTreeClassifier(random_state=0),
BaggingClassifier(random_state=0),
RandomForestClassifier(random_state=0)]
# 5b. Apply all models in for loop
name_list = []
score = []
for name, clf in zip(cla_names, classifiers):
clf.fit(X_train, Y_train)
name_list.append(name)
score.append(clf.score(X_test, Y_test))
# 5c Plot performance
fig, ax = ppt.subplots(figsize=(20, 10))
ind = np.arange(8)
width = 0.9
ppt.bar(ind, score, width, 0)
ppt.xticks(ind, name_list)
ppt.title("Classifier Performance")
for i in range(len(score)):
    ppt.annotate(score[i], xy=(i - 0.4, 0.99))
ppt.savefig ("Classifiers_Performance.png", format="PNG")
# 6. Plot all classification plots
fig = ppt.figure(figsize=(27,27))
# 6a. Plot the PCA-projected input data (training and test points) in each of the nine subplots
matplotlib.style.use('ggplot')
h = 0.02
for i in [1,2,3,4,5,6,7,8,9]:
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
cm = ppt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = ppt.subplot(3,3,i)
ax.set_title("Input Data")
plot1=ax.scatter(X_train[:,0], X_train[:,1], c=Y_train, cmap=cm_bright, edgecolors='k')
plot2=ax.scatter(X_test[:,0], X_test[:,1], c=Y_test, cmap=cm_bright, alpha=0.6, edgecolors='k')
ax.legend((plot1, plot2), ('Training Data', 'Test Data'))
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
# 6b. Plot decision boundary function
def plot_decision_boundary(model,data,position):
ax = ppt.subplot(3,3,position)
model.fit(X_train, Y_train)
score = model.score(X_test, Y_test)
if hasattr(model, "decision_function"):
Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1]
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8, norm=None)
ax.scatter(X_train[:,0], X_train[:,1], c=Y_train, cmap=cm_bright, edgecolors='k')
ax.scatter(X_test[:,0], X_test[:,1], c=Y_test, cmap=cm_bright, alpha=0.3, edgecolors='k')
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(cla_names[i])
for i in (range(len(classifiers))):
plot_decision_boundary(classifiers[i], X, i+2)
ppt.tight_layout()
ppt.savefig ("Classifiers_Plots.png", format="PNG")
``` |
{
"source": "jingwenlow-davis/helpr",
"score": 2
} |
#### File: helpr/api/models.py
```python
from django.db import models
# from django.dispatch import receiver
from django.contrib.auth.models import AbstractUser
# from django.conf import settings
# from django.contrib.contenttypes.fields import GenericForeignKey
# from django.contrib.contenttypes.models import ContentType
# from rest_framework.authtoken.models import Token
# This code is triggered whenever a new user has been created and saved to the database
# @receiver(post_save, sender=settings.AUTH_USER_MODEL)
# def create_auth_token(sender, instance=None, created=False, **kwargs):
# if created:
# Token.objects.create(user=instance)
class User(AbstractUser):
'''
User info and profile
'''
# profile_picture = models.ImageField(upload_to='profile')
# location = models.idk(null=True, blank=True)
GENDER_CHOICES = (
('M', 'Male'),
('F', 'Female'),
('O', 'Other'),
)
gender = models.CharField(max_length=10, choices=GENDER_CHOICES, null=True, blank=True)
# lister fields
helpr = models.BooleanField(default=False)  # whether this user has listings or is just a regular user
birthday = models.DateField(null=True, blank=True)
def __str__(self):
return self.username
class Category(models.Model):
'''
All possible categories
'''
CATEGORY_CHOICES = (
('photographer', 'photographer'),
('plumber', 'plumber'),
('landscaper', 'landscaper'),
('personal_trainer', 'personal trainer'),
('tutor', 'tutor'),
('carpenter', 'carpenter'),
('electrician', 'electrician'),
('pool_service', 'pool service'),
('gardener', 'gardener'),
('babysitting', 'babysitting'),
)
category = models.CharField(max_length=50, choices=CATEGORY_CHOICES, null=True, blank=True)
def __str__(self):
return self.category
class Listing(models.Model):
'''
Listing a user posts
'''
title = models.CharField(max_length=250)
user = models.ManyToManyField(User)
# profile_picture = models.ImageField(upload_to='profile')
# location = models.idk()
# images
blurb = models.TextField(max_length=500, null=True, blank=True)
# phone_number = models.PositiveIntegerField(null=True, blank=True)
# email = models.CharField(max_length=150)
website = models.CharField(max_length=150, null=True, blank=True)
category = models.ManyToManyField(Category)
# ratings = models.
def __str__(self):
return self.title
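# --- Illustrative sketch (not part of the original app) ---
# A minimal example of how these models relate, e.g. from `python manage.py shell`.
# All names and values below are made up, and it assumes migrations for this app exist.
def _example_usage():
    user = User.objects.create_user(username="alice", helpr=True)
    plumber = Category.objects.create(category="plumber")
    listing = Listing.objects.create(title="Pipe repairs", blurb="Fast and friendly")
    listing.user.add(user)         # ManyToMany: a listing can belong to several users
    listing.category.add(plumber)  # ...and several categories
    return listing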
``` |
{
"source": "JingwenWang95/KinectFusion",
"score": 2
} |
#### File: JingwenWang95/KinectFusion/utils.py
```python
import time
import torch
import numpy as np
import yaml
from addict import Dict
import argparse
class ForceKeyErrorDict(Dict):
def __missing__(self, key):
raise KeyError(key)
def load_yaml(path):
with open(path, encoding='utf8') as yaml_file:
config_dict = yaml.load(yaml_file, Loader=yaml.FullLoader)
config = ForceKeyErrorDict(**config_dict)
return config
def load_config(args):
config_dict = load_yaml(args.config)
# merge args and config
other_dict = vars(args)
config_dict.update(other_dict)
return config_dict
def get_volume_setting(args):
voxel_size = args.voxel_size
vol_bnds = np.array(args.vol_bounds).reshape(3, 2)
vol_dims = (vol_bnds[:, 1] - vol_bnds[:, 0]) // voxel_size + 1
vol_origin = vol_bnds[:, 0]
return vol_dims, vol_origin, voxel_size
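# --- Illustrative sketch (not in the original) ---
# Example of the arithmetic in get_volume_setting, with made-up numbers: bounds of
# [-2, 2] on every axis and a 0.5 m voxel give 9 voxels per axis and origin (-2, -2, -2).
def _demo_volume_setting():
    bnds = np.array([[-2., 2.], [-2., 2.], [-2., 2.]])
    dims = (bnds[:, 1] - bnds[:, 0]) // 0.5 + 1
    return dims, bnds[:, 0]   # -> array([9., 9., 9.]), array([-2., -2., -2.])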
def get_time():
"""
:return: current time in seconds, taken after torch.cuda.synchronize() so pending GPU work is included in timings
"""
torch.cuda.synchronize()
return time.time()
``` |
{
"source": "JingwenWang95/neurecon",
"score": 2
} |
#### File: neurecon/models/ray_casting.py
```python
from models.base import ImplicitSurface
import numpy as np
from tqdm import tqdm
from typing import Union
from collections import OrderedDict
import torch
import torch.nn.functional as F
def run_secant_method(f_low, f_high, d_low, d_high,
rays_o_masked, rays_d_masked,
implicit_surface_query_fn,
n_secant_steps, logit_tau):
d_pred = - f_low * (d_high - d_low) / (f_high - f_low) + d_low
for i in range(n_secant_steps):
p_mid = rays_o_masked + d_pred.unsqueeze(-1) * rays_d_masked
with torch.no_grad():
# TODO: needs special design in here when the number of rays in each batch is different.
f_mid = implicit_surface_query_fn(p_mid).squeeze(-1) - logit_tau
ind_low = f_mid < 0
if ind_low.sum() > 0:
d_low[ind_low] = d_pred[ind_low]
f_low[ind_low] = f_mid[ind_low]
if (ind_low == 0).sum() > 0:
d_high[ind_low == 0] = d_pred[ind_low == 0]
f_high[ind_low == 0] = f_mid[ind_low == 0]
d_pred = - f_low * (d_high - d_low) / (f_high - f_low) + d_low
return d_pred
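# --- Illustrative note (not in the original) ---
# The update above is the standard secant step: it returns the root of the straight line
# through (d_low, f_low) and (d_high, f_high). With toy numbers d_high = 1.0, f_high = +0.5
# (outside) and d_low = 1.5, f_low = -0.5 (inside):
#   d_pred = -(-0.5) * (1.0 - 1.5) / (0.5 - (-0.5)) + 1.5 = 1.25
# i.e. the midpoint in this symmetric case, which is then refined for n_secant_steps iterations.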
def run_bisection_method():
pass
def root_finding_surface_points(
surface_query_fn,
rays_o: torch.Tensor, rays_d: torch.Tensor,
near: Union[float, torch.Tensor]=0.0,
far: Union[float, torch.Tensor]=6.0,
# function config
batched = True,
batched_info = {},
# algorithm config
N_steps = 256,
logit_tau=0.0,
method='secant',
N_secant_steps = 8,
fill_inf=True,
normalize_factor=1.0
):
"""
rays_o: [(B), N_rays, 3]
rays_d: [(B), N_rays, 3]
near: float or [(B), N_rays]
far: float or [(B), N_rays]
"""
# NOTE: jianfei: modified from DVR. https://github.com/autonomousvision/differentiable_volumetric_rendering
# NOTE: DVR's logits are (+) inside / (-) outside; the logits here are (+) outside / (-) inside.
# NOTE: rays_d needs to be already normalized
with torch.no_grad():
device = rays_o.device
if not batched:
rays_o.unsqueeze_(0)
rays_d.unsqueeze_(0)
B = rays_o.shape[0]
N_rays = rays_o.shape[-2]
# [B, N_rays, N_steps, 1]
t = torch.linspace(0., 1., N_steps, device=device)[None, None, :]
if not isinstance(near, torch.Tensor):
near = near * torch.ones(rays_o.shape[:-1], device=device)
if not isinstance(far, torch.Tensor):
far = far * torch.ones(rays_o.shape[:-1], device=device)
d_proposal = near[..., None] * (1-t) + far[..., None] * t
# [B, N_rays, N_steps, 3]
p_proposal = rays_o.unsqueeze(-2) + d_proposal.unsqueeze(-1) * rays_d.unsqueeze(-2)
# only query sigma
pts = p_proposal / normalize_factor
# query network
# [B, N_rays, N_steps]
val = surface_query_fn(pts)
# [B, N_rays, N_steps]
val = val - logit_tau # centered at zero
# mask: the first point is not occupied
# [B, N_rays]
mask_0_not_occupied = val[..., 0] > 0
# [B, N_rays, N_steps-1]
sign_matrix = torch.cat(
[
torch.sign(val[..., :-1] * val[..., 1:]), # [B, N, N_steps-1]
torch.ones([B, N_rays, 1], device=device) # [B, N, 1]
], dim=-1)
# [B, N_rays, N_steps-1]
cost_matrix = sign_matrix * torch.arange(N_steps, 0, -1).float().to(device)
values, indices = torch.min(cost_matrix, -1)
# mask: at least one sign change occurred
mask_sign_change = values < 0
# mask: whether the first sign change is from pos to neg (outside surface into the surface)
mask_pos_to_neg = val[torch.arange(B).unsqueeze(-1), torch.arange(N_rays).unsqueeze(0), indices] > 0
mask = mask_sign_change & mask_pos_to_neg & mask_0_not_occupied
#--------- secant method
# [B*N_rays, N_steps, 1]
d_proposal_flat = d_proposal.view([B*N_rays, N_steps, 1])
val_flat = val.view([B*N_rays, N_steps, 1])
N_secant = d_proposal_flat.shape[0]
# [N_masked]
d_high = d_proposal_flat[torch.arange(N_secant), indices.view(N_secant)].view([B, N_rays])[mask]
f_high = val_flat[torch.arange(N_secant), indices.view(N_secant)].view([B, N_rays])[mask]
indices = torch.clamp(indices + 1, max=N_steps - 1)
d_low = d_proposal_flat[torch.arange(N_secant), indices.view(N_secant)].view([B, N_rays])[mask]
f_low = val_flat[torch.arange(N_secant), indices.view(N_secant)].view([B, N_rays])[mask]
# [N_masked, 3]
rays_o_masked = rays_o[mask]
rays_d_masked = rays_d[mask]
# TODO: for categorical representation, mask latents here
if method == 'secant' and mask.sum() > 0:
d_pred = run_secant_method(
f_low, f_high, d_low, d_high,
rays_o_masked, rays_d_masked,
surface_query_fn,
N_secant_steps, logit_tau)
else:
d_pred = torch.ones(rays_o_masked.shape[0]).to(device)
# for sanity
pt_pred = torch.ones([B, N_rays, 3]).to(device)
pt_pred[mask] = rays_o_masked + d_pred.unsqueeze(-1) * rays_d_masked
d_pred_out = torch.ones([B, N_rays]).to(device)
d_pred_out[mask] = d_pred
# Insert appropriate values for points where no depth is predicted
if isinstance(far, torch.Tensor):
far = far[mask == 0]
d_pred_out[mask == 0] = np.inf if fill_inf else far # no intersections; or the first intersection is from outside to inside; or the 0-th point is occupied.
d_pred_out[mask_0_not_occupied == 0] = 0 # if the 0-th point is occupied, the depth should be 0.
if not batched:
d_pred_out.squeeze_(0)
pt_pred.squeeze_(0)
mask.squeeze_(0)
mask_sign_change.squeeze_(0)
return d_pred_out, pt_pred, mask, mask_sign_change
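# --- Illustrative sketch (not in the original) ---
# The sign_matrix / cost_matrix trick above finds, per ray, the FIRST step at which the
# centered value flips sign (outside -> inside): multiplying by a decreasing ramp makes the
# earliest sign change the most negative entry, so the min recovers its index.
# A 1-D toy example with made-up values:
def _demo_first_sign_change():
    val = torch.tensor([0.9, 0.4, -0.2, 0.3, -0.5])                    # signs: + + - + -
    sign = torch.cat([torch.sign(val[:-1] * val[1:]), torch.ones(1)])  # [ 1, -1, -1, -1,  1]
    cost = sign * torch.arange(5, 0, -1).float()                       # [ 5, -4, -3, -2,  1]
    return torch.argmin(cost).item()                                   # -> 1: flip between steps 1 and 2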
def sphere_tracing_surface_points(
implicit_surface: ImplicitSurface,
rays_o, rays_d,
# function config
near=0.0,
far=6.0,
batched = True,
batched_info = {},
# algorithm config
N_iters = 20,
):
device = rays_o.device
d_preds = torch.ones([*rays_o.shape[:-1]], device=device) * near
mask = torch.ones_like(d_preds, dtype=torch.bool, device=device)
for _ in range(N_iters):
pts = rays_o + rays_d * d_preds[..., :, None]
surface_val = implicit_surface.forward(pts)
d_preds[mask] += surface_val[mask]
mask[d_preds > far] = False
mask[d_preds < 0] = False
pts = rays_o + rays_d * d_preds[..., :, None]
return d_preds, pts, mask
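# --- Illustrative sketch (not in the original) ---
# Sphere tracing advances each ray by the signed distance at the current point, which
# never overshoots the surface of a true SDF. A toy example with an analytic unit-sphere
# SDF standing in for implicit_surface.forward:
def _demo_sphere_tracing():
    rays_o = torch.tensor([[0., 0., -3.]])
    rays_d = torch.tensor([[0., 0., 1.]])
    d = torch.zeros(1)
    for _ in range(20):
        pts = rays_o + rays_d * d[:, None]
        d = d + (pts.norm(dim=-1) - 1.0)   # SDF of the unit sphere at the origin
    return d                               # -> tensor([2.]): the hit point is z = -1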
def surface_render(rays_o: torch.Tensor, rays_d: torch.Tensor,
model,
calc_normal=True,
rayschunk=8192, netchunk=1048576, batched=True, use_view_dirs=True, show_progress=False,
ray_casting_algo='',
ray_casting_cfgs={},
**not_used_kwargs):
"""
input:
rays_o: [(B,) N_rays, 3]
rays_d: [(B,) N_rays, 3] NOTE: not normalized; encodes the ratio len(this ray) / len(principal ray)
"""
with torch.no_grad():
device = rays_o.device
if batched:
DIM_BATCHIFY = 1
B = rays_d.shape[0] # batch_size
flat_vec_shape = [B, -1, 3]
else:
DIM_BATCHIFY = 0
flat_vec_shape = [-1, 3]
rays_o = torch.reshape(rays_o, flat_vec_shape).float()
rays_d = torch.reshape(rays_d, flat_vec_shape).float()
# normalize ray directions (the inputs are not normalized; see the NOTE above)
rays_d = F.normalize(rays_d, dim=-1)
# ---------------
# Render a ray chunk
# ---------------
def render_rayschunk(rays_o: torch.Tensor, rays_d: torch.Tensor):
if use_view_dirs:
view_dirs = rays_d
else:
view_dirs = None
if ray_casting_algo == 'root_finding':
d_pred_out, pt_pred, mask, *_ = root_finding_surface_points(
model.implicit_surface, rays_o, rays_d, batched=batched, **ray_casting_cfgs)
elif ray_casting_algo == 'sphere_tracing':
d_pred_out, pt_pred, mask = sphere_tracing_surface_points(
model.implicit_surface, rays_o, rays_d, batched=batched, **ray_casting_cfgs)
else:
raise NotImplementedError
color, _, nablas = model.forward(pt_pred, view_dirs)
color[~mask] = 0 # black
# NOTE: all without grad. especially for nablas.
return color.data, d_pred_out.data, nablas.data, mask.data
colors = []
depths = []
nablas = []
masks = []
for i in tqdm(range(0, rays_o.shape[DIM_BATCHIFY], rayschunk), disable=not show_progress):
color_i, d_i, nablas_i, mask_i = render_rayschunk(
rays_o[:, i:i+rayschunk] if batched else rays_o[i:i+rayschunk],
rays_d[:, i:i+rayschunk] if batched else rays_d[i:i+rayschunk]
)
colors.append(color_i)
depths.append(d_i)
nablas.append(nablas_i)
masks.append(mask_i)
colors = torch.cat(colors, DIM_BATCHIFY)
depths = torch.cat(depths, DIM_BATCHIFY)
nablas = torch.cat(nablas, DIM_BATCHIFY)
masks = torch.cat(masks, DIM_BATCHIFY)
extras = OrderedDict([
('implicit_nablas', nablas),
('mask_surface', masks)
])
if calc_normal:
normals = F.normalize(nablas, dim=-1)
normals[~masks] = 0 # grey (/2.+0.5)
extras['normals_surface'] = normals
return colors, depths, extras
``` |
{
"source": "JingwenWang95/svox2",
"score": 2
} |
#### File: svox2/opt/extract_mesh.py
```python
import os
import numpy as np
from skimage import measure
import trimesh
data_dir = "../logs/lego/1"
data = np.load(os.path.join(data_dir, 'ckpt.npz'))
def get_scene_mesh(data, threshold=150.):
indices_flat = data['links'].reshape(-1)
density = data['density_data'][data['links'].reshape(-1)]
density[indices_flat < 0] = 0.
density = density.reshape(data['links'].shape)
vertices, faces, normals, _ = measure.marching_cubes(density,
threshold,
spacing=(2. / data['links'].shape[0],
2. / data['links'].shape[1],
2. / data['links'].shape[2]),
allow_degenerate=False)
vertices = np.array(vertices) + np.array([-1, -1, -1])
normals = np.array(normals)
return trimesh.Trimesh(vertices, faces, vertex_normals=normals)
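# --- Illustrative sketch (not part of the original script) ---
# A self-contained example of the same extraction on a synthetic filled sphere, so the
# marching-cubes call can be sanity-checked without a ckpt.npz (all sizes/values made up):
def _demo_marching_cubes_sphere():
    grid = np.linspace(-1, 1, 64)
    x, y, z = np.meshgrid(grid, grid, grid, indexing='ij')
    density = np.where(x ** 2 + y ** 2 + z ** 2 < 0.5 ** 2, 300.0, 0.0)
    verts, faces, normals, _ = measure.marching_cubes(density, 150.0,
                                                      spacing=(2. / 64, 2. / 64, 2. / 64))
    return trimesh.Trimesh(verts + np.array([-1, -1, -1]), faces, vertex_normals=normals)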
mesh = get_scene_mesh(data)
mesh.export(os.path.join(data_dir, 'mesh.ply'))
mesh.show()
``` |
{
"source": "jingwenyvonne/IDS560",
"score": 3
} |
#### File: jingwenyvonne/IDS560/main.py
```python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep, strftime
from random import randint
import pandas as pd
from selenium.webdriver.common.by import By
import time
from config import *
import logging
logging.basicConfig(filename='test.log', level=logging.DEBUG,
format='%(asctime)s:%(levelname)s:%(message)s')
limits = {}
limits['follow_limit_per_hour'] = randint(5,10)
limits['unfollow_limit_per_hour'] = randint(3,10)
limits['like_limit_per_hour'] = randint(50,80)
limits['comment_limit_per_hour'] = randint(10,19)
# follow_limit_per_hour = randint(5,10)
# unfollow_limit_per_hour= randint(3,10)
# like_limit_per_hour = randint(80,120)
# comment_limit_per_hour = randint(30,50)
posts_to_reach_per_hashtag = 50
# Iterate through the hashtags stored in "hashtag_list"
new_followed = []
new_unfollowed=[]
my_dict = {}
my_dict_cum = {}
my_dict['followed'] = 0
my_dict['unfollowed']=0
my_dict['likes'] = 0
my_dict['comments'] = 0
my_dict['total_actions'] = 0
my_dict_time = {}
my_dict_time ['like_timer'] =time.time()
my_dict_time ['follow_timer'] =time.time()
my_dict_time ['unfollow_timer']=time.time()
my_dict_time ['comment_timer'] =time.time()
my_dict_cum['followed'] = 0
my_dict_cum['unfollowed']=0
my_dict_cum['likes'] = 0
my_dict_cum['comments'] = 0
my_dict_cum['total_actions'] = 0
# Use WebDriver to open a Chrome tab and navigate to Instagram login page
webdriver = webdriver.Chrome(executable_path = chromedriver_path)
webdriver.get("https://www.instagram.com/accounts/login")
sleep(1)
# In[36]:
username = webdriver.find_element_by_name("username")
username.send_keys(un)
password = webdriver.find_element_by_name("password")
password.send_keys(pw)
sleep(1)
# Click login button
login_Xpath = '//*[@id="loginForm"]/div/div[3]/button/div'
webdriver.find_element_by_xpath(login_Xpath).click()
sleep(5)
# In[37]:
# Click "Not Now" on "Save Your Login Info?" popup
not_now = webdriver.find_element_by_css_selector("#react-root > section > main > div > div > div > div > button")
not_now.click()
sleep(randint(2,5))
# Click "Not Now" on popup "Turn on Notifications"
not_now = webdriver.find_element_by_css_selector("body > div.RnEpo.Yx5HN > div > div > div > div.mt3GC > button.aOOlW.HoLwm")
not_now.click()
sleep(randint(2,5))
# In[38]:
# a ='45412'
# float(a.replace(',',''))
# In[39]:
#refresh
def refresh(un):
webdriver.get("https://www.instagram.com/"+un+'/')
sleep(randint(2,5))
picture=webdriver.find_element_by_css_selector("#react-root > section > main > div > div._2z6nI > article > div > div > div > div.v1Nh3.kIKUG._bz0w > a > div > div._9AhH0")
picture.click()
sleep(randint(2,5))
comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
comment.click()
sleep(randint(2,5))
comment_hashtags= '#gold,#accessories,#earrings,#necklace'
comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
comment.send_keys(comment_hashtags)
sleep(randint(2,5))
comment_click = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > button > div")
comment_click.click()
#Number of followers function
def num_followers(username):
url = "https://www.instagram.com/"+username+'/'
sleep(2)
webdriver.execute_script("window.open('');")
webdriver.switch_to.window(webdriver.window_handles[1])
webdriver.get(url)
sleep(3)
num_of_followers = webdriver.find_element_by_css_selector('#react-root > section > main > div > header > section > ul > li:nth-child(2) > a > div > span').text
if num_of_followers[-1] == 'k':
num = float(num_of_followers[:-1].replace(',',''))*1000
elif num_of_followers[-1] == 'm':
num = float(num_of_followers[:-1].replace(',',''))*1000000
else:
num = float(num_of_followers.replace(',',''))
sleep(2)
webdriver.close()
webdriver.switch_to.window(webdriver.window_handles[0])
return num
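# --- Illustrative sketch (not in the original) ---
# The follower-count parsing used by num_followers, shown as a pure function that can be
# tested without opening a browser (the example strings are made up):
def _parse_follower_count(text):
    if text[-1] == 'k':
        return float(text[:-1].replace(',', '')) * 1000       # "12.3k"  -> 12300.0
    elif text[-1] == 'm':
        return float(text[:-1].replace(',', '')) * 1000000    # "1.2m"   -> 1200000.0
    return float(text.replace(',', ''))                       # "45,412" -> 45412.0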
#Follow method and moving to next image
def unfollow():
if (time.time()-my_dict_time ['unfollow_timer']) < 3600 and my_dict['unfollowed']<limits['unfollow_limit_per_hour']:
for i in range(2):
webdriver.get("https://www.instagram.com/"+un+'/')
following_=webdriver.find_element_by_partial_link_text("following")
following_.click()
sleep(randint(1,3))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[3]/ul/div/li[1]/div/div[3]/button").click()
sleep(randint(1,3))
webdriver.find_element_by_xpath("/html/body/div[7]/div/div/div/div[3]/button[1]").click()
sleep(randint(1,3))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[1]/div/div[3]/div/button").click()
sleep(randint(1,2))
i+=1
my_dict['unfollowed']+=1
my_dict['total_actions']+=1
my_dict_cum['unfollowed']+=1
my_dict_cum['total_actions']+=1
logging.debug('unfollow : {}:total_unfollowed {}: total_actions {}'.format(username, my_dict_cum['unfollowed'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['unfollow_timer']) > 2*3600:
for i in range(5):
my_dict_time ['unfollow_timer'] =time.time()
my_dict['unfollowed'] = 0
limits['unfollow_limit_per_hour']= randint(3,10)
webdriver.get("https://www.instagram.com/"+un+'/')
following_=webdriver.find_element_by_partial_link_text("following")
following_.click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[3]/ul/div/li[1]/div/div[3]/button").click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[7]/div/div/div/div[3]/button[1]").click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[1]/div/div[3]/div/button").click()
sleep(randint(1,5))
# Increment "unfollowed" counter, add username to new_unfollowed list
new_unfollowed.append(username)
i+=1
my_dict['unfollowed'] += 1
my_dict['total_actions'] +=1
my_dict_cum['unfollowed']+=1
my_dict_cum['total_actions']+=1
logging.debug('unfollow : {}:total_unfollowed {}: total_actions {}'.format(username, my_dict_cum['unfollowed'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['unfollow_timer']) > 3600 and my_dict['unfollowed']<limits['unfollow_limit_per_hour']:
for i in range(5):
my_dict_time ['unfollow_timer'] =time.time()
my_dict['unfollowed'] = 0
limits['unfollow_limit_per_hour']= randint(3,10)
webdriver.get("https://www.instagram.com/"+un+'/')
following_=webdriver.find_element_by_partial_link_text("following")
following_.click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[3]/ul/div/li[1]/div/div[3]/button").click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[7]/div/div/div/div[3]/button[1]").click()
sleep(randint(1,5))
webdriver.find_element_by_xpath("/html/body/div[6]/div/div/div/div[1]/div/div[3]/div/button").click()
sleep(randint(1,5))
# Increment "unfollowed" counter, add username to new_unfollowed list
new_unfollowed.append(username)
i+=1
my_dict['unfollowed'] += 1
my_dict['total_actions'] +=1
my_dict_cum['unfollowed']+=1
my_dict_cum['total_actions']+=1
logging.debug('unfollow : {}:total_unfollowed {}: total_actions {}'.format(username, my_dict_cum['unfollowed'],my_dict_cum['total_actions']))
def follow():
follow_ = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.UE9AK > div > header > div.o-MQd.z8cbW > div.PQo_0.RqtMr > div.bY2yH > button > div")
username = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.UE9AK > div > header > div.o-MQd.z8cbW > div.PQo_0.RqtMr > div.e1e1d > div > span > a").text
if (time.time()-my_dict_time ['follow_timer']) < 3600 and my_dict['followed']<limits['follow_limit_per_hour']:
# Click follow
follow_.click()
sleep(randint(30,60))
# Increment "followed" counter, add username to new_followed list
new_followed.append(username)
my_dict['followed'] += 1
my_dict['total_actions'] +=1
my_dict_cum['followed'] += 1
my_dict_cum['total_actions'] +=1
logging.debug('follow : {}:total_followed {}: total_actions {}'.format(username, my_dict_cum['followed'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['follow_timer']) > 2*3600:
my_dict_time ['follow_timer'] =time.time()
my_dict['followed'] = 0
limits['follow_limit_per_hour'] = randint(5,10)
# Click follow
follow_.click()
sleep(randint(30,60))
# Increment "followed" counter, add username to new_followed list
new_followed.append(username)
my_dict['followed'] += 1
my_dict['total_actions'] +=1
my_dict_cum['followed'] += 1
my_dict_cum['total_actions'] +=1
logging.debug('follow : {}:total_followed {}: total_actions {}'.format(username, my_dict_cum['followed'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['follow_timer']) > 3600 and my_dict['followed']<limits['follow_limit_per_hour']:
my_dict_time ['follow_timer'] =time.time()
my_dict['followed'] = 0
limits['follow_limit_per_hour'] = randint(5,10)
# Click follow
follow_.click()
sleep(randint(30,60))
# Increment "followed" counter, add username to new_followed list
new_followed.append(username)
my_dict['followed'] += 1
my_dict['total_actions'] +=1
my_dict_cum['followed'] += 1
my_dict_cum['total_actions'] +=1
logging.debug('follow : {}:total_followed {}: total_actions {}'.format(username, my_dict_cum['followed'],my_dict_cum['total_actions']))
#like function
def like ():
if (time.time()-my_dict_time ['like_timer']) < 3600 and my_dict['likes'] <limits['like_limit_per_hour']:
like = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.ltpMr.Slqrh > span.fr66n > button")
like.click()
sleep(randint(30,60))
# Increment "likes" counter
my_dict['likes'] += 1
my_dict['total_actions'] +=1
my_dict_cum['likes'] += 1
my_dict_cum['total_actions'] +=1
logging.debug('like: total_likes {}: total_actions {}'.format( my_dict_cum['likes'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['like_timer']) > 2*3600:
my_dict_time ['like_timer'] = time.time()
my_dict['likes'] = 0
limits['like_limit_per_hour'] = randint(80,120)
like = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.ltpMr.Slqrh > span.fr66n > button")
like.click()
sleep(randint(30,60))
# Increment "likes" counter
my_dict['likes'] += 1
my_dict['total_actions'] +=1
my_dict_cum['likes'] += 1
my_dict_cum['total_actions'] +=1
logging.debug('like: total_likes {}: total_actions {}'.format( my_dict_cum['likes'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['like_timer']) > 3600 and my_dict['likes'] <limits['like_limit_per_hour']:
my_dict_time ['like_timer'] = time.time()
my_dict['likes'] = 0
limits['like_limit_per_hour'] = randint(80,120)
like = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.ltpMr.Slqrh > span.fr66n > button")
like.click()
sleep(randint(30,60))
# Increment "likes" counter
my_dict['likes'] += 1
my_dict['total_actions'] +=1
my_dict_cum['likes'] += 1
my_dict_cum['total_actions'] +=1
logging.debug('like: total_likes {}: total_actions {}'.format( my_dict_cum['likes'],my_dict_cum['total_actions']))
#Comment function
def comment(num_of_followers):
if (time.time()-my_dict_time ['comment_timer']) < 3600 and my_dict['comments'] <limits ['comment_limit_per_hour']:
comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
comment.click()
sleep(randint(1,5))
# Use "randint" to post different comments
rand_comment = randint(0, len(comments_list) - 1)
if num_of_followers>20000:
pick_comment = 'If you are interested being a brand ambassador please leave us a message on our page'
else:
pick_comment=comments_list[rand_comment]
comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
comment.send_keys(pick_comment)
sleep(randint(1,5))
comment_click = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > button > div")
comment_click.click()
sleep(randint(30,60))
# Increment "comments" counter
my_dict['comments'] += 1
my_dict['total_actions'] +=1
my_dict_cum['comments'] += 1
my_dict_cum['total_actions'] +=1
logging.debug('comment:total_comments {}: total_actions {}'.format( my_dict_cum['comments'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['comment_timer']) > 2*3600:
my_dict['comments'] = 0
my_dict_time ['comment_timer'] =time.time()
limits ['comment_limit_per_hour'] = randint(30,50)
comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
comment.click()
sleep(randint(1,5))
# Use "randint" to post different comments
rand_comment = randint(0, len(comments_list) - 1)
#rand_comment=random.randrange(0,5)
if num_of_followers>20000:
pick_comment = 'If you are interested being a brand ambassador please leave us a message on our page'
else:
pick_comment=comments_list[rand_comment]
comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
comment.send_keys(pick_comment)
sleep(randint(2,4))
comment_click = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > button > div")
comment_click.click()
sleep(randint(30,60))
# Increment "comments" counter
my_dict['comments'] += 1
my_dict['total_actions'] +=1
my_dict_cum['comments'] += 1
my_dict_cum['total_actions'] +=1
logging.debug('comment:total_comments {}: total_actions {}'.format( my_dict_cum['comments'],my_dict_cum['total_actions']))
elif (time.time()-my_dict_time ['comment_timer']) > 3600 and my_dict['comments'] < limits ['comment_limit_per_hour']:
my_dict['comments'] = 0
my_dict_time ['comment_timer'] =time.time()
limits ['comment_limit_per_hour'] = randint(30,50)
comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
comment.click()
sleep(randint(1,5))
# Use "randint" to post different comments
rand_comment = randint(0, len(comments_list) - 1)
#rand_comment=random.randrange(0,5)
if num_of_followers>20000:
pick_comment = 'If you are interested being a brand ambassador please leave us a message on our page'
else:
pick_comment=comments_list[rand_comment]
comment = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > textarea")
comment.send_keys(pick_comment)
sleep(randint(1,5))
comment_click = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.eo2As > section.sH9wk._JgwE > div > form > button > div")
comment_click.click()
sleep(randint(30,60))
# Increment "comments" counter
my_dict['comments'] += 1
my_dict['total_actions'] +=1
my_dict_cum['comments'] += 1
my_dict_cum['total_actions'] +=1
logging.debug('comment:total_comments {}: total_actions {}'.format( my_dict_cum['comments'],my_dict_cum['total_actions']))
# In[40]:
for hashtag in hashtag_list:
# Navigate to Instagram "explore/tags" page for current hashtag
webdriver.get("https://www.instagram.com/explore/tags/"+hashtag+"/")
sleep(randint(1,2))
# Click on the second thumbnail in the current hashtag's explore page
first_thumbnail = webdriver.find_element_by_css_selector("#react-root > section > main > article > div.EZdmt > div > div > div:nth-child(1) > div:nth-child(2) > a > div > div._9AhH0")
first_thumbnail.click()
sleep(randint(1,2))
try:
# Iterate through the current hashtag
for _ in range(posts_to_reach_per_hashtag):
try:
follow_ = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.UE9AK > div > header > div.o-MQd.z8cbW > div.PQo_0.RqtMr > div.bY2yH > button > div")
username = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.pbNvD.QZZGH.bW6vo > div > article > div > div.HP0qD > div > div > div.UE9AK > div > header > div.o-MQd.z8cbW > div.PQo_0.RqtMr > div.e1e1d > div > span > a").text
number_of_followers = num_followers(username)
sleep(randint(1,3))
if my_dict['total_actions']>=340 and my_dict['total_actions']<350:
unfollow()
elif my_dict['total_actions']>=350:
print('Actions during this session:', my_dict)
print('Total actions:', my_dict_cum)
refresh(un)
sleep(86400)
my_dict['followed'] = 0
my_dict['unfollowed']=0
my_dict['likes'] = 0
my_dict['comments'] = 0
my_dict['total_actions'] = 0
my_dict_time ['like_timer'] =time.time()
my_dict_time ['follow_timer'] =time.time()
my_dict_time ['unfollow_timer']=time.time()
my_dict_time ['comment_timer'] =time.time()
elif follow_.text == "Follow" and username != "jewelrymdjewelry" and number_of_followers >= 100:
follow()
sleep(randint(1,3))
like()
sleep(randint(1,3))
comment(number_of_followers)
sleep(randint(1,3))
# Click "next" to go to next picture within the same hashtag
next = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.Z2Inc._7c9RR > div > div.l8mY4.feth3 > button")
next.click()
sleep(randint(2,5))
except Exception as ex:
# Write out what type of Exception
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
driver_len = len(webdriver.window_handles) #fetching the Number of Opened tabs
if driver_len > 1: # Will execute if more than 1 tabs found.
for i in range(driver_len - 1, 0, -1):
webdriver.switch_to.window(webdriver.window_handles[i]) #will close the last tab first.
webdriver.close()
webdriver.switch_to.window(webdriver.window_handles[0]) # Switching the driver focus to First tab.
# Click "next" to go to next picture within the same hashtag
next = webdriver.find_element_by_css_selector("body > div.RnEpo._Yhr4 > div.Z2Inc._7c9RR > div > div.l8mY4.feth3 > button")
next.click()
sleep(randint(2,5))
except Exception as ex:
# Write out what type of Exception
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
driver_len = len(webdriver.window_handles) #fetching the Number of Opened tabs
if driver_len > 1: # Will execute if more than 1 tabs found.
for i in range(driver_len - 1, 0, -1):
webdriver.switch_to.window(webdriver.window_handles[i]) #will close the last tab first.
webdriver.close()
webdriver.switch_to.window(webdriver.window_handles[0]) # Switching the driver focus to First tab.
print(message)
# In[ ]:
print(my_dict_cum)
# In[ ]:
``` |
{
"source": "jingwoo4710/mmdetection-icevision",
"score": 2
} |
#### File: mmdetection-icevision/data-preprocess/ice-vision-data-merger-pipeline.py
```python
import os
from shutil import copyfile
import json
print("cwd = ", os.getcwd())
current_folder = os.getcwd()
#extracted_train_data = os.path.join(current_folder, "extracted_train_data")
extracted_train_data = "/dataset/training/"
#annotations_dir = '/data/annotations'
copied_train_data = "/data/dataset/training/"
# In[100]:
data_location = "/dataset/training/"
files_list = []
neural_net_list = []
linear_mappings = []
for subdir, dirs, files in os.walk(data_location):
for file in set(files):
if file.endswith('.pnm'):
current_file = os.path.join(subdir, file)
files_list.append(current_file)
print(len(files_list))
prev_file_number = 0
prev_file_dir_name = ""
prev_neural_net = ""
counter = 0
linear_list = []
########################################## EDIT #####################################################################
for file in sorted(files_list):
file_name_split = file.split('/')
file_number = int(file_name_split[-1].split(".pnm")[0])
dir_name = file_name_split[-3] + file_name_split[-2]
counter += file_number - prev_file_number
if(prev_file_dir_name != dir_name):
counter = 0
neural_net_list.append(file)
prev_neural_net = file
linear_list = []
else:
if(counter >= 5):
neural_net_list.append(file)
linear_mappings.append({ "linear_list": linear_list, "predecessor": prev_neural_net, "successor": file })
counter = 0
prev_neural_net = file
linear_list = []
else:
#linear_mappings[file] = "linear"
linear_list.append(file)
# print("making linear", file)
prev_file_number = file_number
prev_file_dir_name = dir_name
with open('linear_mappings.json', 'w') as outfile:
json.dump(linear_mappings, outfile)
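# --- Illustrative sketch (not in the original) ---
# A toy, single-directory version of the selection above: roughly every 5th frame becomes
# a detector keyframe and the frames in between are scheduled for linear interpolation
# between the surrounding keyframes.
def _demo_keyframe_selection(frame_numbers=range(16)):
    keyframes, interpolated, counter, prev = [], [], 0, None
    for n in frame_numbers:
        if prev is None:
            keyframes.append(n)        # first frame of a directory
        else:
            counter += n - prev
            if counter >= 5:
                keyframes.append(n)
                counter = 0
            else:
                interpolated.append(n)
        prev = n
    return keyframes, interpolated     # -> [0, 5, 10, 15], [1, 2, 3, 4, 6, 7, ...]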
# for file in file_body:
# if (file_body[file] == "neuralnet"):
# print(file)
# for file in file_body:
# if (file_body[file] == "linear"):
# print(file)
# In[97]:
#neural_net_list[] - list of images to be sent to neural network
import os
import glob
from mmdet.apis import init_detector, inference_detector, show_result, write_result
import time
import datetime
config_file = '/root/ws/mmdetection-icevision/configs/dcn/cascade_rcnn_dconv_c3-c5_r50_fpn_1x_all_classes.py'
#model = init_detector(config_file, checkpoint_file, device='cuda:0')
#epch_count = 1
#for epochs in glob.glob(os.path.join('/data_tmp/icevisionmodels/cascade_rcnn_dconv_c3-c5_r50_fpn_1x_all_classes/', '*.pth')):
checkpoint_file = '/data/trained_models/cascade_rcnn_dconv_c3-c5_r50_fpn_1x_135_classes/epoch_15.pth'
#checkpoint_file = epochs
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')
TEST_RESULT_PATH = "/data/test_results/"
img_count = 0
#print(img_count)
FINAL_ONLINE_TEST_PATH = "/data/train_subset/"
#FINAL_ONLINE_TEST_PATH = '/data/test_results/2018-02-13_1418/left/'
#for TEST_SET_PATH in (FINAL_ONLINE_TEST_PATH + "2018-02-16_1515_left/", FINAL_ONLINE_TEST_PATH + "2018-03-16_1424_left/", FINAL_ONLINE_TEST_PATH + "2018-03-23_1352_right/"):
#print(TEST_SET_PATH)
#imgs = glob.glob('/dataset/training/**/*.pnm', recursive=True)
for img in neural_net_list:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print ("time =", st)
#imgs = ['test.jpg', '000000.jpg']
#print(img) # /dataset/training/2018-02-13_1418/left/020963.pnm --> required format 2018-02-13_1418_left/000033
name = img.split("/") # ['', 'home', 'luminosity', 'ws', 'icevision', 'data', 'final', '2018-02-16_1515_left', '001887.jpg']
#print(name)
base = name[-1].split(".")[0] # ['001887', 'jpg']
#print(base)
name = name[-3] + "_" + name[-2]
tmp = name
name = name + "/" + base
#print(name)
######## Remove
#name_tmp = base.split("_")
#name = name_tmp[0] + "_" + name_tmp[1] + "_" + name_tmp[2] + "/" + name_tmp[-1]
#name = "annotation_train_subset/" + base
#base_list = base.split("_")
#name = base_list[0] + "_" + base_list[1] + "_" + base_list[2] + "/" + base_list[3]
##########Remove
result = inference_detector(model, img)
#write_result(name, result, model.CLASSES, out_file=os.path.join(TEST_RESULT_PATH, 'my_test_multi_scale_epch_{}.tsv'.format(epch_count))) # use name instead of name1 for hackathon submission
#show_result(img, result, model.CLASSES, out_file= TEST_RESULT_PATH + 'bboxs/' + tmp + ".pnm")
write_result(name, result, model.CLASSES, out_file=os.path.join(TEST_RESULT_PATH, 'my_test_epch_15_interpolation.tsv')) # use name instead of name1 for hackathon submission
img_count+=1
#print(img_count)
print("num = %d name = %s" %(img_count,name))
# In[103]:
import os
import glob
import csv
from shutil import copyfile
def linear_interpolation(pred, succ, lin_images, input_tsv, step, out_tsv):
lin_images.sort()
succ_base_name = os.path.basename(succ).split(".")[0]
pred_base_name = os.path.basename(pred).split(".")[0]
#copyfile(input_tsv, out_tsv)
tsv_file = csv.reader(open(input_tsv, "r"), delimiter="\t")
prd_classes = []
suc_classes = []
prd_keys = set()
suc_keys = set()
for row in tsv_file:
# print("row = ", row)
# print('ped_keys = ', prd_keys)
# print('suc_keys = ', suc_keys)
# frame xtl ytl xbr ybr class temporary data
# 2018-02-13_1418_left/020963 679 866 754 941 3.27
prd_record = {} #defaultdict(list)
suc_record = {} #defaultdict(list)
#print("row[0] = ", row[0])
x = os.path.join(os.path.basename(os.path.dirname(pred)),os.path.basename(pred))
y = os.path.basename(os.path.dirname(os.path.dirname(pred)))
dict_key = y + "_" + x
x2 = os.path.join(os.path.basename(os.path.dirname(succ)),os.path.basename(succ))
y2 = os.path.basename(os.path.dirname(os.path.dirname(succ)))
dict_key2 = y2 + "_" + x2 # reconstructed to mirror dict_key above
# print('y = ', y)
# print("x = ", x)
# print("dict_key = ", dict_key.split('.')[0])
if row[0] == dict_key.split('.')[0]:
if row[5] not in prd_keys:
print("pred check cleared")
prd_record["class"] = row[5]
prd_record["xtl"] = row[1]
prd_record["ytl"] = row[2]
prd_record["xbr"] = row[3]
prd_record["ybr"] = row[4]
print("prd_record['ybr'] = ", prd_record["ybr"])
prd_keys.add(row[5])
# #prd_record[row[5]].append(row[1]) #xtl
# prd_record[row[5]].append(row[2]) #ytl
# prd_record[row[5]].append(row[3]) #xbr
# prd_record[row[5]].append(row[4]) #ybr
prd_classes.append(prd_record)
else:
for prd_class in prd_classes:
if prd_class["class"] == row[5]:
del prd_class
print("del prd_class")
elif row[0] == dict_key2.split('.')[0]:
print("Succ check cleared")
if row[5] not in suc_keys:
suc_record["class"] = row[5]
suc_record["xtl"] = row[1]
suc_record["ytl"] = row[2]
suc_record["xbr"] = row[3]
suc_record["ybr"] = row[4]
suc_keys.add(row[5])
# suc_record[row[5]].append(row[1])
# suc_record[row[5]].append(row[2])
# suc_record[row[5]].append(row[3])
# suc_record[row[5]].append(row[4])
suc_classes.append(suc_record)
else:
for suc_class in suc_classes:
if suc_class["class"] == row[5]:
del suc_class
print("del prd_class")
#print("prd_keys = ", prd_keys)
common_classes = prd_keys.intersection(suc_keys)
print(common_classes)
for common_class in common_classes:
for prd_class in prd_classes:
if prd_class["class"] == common_class:
for suc_class in suc_classes:
if suc_class["class"] == common_class:
xtl_gr = (int(prd_class["xtl"]) - int(suc_class["xtl"])) / step
ytl_gr = (int(prd_class["ytl"]) - int(suc_class["ytl"])) / step
xbr_gr = (int(prd_class["xbr"]) - int(suc_class["xbr"])) / step
ybr_gr = (int(prd_class["ybr"]) - int(suc_class["ybr"])) / step
print(xtl_gr, ytl_gr, xbr_gr, ybr_gr)
for f in lin_images:
curr_base = os.path.basename(f).split(".")[0]
# print("curr_base = ", curr_base)
# print("pred_base_name = ", pred_base_name)
# print("f = ", f)
factor = int(curr_base) - int(pred_base_name)
curr_xtl = int(prd_class["xtl"]) + (factor * xtl_gr)
curr_ytl = int(prd_class["ytl"]) + (factor * ytl_gr)
curr_xbr = int(prd_class["xbr"]) + (factor * xbr_gr)
curr_ybr = int(prd_class["ybr"]) + (factor * ybr_gr)
temp = ''
with open(out_tsv, mode = 'a') as result_file:
result_file_writer = csv.writer(result_file, delimiter = '\t')
result_file_writer.writerow([f, str(curr_xtl), str(curr_ytl), str(curr_xbr), str(curr_ybr), prd_class["class"], temp, temp])
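# Arithmetic of the loop above, as implemented: for an intermediate frame,
# factor = frame_number - predecessor_frame_number and each box coordinate is
# predecessor_value + factor * gradient, with gradient = (pred - succ) / step.
# Example with step = 5, pred_xtl = 100, succ_xtl = 110: gradient = -2, so the
# frame two steps after the predecessor gets xtl = 100 + 2 * (-2) = 96. Note the
# sign convention: (succ - pred) / step would instead move the boxes toward the
# successor frame.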
# In[105]:
#load the linear mappings.json
import csv
linear_mappings = "/root/ws/mmdetection-icevision/data-preprocess/linear_mappings.json"
input_tsv = os.path.join(TEST_RESULT_PATH, 'my_test_epch_15_interpolation_copy.tsv')
out_tsv = os.path.join(TEST_RESULT_PATH, 'my_test_epch_15_interpolation_copy.tsv')
interpolation_mappings = []
with open(linear_mappings, 'r') as f:
interpolation_mappings = json.load(f)
for i in interpolation_mappings:
pred = i["predecessor"]
succ = i['successor']
interpol_list = i['linear_list']
step = 5
linear_interpolation(pred, succ, interpol_list, input_tsv, step, out_tsv)
# if i["predecessor"] == neural_net_list[100]:
# break
# In[70]:
# trial code
# extracted_train_data = "/home/sgj/temp/test_data/2018-03-16_1324"
# for subdir, dirs, files in os.walk(extracted_train_data):
# print("subdir = ", subdir)
# for file in files:
# if file.endswith('.jpg'):
# current_file = os.path.join(subdir, file)
# #folder_name = os.path.basename(os.path.dirname(current_file))
# #expected_name = folder_name + '_' + os.path.basename(current_file)
# y = file.split("_")
# expected_name = y[0] + "_" + y[1] + "_left_jpgs_" + y[2]
# absolute_expected_name = os.path.join(os.path.dirname(current_file),expected_name)
# os.rename(current_file, absolute_expected_name)
# In[37]:
extracted_train_data = "/home/sgj/temp/train_data/2018-02-13_1418_left_jpgs"
for subdir, dirs, files in os.walk(extracted_train_data):
print("subdir = ", subdir)
for file in files:
if file.endswith('.jpg'):
current_file = os.path.join(subdir, file)
folder_name = os.path.basename(os.path.dirname(current_file))
expected_name = folder_name + '_' + os.path.basename(current_file)
absolute_expected_name = os.path.join(os.path.dirname(current_file),expected_name)
os.rename(current_file, absolute_expected_name)
# In[25]:
# move out un-annotated images -
# ARGS -
# Annotations data tsv
# Extracted images folder
# Destination folder for annotated_data
import os
annotation_data_tsv_folder = "/home/sgj/nvme/ice-vision/annotations/test/all_validation_annotations"
extracted_images_folder = "/home/sgj/temp/test_data/all_validation_images"
#dest_annotated_imgs = "/home/sgj/nvme/ice-vision/annotated_data/val"
dest_annotated_imgs = "/home/sgj/temp/ice-vision/annotated_data/val"
os.makedirs(dest_annotated_imgs)
img_count = 0
for root, dirs, files in os.walk(annotation_data_tsv_folder):
for name in files:
if name.endswith('.tsv'):
prefix = name.split(".")[0]
image_name = prefix + ".jpg"
expected_img_path = os.path.join(extracted_images_folder, image_name)
new_image_path = os.path.join(dest_annotated_imgs, image_name)
if os.path.exists(expected_img_path):
img_count = img_count + 1
os.rename(expected_img_path, new_image_path)
else:
print("image missing-----------------------")
print("total images = ", img_count)
# In[18]:
temp = "2018-02-13_1418_left_jpgs_014810.tsv"
temp.split(".")[0]
# In[3]:
for subdir, dirs, files in os.walk(copied_train_data):
print("subdir = ", subdir)
for file in files:
if file.endswith('.pnm'):
current_file = os.path.join(subdir, file)
print('current file = ', current_file)
cam_dir = current_file.split('/')[-2]
#print("cam dir = ", cam_dir)
date_dir = current_file.split('/')[-3]
#print("date_dir = ", date_dir)
expected_folder = '/data/train_subset/'
expected_file_name = date_dir + "_" + cam_dir + "_" + os.path.basename(current_file)
expected_file_path = os.path.join(expected_folder, expected_file_name)
#copyfile(current_file, dst_file_path)
os.rename(current_file, expected_file_path)
print("expected_file_path = ", expected_file_path)
# In[4]:
# In[ ]:
``` |
{
"source": "jingwoo4710/Proj2_visualDet3D",
"score": 2
} |
#### File: networks/detectors/KM3D.py
```python
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch
import math
import time
from torchvision.ops import nms
from visualDet3D.networks.utils import DETECTOR_DICT
from visualDet3D.networks.detectors.KM3D_core import KM3DCore
from visualDet3D.networks.heads.km3d_head import KM3DHead
from visualDet3D.networks.lib.blocks import AnchorFlatten
from visualDet3D.networks.lib.look_ground import LookGround
from visualDet3D.networks.lib.ops.dcn.deform_conv import DeformConv
@DETECTOR_DICT.register_module
class KM3D(nn.Module):
"""
KM3D
"""
def __init__(self, network_cfg):
super(KM3D, self).__init__()
self.obj_types = network_cfg.obj_types
self.build_head(network_cfg)
self.build_core(network_cfg)
self.network_cfg = network_cfg
def build_core(self, network_cfg):
self.core = KM3DCore(network_cfg.backbone)
def build_head(self, network_cfg):
self.bbox_head = KM3DHead(
**(network_cfg.head)
)
def training_forward(self, img_batch, annotations, meta):
"""
Args:
img_batch: [B, C, H, W] tensor
annotations: check visualDet3D.utils.utils compound_annotation
meta:
calib: visualDet3D.kitti.data.kitti.KittiCalib or anything with obj.P2
epoch: current_epoch
Returns:
cls_loss, reg_loss: tensor of losses
loss_dict: [key, value] pair for logging
"""
features = self.core(dict(image=img_batch, P2=meta['P2']))
output_dict = self.bbox_head(features)
loss, loss_dict = self.bbox_head.loss(output_dict, annotations, meta)
return loss, loss_dict
def test_forward(self, img_batch, P2):
"""
Args:
img_batch: [B, C, H, W] tensor
calib: visualDet3D.kitti.data.kitti.KittiCalib or anything with obj.P2
Returns:
results: a nested list:
result[i] = detection_results for obj_types[i]
each detection result is a list [scores, bbox, obj_type]:
bbox = [bbox2d(length=4) , cx, cy, z, w, h, l, alpha]
"""
assert img_batch.shape[0] == 1 # we recommend an image batch size of 1 for testing
features = self.core(dict(image=img_batch, P2=P2))
output_dict = self.bbox_head(features)
scores, bboxes, cls_indexes = self.bbox_head.get_bboxes(output_dict, P2, img_batch)
return scores, bboxes, cls_indexes
def forward(self, inputs):
if isinstance(inputs, list) and len(inputs) == 3:
img_batch, annotations, meta = inputs
return self.training_forward(img_batch, annotations, meta)
else:
img_batch, calib = inputs
return self.test_forward(img_batch, calib)
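# Dispatch convention of forward(): a 3-element list is treated as training
# input (img_batch, annotations, meta); any other input is unpacked as
# (img_batch, calib) and routed to test_forward().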
``` |
{
"source": "JingWui/SynBioPython",
"score": 3
} |
#### File: synbiopython/genbabel/utilities.py
```python
import datetime
def getfilename():
"""Return the filename based on the datetime.
:return: the filename in year-month-day_hour-minute
:rtype: str
"""
timenow = datetime.datetime.now()
year = str(timenow.year % 100)
month = str(timenow.month).zfill(2)
day = str(timenow.day).zfill(2)
hour = str(timenow.hour).zfill(2)
minute = str(timenow.minute).zfill(2)
filename = year + month + day + "_" + hour + minute
return filename
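# Illustrative output, assuming the system clock reads 2021-03-05 09:07:
# getfilename() -> "210305_0907"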
``` |
{
"source": "JingWui/Test_mybinder",
"score": 2
} |
#### File: synbiopython/codon/table.py
```python
from collections import defaultdict
import os.path
import re
from urllib.request import urlretrieve
from synbiopython.codon import get_tax_id, DATA_DIR
_CODON_REGEX = r"([ATGCU]{3}) ([A-Z]|\*) (\d.\d+)"
def get_table(table_id, dna=True):
"""Get table."""
tax_id = get_tax_id(table_id)
if tax_id:
results = defaultdict(dict)
content = _get_content(tax_id)
for vals in sorted(
re.findall(_CODON_REGEX, content), key=lambda x: (x[1], x[2])
):
results[vals[1]][_get_codon(vals[0], dna)] = float(vals[2])
return dict(results)
return None
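# Hypothetical usage sketch (how table_id is resolved depends on get_tax_id, and
# the frequencies shown are made-up placeholders):
# get_table("Escherichia coli") -> {"A": {"GCA": 21.1, "GCC": 27.5, ...},
#                                   "*": {"TAA": 2.0, ...}, ...}
# Keys are one-letter amino acids (or "*" for stop); each maps codons (DNA
# alphabet when dna=True, RNA otherwise) to the usage values parsed from Kazusa.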
def _get_content(tax_id):
"""Get Kazusa content, either from cached file or remotely."""
target_file = os.path.join(DATA_DIR, "%s.txt" % tax_id)
if not os.path.exists(target_file):
url = (
"http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?"
+ "aa=1&style=N&species=%s" % tax_id
)
urlretrieve(url, target_file)
with open(target_file) as fle:
return fle.read()
def _get_codon(codon, dna):
"""Get codon."""
return codon.replace("U", "T") if dna else codon
```
#### File: genbabel/gensbolconv/GenSBOLconv.py
```python
import os
import requests
from Bio import SeqIO
from Bio.Graphics import GenomeDiagram
from reportlab.lib import colors
from reportlab.lib.units import cm
class GenSBOLconv:
"""Class to convert standard files (SBOL1, SBOL2, GenBank, Fasta, GFF3)."""
@staticmethod
def export_PlasmidMap(gbfile, filename=None):
""" Export Linear and Circular Plasmid Map for the imported GenBank file.
"""
record = SeqIO.read(gbfile, "genbank")
gd_diagram = GenomeDiagram.Diagram(record.id)
gd_track_for_features = gd_diagram.new_track(1, name="Annotated Features")
gd_feature_set = gd_track_for_features.new_set()
for feature in record.features:
if feature.type == "primer" or (feature.type == "misc_feature"):
continue
# if (feature.type != "CDS"):
# # Exclude this feature
# continue
if len(gd_feature_set) % 2 == 0:
color = colors.lightblue
else:
color = colors.blue
gd_feature_set.add_feature(
feature,
sigil="ARROW",
color=color,
label_size=12,
label_angle=0,
label=True,
)
# Draw Linear map from genbank
gd_diagram.draw(
format="linear",
orientation="landscape",
pagesize="A4",
fragments=4,
start=0,
end=len(record),
)
if filename is None:
linfile = "plasmid_linear.png"
circfile = "plasmid_circular.png"
else:
linfile = filename[0]
circfile = filename[1]
gd_diagram.write(linfile, "PNG")
# Draw circular map from genbank
gd_diagram.draw(
format="circular",
circular=True,
pagesize=(25 * cm, 20 * cm), # pagesize=(35 * cm, 30 * cm),
start=0,
end=len(record),
circle_core=0.5,
)
# gd_diagram.write("plasmid_circular.pdf", "PDF")
gd_diagram.write(circfile, "PNG")
return record.id
@staticmethod
def SBOLValidator(input_file, Output, uri_Prefix=""):
"""Code to invoke the SBOL Validator server over the internet."""
file = open(input_file).read()
request = {
"options": {
"language": Output,
"test_equality": False,
"check_uri_compliance": False,
"check_completeness": False,
"check_best_practices": False,
"fail_on_first_error": False,
"provide_detailed_stack_trace": False,
"subset_uri": "",
"uri_prefix": uri_Prefix,
"version": "",
"insert_type": False,
"main_file_name": "main file",
"diff_file_name": "comparison file",
},
"return_file": True,
"main_file": file,
}
# send POST request to the specified url (Response [200] means ok)
response = requests.post(
"https://validator.sbolstandard.org/validate/", json=request
)
return response
@staticmethod
def get_outputfile_extension(Filetype):
""" Get the output file extension based on the requested output language.
"""
switcher = {
"GenBank": ".gb",
"FASTA": ".fasta",
"GFF3": ".gff",
"SBOL1": ".sbol",
"SBOL2": ".sbol",
}
return switcher.get(Filetype, "unknown filetype")
def export_OutputFile(self, input_filename, Response, Output, outputfile=None):
"""Export the converted output file."""
filename_w_ext = os.path.basename(input_filename)
filename, _ = os.path.splitext(filename_w_ext)
if Response.json()["valid"]:
# export the result from json into the specific output file format
if outputfile is None:
output_filename = filename + self.get_outputfile_extension(Output)
else:
output_filename = outputfile
print("Output file: ", output_filename)
with open(output_filename, "w", newline="\n") as f:
f.write(Response.json()["result"])
else:
print("Error message: ", Response.json()["errors"])
def AutoRunSBOLValidator(self, Input_file, Output, uri_Prefix="", **kwargs):
"""Wrapper function for the SBOL Validator.
Parameters:
Input_file: input file or path to input file
Output: the Output file type (GenBank, FASTA, GFF3, SBOL1, SBOL2)
uri_Prefix: '' as default, URI Prefix is required for FASTA and GenBank
input conversion
Returns:
the validity of the Response, and export output file."""
Response = self.SBOLValidator(Input_file, Output, uri_Prefix)
output_filename = None
for key, value in kwargs.items():
if "outputfile" in key:
output_filename = value
self.export_OutputFile(Input_file, Response, Output, outputfile=output_filename)
return "valid: " + str(Response.json()["valid"])
```
#### File: genbabel/tests/test_gensbolconv.py
```python
import os
import pytest
import synbiopython.genbabel as stdgen
path0 = os.path.abspath(os.path.dirname(__file__))
sbolfile = "sequence1.xml"
sbolpath = os.path.join(path0, "data", sbolfile)
gbfile = "Testsequence1.gb"
gbpath = os.path.join(path0, "data", gbfile)
uri_Prefix_isbol = ""
uri_Prefix_igb = "http://synbiohub.org/public/igem"
stdconv = stdgen.GenSBOLconv()
@pytest.mark.stdconv
def test_sboltogb(tmpdir):
"""Test SBOL file conversion to Genbank file."""
path = os.path.join(str(tmpdir), sbolfile.split(".")[0] + ".gb")
Output = "GenBank"
uri_Prefix = uri_Prefix_isbol
Response = stdconv.AutoRunSBOLValidator(
sbolpath, Output, uri_Prefix, outputfile=path
)
print("tmppath: ", path)
assert Response == "valid: True"
assert os.path.exists(path)
@pytest.mark.stdconv
def test_sboltofasta(tmpdir):
"""Test SBOL file conversion to FASTA file."""
path = os.path.join(str(tmpdir), sbolfile.split(".")[0] + ".fasta")
Output = "FASTA"
uri_Prefix = uri_Prefix_isbol
Response = stdconv.AutoRunSBOLValidator(
sbolpath, Output, uri_Prefix, outputfile=path
)
assert Response == "valid: True"
assert os.path.exists(path)
@pytest.mark.stdconv
def test_sboltogff3(tmpdir):
"""Test SBOL file conversion to GFF3 file."""
path = os.path.join(str(tmpdir), sbolfile.split(".")[0] + ".gff")
Output = "GFF3"
uri_Prefix = uri_Prefix_isbol
Response = stdconv.AutoRunSBOLValidator(
sbolpath, Output, uri_Prefix, outputfile=path
)
assert Response == "valid: True"
assert os.path.exists(path)
@pytest.mark.stdconv
def test_gbtosbol(tmpdir):
"""Test Genbank file conversion to SBOL file."""
path = os.path.join(str(tmpdir), gbfile.split(".")[0] + ".sbol")
Output = "SBOL2"
uri_Prefix = uri_Prefix_igb
Response = stdconv.AutoRunSBOLValidator(gbpath, Output, uri_Prefix, outputfile=path)
assert Response == "valid: True"
assert os.path.exists(path)
@pytest.mark.stdconv
def test_gbtofasta(tmpdir):
"""Test Genbank file conversion to FASTA."""
path = os.path.join(str(tmpdir), gbfile.split(".")[0] + ".fasta")
Output = "FASTA"
uri_Prefix = uri_Prefix_igb
Response = stdconv.AutoRunSBOLValidator(gbpath, Output, uri_Prefix, outputfile=path)
assert Response == "valid: True"
assert os.path.exists(path)
@pytest.mark.stdconv
def test_gbtogff3(tmpdir):
"""Test Genbank file conversion to GFF3."""
path = os.path.join(str(tmpdir), gbfile.split(".")[0] + ".gff")
Output = "GFF3"
uri_Prefix = uri_Prefix_igb
# path = None
Response = stdconv.AutoRunSBOLValidator(gbpath, Output, uri_Prefix, outputfile=path)
assert Response == "valid: True"
assert os.path.exists(path)
# assert (Response == "valid: True")
@pytest.mark.stdconv
def test_export_PlasmidMap(tmpdir):
"""Test Plasmid Map Export function"""
path1 = os.path.join(str(tmpdir), "plasmid_linear.png")
path2 = os.path.join(str(tmpdir), "plasmid_circular.png")
recordid = stdconv.export_PlasmidMap(gbpath, (path1, path2))
assert recordid == "BBa_K874103.1"
assert os.path.exists(path1)
assert os.path.exists(path2)
```
#### File: genbabel/tests/test_sbmlgen.py
```python
import os
import re
import pytest
import synbiopython.genbabel as stdgen
import synbiopython.genbabel.sbmlgen.simplesbml as simplesbml
sbmlgen = stdgen.SBMLgen()
""" Testing SBMLgen module """
dx = "sigma *(y - x)"
dy = "rho*(z - y)"
dz = "beta*(y - z)"
ODE = [dx, dy, dz]
variable = ["x", "y", "z"]
Init = [0.8, 1.8, 19]
paramName = ["sigma", "rho", "beta"]
param = [20.0, 28, 3]
paramUnit = ["s-1", "s-1", "s-1"]
""" Testing simplesbml module, adopted from simplesbml original module """
model = simplesbml.sbmlModel()
# Create new model
model.addSpecies("Glucose", 3.4)
# Add 3.4 moles of species 'Glucose'
model.addSpecies("[ATP]", 1.0)
# Add 1.0 M of species 'ATP' (in concentration instead of amount)
model.addSpecies("[G6P]", 0.0)
model.addSpecies("[ADP]", 0.0)
model.addParameter("k1", 0.1)
# Default units are 1/s
model.addParameter("fracATP", 1.0, units="dimensionless")
# For assignment rule later
model.addReaction(
["Glucose", "ATP"], ["2 G6P", "ADP"], "kp*Glucose*ATP", local_params={"kp": 0.1}
)
# Glucose+ATP -> 2G6P+ADP
model.addEvent("G6P == 1", {"k1": "0.3"})
# When [G6P] = 1 mol/L, k1 is reassigned as 0.3
model.addAssignmentRule("fracATP", "ATP/(ATP+ADP)")
# Parameter fracATP is equal to ATP/(ATP+ADP)
@pytest.mark.sbmlgen
def test_exportsbml(tmpdir):
"""Test the SBML file generation and exportation."""
path = os.path.join(str(tmpdir), "Testsbml.xml")
sbml = sbmlgen.exportsbml(
ODE, variable, Init, paramName, param, paramUnit, outputfile=path
)
specieslist = re.findall(r'species id="(.*?)"', sbml, re.MULTILINE)
print(specieslist)
assert specieslist == ["x", "y", "z"]
assert os.path.exists(path)
@pytest.mark.sbmlgen
def test_simplesbml(tmpdir):
"""Test the additional functions available inside the simplesbml."""
code = simplesbml.writeCode(model.document)
specieslist = re.findall(r"species_id='(.*?)'", code, re.MULTILINE)
paramlist = re.findall(r"param_id='(.*?)'", code, re.MULTILINE)
rxnlist = re.findall(r"expression='(.*?)'", code, re.MULTILINE)
eventlist = re.findall(r"trigger='(.*?)'", code, re.MULTILINE)
asglist = re.findall("var='(.*?)', math='(.*?)'", code, re.MULTILINE)
path = os.path.join(str(tmpdir), "Testsimplesbml.xml")
f1 = open(path, "w+")
f1.write(model.toSBML())
f1.close()
assert specieslist == ["Glucose", "[ATP]", "[G6P]", "[ADP]"]
assert paramlist == ["k1", "fracATP"]
assert rxnlist == ["kp * Glucose * ATP"]
assert eventlist == ["G6P == 1"]
assert asglist == [("fracATP", "ATP / (ATP + ADP)")]
assert os.path.exists(path)
```
#### File: genbabel/tests/test_sedmlomexgen.py
```python
import os
import re
import pytest
import synbiopython.genbabel as stdgen
path0 = os.path.abspath(os.path.dirname(__file__))
sbmlfile = "gateNOT_d30_LB_state1_.xml"
sbmlpath = os.path.join(path0, "data", sbmlfile)
phrasedml_str = """
model1 = model "{}"
sim1 = simulate uniform(0, 720, 1000)
sim1.algorithm = rk4
task1 = run sim1 on model1
model2 = model model1 with state = 0
task2 = run sim1 on model2
plot "Fig 1: Pep (NOT gate 30C LB)" task1.time vs task1.Pep2, task2.Pep2
"""
omexgen = stdgen.SEDMLOMEXgen()
@pytest.mark.omexgen
def test_find_between():
"""Test the substring finding."""
s = "SEDMLOMEXgen"
first = "SEDML"
last = "gen"
substr = omexgen.find_between(s, first, last)
assert substr == "OMEX"
@pytest.mark.omexgen
def test_sbmltoantimony():
"""Test the SBML conversion to antimony string."""
antimony_str = omexgen.sbmltoantimony(sbmlpath)
model = re.search("model (.*)\n", antimony_str).group(1)
assert model == "*gateNOT_d30_LB_state1_()"
@pytest.mark.omexgen
def test_phrasedmltosedml(tmpdir):
"""Test the phrasedml string conversion SEDML."""
path = os.path.join(str(tmpdir), "Testsedml.xml")
sedml_str = omexgen.phrasedmltosedml(phrasedml_str, sbmlpath, outputfile=path)
model = re.search('source="(.*)"', sedml_str).group(1)
assert model == sbmlpath
assert os.path.exists(path)
@pytest.mark.omexgen
def test_exportomex(tmpdir):
"""Test the COMBINE OMEX file generation and exportation."""
antimony_str = omexgen.sbmltoantimony(sbmlpath)
path = os.path.join(str(tmpdir), "archive.omex")
omex_str, _ = omexgen.export_omex(antimony_str, phrasedml_str, outputfile=path)
antimonymodel = re.search("model (.*)\n", omex_str).group(1)
phrasedmlmodel = re.search('model1 = model "(.*)"', omex_str).group(1)
phrasedmlmodel = "*" + phrasedmlmodel + "()"
assert antimonymodel == phrasedmlmodel
assert os.path.exists(path)
@pytest.mark.omexgen
def test_getsbmlbiomodel(tmpdir):
"""Test getting SBML model from biomodel."""
path = os.path.join(str(tmpdir), "Testbiomodel.xml")
biomodelsbml_str = omexgen.get_sbml_biomodel("BIOMD0000000012", outputfile=path)
model = re.search('<model id="(.*?)(")', biomodelsbml_str).group(1)
assert model == "BIOMD0000000012"
assert os.path.exists(path)
```
#### File: synbiopython/genbabel/utilities.py
```python
import datetime
def getfilename():
"""Generate the filename based on the datetime."""
timenow = datetime.datetime.now()
year = str(timenow.year % 100)
month = str(timenow.month).zfill(2)
day = str(timenow.day).zfill(2)
hour = str(timenow.hour).zfill(2)
minute = str(timenow.minute).zfill(2)
filename = year + month + day + "_" + hour + minute
return filename
```
#### File: lab_automation/containers/helper_functions.py
```python
import math
import re
def compute_rows_columns(num_wells):
"""Convert 96->(8,12), 384->(16,24), etc."""
a = math.sqrt(num_wells / 6)
n_rows = int(round(2 * a))
n_columns = int(round(3 * a))
return n_rows, n_columns
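# Worked example: num_wells = 96 -> a = sqrt(96 / 6) = 4 -> (8, 12) rows/columns;
# num_wells = 384 -> a = 8 -> (16, 24).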
def rowname_to_number(name):
"Convert A->1 Z->26 AA->27 etc."
if len(name) == 2:
return 26 * rowname_to_number(name[0]) + rowname_to_number(name[1])
try:
return "ABCDEFGHIJKLMNOPQRSTUVWXYZ".index(name) + 1
except ValueError:
raise ValueError(name + " is not a valid row name.")
def number_to_rowname(number):
"Convert 1->A 26->Z 27->AA etc."
if number > 26:
return number_to_rowname(int(number / 26)) + number_to_rowname(number % 26)
return "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[number - 1]
def wellname_to_coordinates(wellname):
"""Convert A1->(1,1), H11->(8, 11), etc."""
rowname, colname = re.match("([a-zA-Z]+)([0-9]+)", wellname).groups()
return rowname_to_number(rowname), int(colname)
def coordinates_to_wellname(coords):
"""Convert (1,1)->A1, (4,3)->D3, (12, 12)->H12, etc."""
row, column = coords
return number_to_rowname(row) + str(column)
def wellname_to_index(wellname, num_wells, direction="row"):
""" Convert e.g. A1..H12 into 1..96
direction is either row for A1 A2 A3... or column for A1 B1 C1 D1 etc.
"""
n_rows, n_columns = compute_rows_columns(num_wells)
row, column = wellname_to_coordinates(wellname)
if direction == "row":
return column + n_columns * (row - 1)
elif direction == "column":
return row + n_rows * (column - 1)
else:
raise ValueError("`direction` must be in (row, column)")
def index_to_row_column(index, num_wells, direction="row"):
n_rows, n_columns = compute_rows_columns(num_wells)
if direction == "row":
row = 1 + int((index - 1) / n_columns)
column = 1 + ((index - 1) % n_columns)
elif direction == "column":
row, column = 1 + ((index - 1) % n_rows), 1 + int((index - 1) / n_rows)
else:
raise ValueError("`direction` must be in (row, column)")
return row, column
def index_to_wellname(index, num_wells, direction="row"):
""" Convert e.g. 1..96 into A1..H12"""
row, column = index_to_row_column(index, num_wells, direction)
return coordinates_to_wellname((row, column))
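# Illustrative round trip: wellname_to_index("C5", 96) == 5 + 12 * (3 - 1) == 29,
# and index_to_wellname(29, 96) == "C5" (row-major numbering).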
```
#### File: lab_automation/tests/test_Picklist.py
```python
import os
import pytest
import synbiopython.lab_automation as lab
source = lab.Plate96(name="Source")
destination = lab.Plate96(name="Destination")
source_well = source.wells["A1"]
destination_well = destination.wells["B2"]
volume = 25 * 10 ** (-6)
transfer_1 = lab.Transfer(source_well, destination_well, volume)
picklist = lab.PickList()
def test_add_transfer():
picklist.add_transfer(transfer=transfer_1)
assert isinstance(picklist.transfers_list[0], lab.Transfer)
def test_to_plain_string():
assert (
picklist.to_plain_string()
== "Transfer 2.50E-05L from Source A1 into Destination B2"
)
def test_to_plain_textfile(tmpdir):
path = os.path.join(str(tmpdir), "test.txt")
picklist.to_plain_textfile(filename=path)
assert os.path.exists(path)
def test_simulate():
with pytest.raises(ValueError):
picklist.simulate(inplace=False)
def test_restricted_to():
new_picklist = picklist.restricted_to(
source_well=destination_well, destination_well=destination_well
)
assert len(new_picklist.transfers_list) == 0
new_picklist_2 = picklist.restricted_to(
source_well=source_well, destination_well=destination_well
)
assert len(new_picklist_2.transfers_list) == 1
def test_sorted_by():
assert isinstance(lab.PickList().sorted_by(), lab.PickList)
def test_total_transferred_volume():
assert picklist.total_transferred_volume() == 25 * 10 ** (-6)
def test_enforce_maximum_dispense_volume():
new_picklist = picklist.enforce_maximum_dispense_volume(5 * 10 ** (-6))
assert len(new_picklist.transfers_list) == 5
def test_merge_picklists():
new_picklist = picklist.merge_picklists([picklist, picklist])
assert len(new_picklist.transfers_list) == 2
```
#### File: lab_automation/tests/test_tools.py
```python
import pytest
import numpy as np
from synbiopython.lab_automation import tools
def test_round_at():
assert tools.round_at(42.0) == 42.0
assert tools.round_at(6.28318, 10 ** (-2)) == 6.28
def test_dicts_to_columns():
test_dict = {1: np.nan, 2: {"a": np.nan}}
tools.replace_nans_in_dict(test_dict)
expected = {1: "null", 2: {"a": "null"}}
assert test_dict == expected
def test_human_seq_size():
assert tools.human_seq_size(42) == "42b"
assert tools.human_seq_size(1042) == "1.0k"
assert tools.human_seq_size(42000) == "42k"
def test_human_volume():
assert tools.human_volume(500) == "500 L"
```
#### File: lab_automation/tests/test_Transfer.py
```python
import pytest
import synbiopython.lab_automation as lab
def test_TransferError():
with pytest.raises(ValueError):
raise lab.TransferError()
source = lab.Plate96(name="Source")
destination = lab.Plate96(name="Destination")
source_well = source.wells["A1"]
destination_well = destination.wells["B2"]
volume = 25 * 10 ** (-6)
transfer = lab.Transfer(source_well, destination_well, volume)
def test_to_plain_string():
assert (
transfer.to_plain_string()
== "Transfer 2.50E-05L from Source A1 into Destination B2"
)
def test_to_short_string():
assert (
transfer.to_short_string()
== "Transfer 2.50E-05L (Source-A1) -> (Destination-B2)"
)
def test_with_new_volume():
new_volume = 50 * 10 ** (-7)
new_transfer = transfer.with_new_volume(new_volume)
assert new_transfer.volume == new_volume
def test_apply():
with pytest.raises(ValueError):
transfer.apply()
source_2 = lab.Plate96(name="Source_2")
source_2.wells["A1"].add_content({"Compound_1": 1}, volume=5 * 10 ** (-6))
destination_2 = lab.Plate96(name="Destination_2")
transfer_2 = lab.Transfer(source_2.wells["A1"], destination_2.wells["B2"], volume)
with pytest.raises(ValueError):
transfer_2.apply()
source_2.wells["A1"].add_content({"Compound_1": 1}, volume=25 * 10 ** (-6))
destination_2.wells["B2"].capacity = 3 * 10 ** (-6)
with pytest.raises(ValueError):
transfer_2.apply()
destination_2.wells["B2"].capacity = 50 * 10 ** (-6)
transfer_2.apply()
assert destination_2.wells["B2"].volume == volume
def test___repr__():
assert (
transfer.__repr__() == "Transfer 2.50E-05L from Source A1 into Destination B2"
)
``` |
{
"source": "jingxiangguo/Python-force-field-parameterizatoin-workflow",
"score": 2
} |
#### File: fortranAPI/test/test_import_fortranAPI.py
```python
import numpy as np
import os
from ctypes import CDLL
# Local library:
# fortran API:
import fortranAPI.IO
import fortranAPI.pair_correlation
# Third-party libraries:
import pytest
def test_import_IO_reader():
# get the dynamic library path from the fortranAPI IO module:
fortranlib_address = os.path.join(os.path.dirname(fortranAPI.IO.__file__),
"lib")
# Load the dynamic library of dcd trajectory reader:
dcd_lib = CDLL(os.path.join(fortranlib_address, "libdcd_reader.so"))
# Load the dynamic library of txt file reader:
txt_lib = CDLL(os.path.join(fortranlib_address, "libtxt_reader.so"))
# Load the dynamic library of txt file reader:
txt_lib = CDLL(os.path.join(fortranlib_address, "libxyz_reader.so"))
return None
def test_import_pair_correlation():
# get the dynamic library path from the fortranAPI IO module:
fortranlib_address = os.path.join(os.path.dirname(fortranAPI.pair_correlation.__file__),
"lib")
pcf_lib = CDLL(os.path.join(fortranlib_address, "libpair_correlation.so"))
return None
def test_import_general():
return None
```
#### File: Python-force-field-parameterizatoin-workflow/IO/writer.py
```python
import numpy as np
import multiprocessing as mp
import os
import sys
# Local library:
# Third-party library:
# -------------------------------------------------------------------------
# xyz writer
# -------------------------------------------------------------------------
def xyz_writer(filename, mode, label, total_atoms, xyz, box):
with open(filename, mode) as output:
output.write("%d\n" % total_atoms)
np.savetxt(output, np.c_[[box]])
for i in range(total_atoms):
output.write("%d %.15f %.15f %.15f\n"
% (label[i], xyz[i, 0], xyz[i, 1], xyz[i, 2]))
return None
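# Resulting file layout (values illustrative): line 1 is the atom count, line 2
# the box written by np.savetxt, then one "label x y z" row per atom, e.g.
# "1 0.000000000000000 1.500000000000000 3.000000000000000".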
# -------------------------------------------------------------------------
# Fortran xyz writer
# -------------------------------------------------------------------------
```
#### File: Python-force-field-parameterizatoin-workflow/sampling/potential_LAMMPS.py
```python
import logging
import os
import itertools
import sys
# Local library:
# Third-parties library:
# GMSO
import mbuild as mb
import gmso
from gmso.external.convert_mbuild import from_mbuild
from gmso.formats.top import write_top
from gmso.formats import write_lammpsdata
from unyt import unyt_quantity
# This module defines the force field output format for LAMMPS
# Customized pair_style function returns:
# A dictionary with the filename as "key", and its content as "values"
def choose_lammps_potential(ptype, force_field_parameters):
potential_logger = logging.getLogger(__name__)
potential_logger.debug("function:choose_lammps_potential "
"entered successfully")
# a list of available LAMMPS potential functional:
potential_type = {
"tersoff": __pair_style_tersoff,
"tersoff/table": __pair_style_tersoff,
"stillinger_weber": __pair_style_sw,
"lj/cut": __pair_style_lj_cut,
"buck/coul/long": __pair_style_buck_coul_long,
"lj/smooth/linear": __pair_style_lj_smooth_linear_GMSO
}
# raise the errors and exit the program if
# requested potential is not defined
if (ptype not in potential_type.keys()):
potential_logger.error(
"ERROR: LAMMPS potential type: "
" %s is invalid: \n" % ptype +
"Solutions: \n" +
"1. Check the spelling\n" +
"2. Define a customized force field "
"named %s in potential.py\n" % ptype +
"Currently available potential types are: " +
" , ".join(pt for pt in potential_type.keys()) +
"\n")
sys.exit("Error messages found in the log file")
# choose the chosen output force field
chosen_potential = potential_type[ptype]
output_force_field_dict = chosen_potential(ptype, force_field_parameters)
potential_logger.debug("function:choose_lammps_potential "
"returned successfully; Potential type: "
"%s is used ..." % ptype)
return output_force_field_dict
def propagate_force_field(wk_folder_tple, output_force_field_dict):
potential_logger = logging.getLogger(__name__)
potential_logger.debug("function: propagate_force_field "
"entered successfully !")
for every_type in wk_folder_tple:
for each_folder in every_type:
for output_file in output_force_field_dict:
output_content = output_force_field_dict[output_file]
if (len(output_content) > 1
and output_content[0] == "TurnOnGMSO"):
pass
#write_lammpsdata = output_content[1]
#filename = os.path.join(each_folder, output_file)
#write_lammpsdata(output_content[2], filename, output_content[-1])
else:
filename = os.path.join(each_folder, output_file)
with open(filename, "w") as output:
for line in output_content:
output.write(line)
potential_logger.debug("function:propagate_force_field "
" returned successfully; force-field parameters ")
return None
def set_lj_smooth_linear_GMSO(top, force_field_parameters):
ff = gmso.ForceField('ar.xml')
ar_type = ff.atom_types['Ar']
ar_type.parameters["sigma"] = unyt_quantity(force_field_parameters[1], 'angstrom')
ar_type.parameters["epsilon"] = unyt_quantity(force_field_parameters[0], "6.947694845464e-21*J")
for site in top.sites:
site.atom_type = ar_type
top.update_topology()
return top
def __pair_style_lj_smooth_linear_GMSO(ptype, force_field_parameters):
# output dictionary:
# Generate a small box of Argon atoms using mBuild
# output dictionary :
force_field_dict = {}
ar = mb.Compound(name='Ar')
# (1.3954 g/cm^3 / 39.948 amu) * (3 nm) ^3
packed_system = mb.fill_box(
compound=ar,
n_compounds=512,
box=mb.Box([4.22187, 4.22187, 4.22187]),
)
# Convert system to a backend object
top = from_mbuild(packed_system)
lamp_data_name = "ar.lmp"
force_field_dict[lamp_data_name] = ("TurnOnGMSO", write_lammpsdata, top, "atomic", set_lj_smooth_linear_GMSO)
return force_field_dict
def __pair_style_lj_cut(ptype, force_field_parameters):
# output dictionary :
force_field_dict = {}
# define the filename
include_file = "force_field_parameters"
# define the command for each filename
lammps_cmd_comment = "#pair style: %s is used \n" % ptype
lammps_cmd_1 = "pair_style lj/cut %.3f" % force_field_parameters[0]
lammps_cmd_2 = "pair_coeff * * %.9f %.9f" % (force_field_parameters[1],
force_field_parameters[2])
lammps_cmd_3 = "pair_modify tail yes"
force_field_dict[include_file] = (lammps_cmd_comment,
lammps_cmd_1,
lammps_cmd_2,
lammps_cmd_3)
return force_field_dict
def __pair_style_sw(ptype, force_field_parameters):
# output dictionary :
force_field_dict = {}
# define the filename
potential_file = "mW.sw"
# define the filename
include_file = "force_field_parameters"
lammps_cmd_comment = "#pair style: %s is used \n" % ptype
element = "WT"
command1 = "pair_style sw\n"
command2 = ("pair_coeff" + " " + "* *" +
" " + potential_file + " " + element + "\n")
pair_command = ((element+" ")*3 +
" ".join(str(para) for para in force_field_parameters))
force_field_dict[include_file] = (lammps_cmd_comment,
command1,
command2)
force_field_dict[potential_file] = (pair_command)
return force_field_dict
def __pair_style_tersoff(ptype, force_field_parameters):
# output dictionary:
force_field_dict = {}
# define the filename
potential_file = "WT_ML-BOP.tersoff"
# define the filename
include_file = "force_field_parameters"
lammps_cmd_comment = "# pair style: %s is used \n" % ptype
element = "WT"
if ("table" in ptype):
command1 = "pair_style tersoff/table\n"
else:
command1 = "pair_style tersoff\n"
command2 = "pair_coeff" + " * * " + potential_file + " " + element
pair_command = ((element + " ")*3 +
" ".join(str(para) for para in force_field_parameters))
force_field_dict[include_file] = (lammps_cmd_comment, command1, command2)
force_field_dict[potential_file] = (pair_command)
return force_field_dict
def __pair_style_buck_coul_long(ptype, force_field_parameters):
# output dictionary:
force_field_dict = {}
# define the filename
include_file = "force_field_parameters"
# comment of included potential file
lammps_cmd_comment = "#pair style: %s is used \n" % ptype
# lammps command:
lammps_command_1 = ("pair_style buck/coul/long %.3f"
% force_field_parameters[0])
lammps_command_2 = ("pair_coeff 1 1 %.5f %.5f %.5f"
% (force_field_parameters[1],
force_field_parameters[2],
force_field_parameters[3]))
lammps_command_3 = ("pair_coeff 2 2 %.5f %.5f %.5f"
% (force_field_parameters[4],
force_field_parameters[5],
force_field_parameters[6]))
lammps_command_4 = ("pair_coeff 1 2 %.5f %.5f %.5f"
% (force_field_parameters[7],
force_field_parameters[8],
force_field_parameters[9]))
lammps_command_5 = "kspace_style pppm 1.0e-4"
force_field_dict[include_file] = (lammps_cmd_comment,
lammps_command_1,
lammps_command_2,
lammps_command_3,
lammps_command_4,
lammps_command_5)
return force_field_dict
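# Sketch of how these builders are consumed (the parameter values below are
# illustrative only): choose_lammps_potential("lj/cut", [10.0, 0.238, 3.405])
# returns {"force_field_parameters": ("#pair style: lj/cut is used \n",
# "pair_style lj/cut 10.000", "pair_coeff * * 0.238000000 3.405000000",
# "pair_modify tail yes")}; propagate_force_field() then writes that content
# into a "force_field_parameters" file in each working folder.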
```
#### File: Tutorial_03_Optimizer_module/Himmelblau/Tutorial_03_Optimization_himmelblau.py
```python
from optimizer.gradient_free import NelderMeadSimplex
import numpy as np
import matplotlib.pyplot as plt
class Himmelblau():
def __init__(self, x_ranges, y_ranges):
self.x_limit = np.arange(x_ranges[0],x_ranges[1], x_ranges[-1])
self.y_limit = np.arange(y_ranges[0],y_ranges[1], y_ranges[-1])
self.z_mat = np.zeros((self.x_limit.size, self.y_limit.size))
counter_x = 0
for x in self.x_limit:
counter_y = 0
for y in self.y_limit:
self.z_mat[counter_x, counter_y] = np.log10(self.compute_z(np.array([x, y])))
counter_y += 1
counter_x += 1
return None
def visualize(self):
plt.xlabel("x")
plt.ylabel("y")
plt.contour(self.x_limit, self.y_limit, np.transpose(self.z_mat), 20)
plt.show()
return None
def compute_z(self, parameters):
predict_val = ((parameters[0]**2 + parameters[1] - 11 )**2 +
(parameters[0] + parameters[1]**2 -7)**2)
return predict_val
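# Sanity check of one known minimum: at (x, y) = (3, 2) the value is
# (9 + 2 - 11)**2 + (3 + 4 - 7)**2 = 0, which is the global minimum of the
# Himmelblau function.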
# "update" must be here. For force-matching, rdf-matching ..., this function
# will be used to update the best predicted properties.
def update(self, func_expand, best_func, status=None):
pass
return None
# method "optimize" must be here. the optimizer will assume every
# passed objective function will have a attribute of "optimize"
# "para_type_lst", and "status" also must be here, though they are not used
def optimize(self, para_type_lst, parameters, status=None):
return self.compute_z(parameters)
# input file name
input_file = "in_himmelblau"
# No lines skipped:
skipped_lines = 0
# The solution space of Himmelblau function
x = [-8, 8, 0.15]
y = [-8, 8, 0.15]
# initialize test objective functions
himmeblau_obj = Himmelblau(x, y)
# Visualize the solution space of Himmelblau
# local minmums:
# Solution 1: x = 3.0, y = 2.0
# Solution 2: x = -2.8051, y = 3.1313
# Solution 3: x = -3.7793, y = -3.2832
# Solution 4: x = 3.5844, y = -1.8481
# local maximum:
# Solution 1: x = -0.270845, y = -0.923039
himmeblau_obj.visualize()
# initialize optimizer ...
optimize_himme = NelderMeadSimplex(input_file,
himmeblau_obj,
skipped=skipped_lines)
# Optimization starts ...
optimize_himme.run_optimization()
# Load the solutions:
with open("best_parameters.txt") as content:
for line in content:
solution = np.array(line.split()).astype(np.float64)
x, y = solution
print("The Minimum found is x = %.4f, y = %.4f" % (x, y))
# initialize optimizer ...
optimize_himme = NelderMeadSimplex(input_file,
himmeblau_obj,
skipped=skipped_lines,
optimize_mode="max")
# Optimization starts ...
optimize_himme.run_optimization()
# Load the solutions:
with open("best_parameters.txt") as content:
for line in content:
solution = np.array(line.split()).astype(np.float64)
x, y = solution
print("The Maximum found is x = %.4f, y = %.4f" % (x, y))
``` |
{
"source": "jingxiang-li/kaggle-yelp",
"score": 2
} |
#### File: kaggle-yelp/archive/train_models.py
```python
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import xgboost as xgb
import numpy as np
import pandas as pd
from os import listdir
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.metrics import f1_score
def aggregate_features(X):
return np.mean(X, axis=0)
def get_features():
biz_id_list = []
result_array = np.zeros((2000, 1024))
path = 'features/inception-21k-global/'
feature_files = listdir(path)
for i, f in enumerate(feature_files):
biz_id = int(f[:-4])
feature_bag = np.load('features/inception-21k-global/' + f)
out_feature = aggregate_features(feature_bag)
biz_id_list.append(biz_id)
result_array[i, :] = out_feature
col_names = [
'incpt21k-glp-avg-' +
str(i) for i in range(
result_array.shape[1])]
feature_frame = pd.DataFrame(
data=result_array,
index=biz_id_list,
columns=col_names)
return feature_frame
def get_response():
biz2label = pd.read_csv("rawdata/train.csv")
result_array = np.zeros((2000, 9))
for class_no in range(9):
response = [
1 if str(class_no) in str(l).split(" ") else 0 for l in biz2label["labels"]]
result_array[:, class_no] = response
response_frame = pd.DataFrame(
data=result_array,
index=biz2label["business_id"],
columns=['class' + str(i) for i in range(9)],
dtype=int)
return response_frame
def get_data():
X = get_features()
Y = get_response()
dataframe = pd.merge(X, Y, left_index=True, right_index=True)
return dataframe
# def get_testdata():
# photo2ftr = pd.read_csv("features/inception-21k-global-test.csv",
# index_col=0, header=None)
# photo2biz = pd.read_csv("rawdata/test_photo_to_biz.csv")
# biz_ids = np.unique(photo2biz["business_id"])
# test_data = np.zeros((len(biz_ids), 1024))
# for i, biz_id in enumerate(biz_ids):
# dd = photo2biz[photo2biz["business_id"] == biz_id]
# photo_ids = np.unique(dd["photo_id"])
# feature = np.mean(photo2ftr.loc[photo_ids].as_matrix(), axis=0)
# test_data[i, :] = feature
# np.save('features/inception-21k-global-test.npy', test_data)
# dataframe = get_data().as_matrix()
# np.save('features/train_data.npy', dataframe)
def evalerror(preds, dtrain):
pred_labels = [1 if p > 0 else 0 for p in preds]
labels = dtrain.get_label()
return 'f1-score', f1_score(labels, pred_labels)
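# With objective 'binary:logitraw' the booster returns raw margin scores, so a
# margin above 0 (equivalently, probability above 0.5) is counted as a positive
# prediction here and in the test-set thresholding below.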
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param['scale_pos_weight'] = ratio
return (dtrain, dtest, param)
dataframe = np.load('features/train_data.npy')
X = dataframe[:, 0:1024]
y_array = dataframe[:, -9:].astype(int)
X_test = np.load("features/inception-21k-global-test.npy")
print(X_test.shape)
dtest = xgb.DMatrix(X_test)
preds_array = np.zeros((X_test.shape[0], y_array.shape[1]))
for y_index in range(9):
y = y_array[:, y_index]
dtrain = xgb.DMatrix(X, label=y)
param = {
'max_depth': 3,
'eta': 0.5,
'silent': 1,
'objective': 'binary:logitraw',
'nthread': 4}
cv_result = xgb.cv(
params=param,
dtrain=dtrain,
num_boost_round=2000,
nfold=5,
feval=evalerror,
early_stopping_rounds=30,
maximize=True,
verbose_eval=True,
fpreproc=fpreproc,
show_stdv=False)
# train model and predict on test set
opt_round_num = cv_result.shape[0] - 1
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param['scale_pos_weight'] = ratio
# add dtest here
clf = xgb.train(
params=param,
dtrain=dtrain,
num_boost_round=opt_round_num,
evals=[(dtrain, 'train')],
feval=evalerror,
maximize=True)
preds = (clf.predict(dtest) > 0).astype(int)
print(preds.shape)
print(preds_array.shape)
preds_array[:, y_index] = preds
np.savetxt("output/first_try.csv", preds_array, delimiter=",")
# sss = StratifiedShuffleSplit(dataframe[:, -3], 1, test_size=0.2)
# for train_index, test_index in sss:
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
# dtrain = xgb.DMatrix(X_train, label=y_train)
# dtest = xgb.DMatrix(X_test, label=y_test)
# param = {'max_depth':3, 'eta':0.1, 'silent':1, 'objective':'binary:logitraw', 'lambda':1}
# param['nthread'] = 4
# watchlist = [(dtrain,'train'), (dtest,'eval')]
# num_round = 2000
# def evalerror(preds, dtrain):
# pred_labels = [1 if p > 0 else 0 for p in preds]
# labels = dtrain.get_label()
# return 'f1-score', f1_score(labels, pred_labels)
# bst = xgb.train(param, dtrain, num_round, watchlist, feval=evalerror, early_stopping_rounds=30, maximize=True)
```
#### File: kaggle-yelp/model/level2_models.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import xgboost as xgb
import argparse
from os import path
import os
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import pickle
from utils import *
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data',
choices=['21k', 'v3', 'colHist'],
default="21k")
parser.add_argument('--prob',
choices=['75', '50'],
default='75')
parser.add_argument('--reps',
choices=['5', '9'],
default='5')
parser.add_argument('--yix', type=int, default=0)
return parser.parse_args()
def get_params(args):
data_dir_path = "../level2-feature/" + \
str(args.yix) + "/" + "_".join((args.reps, args.prob, args.data))
assert path.exists(data_dir_path)
reps = int(args.reps)
prob = int(args.prob) / 100
return data_dir_path, reps, prob
def get_data_train(data_dir, args):
X_path = path.join(data_dir, "X_train.npy")
y_path = path.join(data_dir, "y_train.npy")
X = np.load(X_path)
y = np.load(y_path)
return (X, y)
########################################################################
# functions for xgboost training
def evalF1(preds, dtrain):
from sklearn.metrics import f1_score
labels = dtrain.get_label()
y_agg = labels[range(0, labels.shape[0], reps)]
pred_agg = agg_preds(preds, reps, vote_by_majority)
return 'f1-score', f1_score(y_agg, pred_agg)
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param['scale_pos_weight'] = ratio
return (dtrain, dtest, param)
# functions for hyperparameters optimization
class Score:
def __init__(self, X, y, y_ix, reps):
self.y = y
self.dtrain = xgb.DMatrix(X, label=self.y)
self.reps = reps
def get_score(self, params):
params["max_depth"] = int(params["max_depth"])
params["min_child_weight"] = int(params["min_child_weight"])
params["num_boost_round"] = int(params["num_boost_round"])
print("Training with params:")
print(params)
cv_result = xgb.cv(params=params,
dtrain=self.dtrain,
num_boost_round=params['num_boost_round'],
nfold=5,
folds=makeKFold(5, self.y, self.reps),
feval=evalF1,
maximize=True,
fpreproc=fpreproc,
verbose_eval=True)
score = cv_result.ix[params['num_boost_round'] - 1, 0]
print(score)
return {'loss': -score, 'status': STATUS_OK}
def optimize(trials, X, y, y_ix, reps, max_evals):
space = {
'num_boost_round': hp.quniform('num_boost_round', 5, 50, 5),
'eta': hp.quniform('eta', 0.1, 0.5, 0.1),
'gamma': hp.quniform('gamma', 0, 1, 0.2),
'max_depth': hp.quniform('max_depth', 1, 6, 1),
'min_child_weight': hp.quniform('min_child_weight', 1, 7, 2),
'subsample': hp.quniform('subsample', 0.5, 1, 0.1),
'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.1),
'colsample_bylevel': hp.quniform('colsample_bylevel', 0.5, 1, 0.1),
'silent': 1,
'objective': 'binary:logistic'
}
s = Score(X, y, y_ix, reps)
best = fmin(s.get_score,
space,
algo=tpe.suggest,
trials=trials,
max_evals=max_evals
)
best["max_depth"] = int(best["max_depth"])
best["min_child_weight"] = int(best["min_child_weight"])
best["num_boost_round"] = int(best["num_boost_round"])
del s
return best
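# Note: hyperopt's hp.quniform samples floats (e.g. 3.0), so the integer-valued
# hyperparameters are cast back to int both inside Score.get_score and on the
# returned `best` dict before they reach xgboost.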
def out_fold_pred(params, X, y_array, y_ix, reps):
preds = np.zeros((y_array.shape[0]))
y = y_array
params['silent'] = 1
params['objective'] = 'binary:logistic'
params['scale_pos_weight'] = float(np.sum(y == 0)) / np.sum(y == 1)
for train_ix, test_ix in makeKFold(5, y, reps):
X_train, X_test = X[train_ix, :], X[test_ix, :]
y_train = y[train_ix]
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test)
bst = xgb.train(params=params,
dtrain=dtrain,
num_boost_round=params['num_boost_round'],
evals=[(dtrain, 'train')],
feval=evalF1,
maximize=True,
verbose_eval=None)
preds[test_ix] = bst.predict(dtest, output_margin=True)
return preds
def get_model(params, X, y_array, y_ix, reps):
y = y_array
dtrain = xgb.DMatrix(X, label=y)
params['silent'] = 1
params['objective'] = 'binary:logistic'
params['scale_pos_weight'] = float(np.sum(y == 0)) / np.sum(y == 1)
bst = xgb.train(params=params,
dtrain=dtrain,
num_boost_round=params['num_boost_round'],
evals=[(dtrain, 'train')],
feval=evalF1,
maximize=True,
verbose_eval=None)
return bst
if __name__ == "__main__":
args = parse_args()
print("Y_index:", args.yix)
print("Data set:", args.data, args.reps, args.prob)
data_dir, reps, prob = get_params(args)
X, y = get_data_train(data_dir, args)
# save place
save_dir = "_".join(("../level2-models/" + str(args.yix) + "/" +
args.reps, args.prob, args.data))
print(save_dir)
if not path.exists(save_dir):
os.makedirs(save_dir)
# begin trainnig
y_ix = 0
print("training for class " + str(args.yix))
trials = Trials()
params = optimize(trials, X, y, y_ix, reps, 25)
preds = out_fold_pred(params, X, y, y_ix, reps)
model = get_model(params, X, y, y_ix, reps)
np.save(path.join(save_dir, "outFold.npy"), preds)
pickle.dump(model, open(path.join(save_dir, "model.pkl"), mode='wb'))
print(str(args) + " completes!")
```
#### File: kaggle-yelp/model/level3_model_rf.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import f1_score
import argparse
from os import path
import os
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from utils import *
import pickle
np.random.seed(54568464)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--yix', type=int, default=0)
return parser.parse_args()
# functions for hyperparameters optimization
class Score:
def __init__(self, X, y):
self.y = y
self.X = X
def get_score(self, params):
params['n_estimators'] = int(params['n_estimators'])
params['max_depth'] = int(params['max_depth'])
params['min_samples_split'] = int(params['min_samples_split'])
params['min_samples_leaf'] = int(params['min_samples_leaf'])
params['n_estimators'] = int(params['n_estimators'])
print('Training with params:')
print(params)
# cross validation here
scores = []
for train_ix, test_ix in makeKFold(5, self.y, 1):
X_train, y_train = self.X[train_ix, :], self.y[train_ix]
X_test, y_test = self.X[test_ix, :], self.y[test_ix]
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = RandomForestClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y_train, 1))
cclf.fit(X_train, y_train, sample_weight)
pred = cclf.predict(X_test)
scores.append(f1_score(y_true=y_test, y_pred=pred))
print(scores)
score = np.mean(scores)
print(score)
return {'loss': -score, 'status': STATUS_OK}
def optimize(trials, X, y, max_evals):
space = {
'n_estimators': hp.quniform('n_estimators', 100, 500, 50),
'criterion': hp.choice('criterion', ['gini', 'entropy']),
'max_depth': hp.quniform('max_depth', 1, 7, 1),
'min_samples_split': hp.quniform('min_samples_split', 1, 9, 2),
'min_samples_leaf': hp.quniform('min_samples_leaf', 1, 5, 1),
'bootstrap': True,
'oob_score': True,
'n_jobs': -1
}
s = Score(X, y)
best = fmin(s.get_score,
space,
algo=tpe.suggest,
trials=trials,
max_evals=max_evals
)
best['n_estimators'] = int(best['n_estimators'])
best['max_depth'] = int(best['max_depth'])
best['min_samples_split'] = int(best['min_samples_split'])
best['min_samples_leaf'] = int(best['min_samples_leaf'])
best['n_estimators'] = int(best['n_estimators'])
best['criterion'] = ['gini', 'entropy'][best['criterion']]
best['bootstrap'] = True
best['oob_score'] = True
best['n_jobs'] = -1
del s
return best
def out_fold_pred(params, X, y):
# cross validation here
preds = np.zeros((y.shape[0]))
for train_ix, test_ix in makeKFold(5, y, 1):
X_train, y_train = X[train_ix, :], y[train_ix]
X_test = X[test_ix, :]
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = RandomForestClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y_train, 1))
cclf.fit(X_train, y_train, sample_weight)
pred = cclf.predict_proba(X_test)[:, 1]
preds[test_ix] = pred
return preds
def get_model(params, X, y):
clf = RandomForestClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y, 1))
weight = y.shape[0] / (2 * np.bincount(y))
sample_weight = np.array([weight[i] for i in y])
cclf.fit(X, y, sample_weight)
return cclf
args = parse_args()
data_dir = '../level3-feature/' + str(args.yix)
X_train = np.load(path.join(data_dir, 'X_train.npy'))
X_test = np.load(path.join(data_dir, 'X_test.npy'))
y_train = np.load(path.join(data_dir, 'y_train.npy'))
print(X_train.shape, X_test.shape, y_train.shape)
X_train_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_train_ext.npy')
X_test_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_test_ext.npy')
print(X_train_ext.shape, X_test_ext.shape)
X_train = np.hstack((X_train, X_train_ext))
X_test = np.hstack((X_test, X_test_ext))
print('Add Extra')
print(X_train.shape, X_test.shape, y_train.shape)
# Now we have X_train, X_test, y_train
trials = Trials()
params = optimize(trials, X_train, y_train, 50)
out_fold = out_fold_pred(params, X_train, y_train)
clf = get_model(params, X_train, y_train)
preds = clf.predict_proba(X_test)[:, 1]
save_dir = '../level3-model-final/' + str(args.yix)
print(save_dir)
if not path.exists(save_dir):
os.makedirs(save_dir)
# save model, parameter, outFold_pred, pred
with open(path.join(save_dir, 'model_rf.pkl'), 'wb') as f_model:
pickle.dump(clf.calibrated_classifiers_, f_model)
with open(path.join(save_dir, 'param_rf.pkl'), 'wb') as f_param:
pickle.dump(params, f_param)
np.save(path.join(save_dir, 'pred_rf.npy'), preds)
np.save(path.join(save_dir, 'outFold_rf.npy'), out_fold)
```
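A side note on the repeated `int(...)` casts in the script above: `hp.quniform` samples values as floats (e.g. `250.0`), while scikit-learn expects plain integers for `n_estimators`, `max_depth`, and the `min_samples_*` parameters, so both the sampled params and the returned `best` dict have to be cast back. A minimal, self-contained sketch of that pattern (toy objective and bounds, not the original search space) is:
```python
from hyperopt import fmin, tpe, hp, Trials, STATUS_OK

def cast_int_params(params, keys=('n_estimators', 'max_depth')):
    # hp.quniform samples floats on a grid; cast the integer-valued
    # hyperparameters back to int before handing them to scikit-learn.
    return {k: (int(v) if k in keys else v) for k, v in params.items()}

space = {
    'n_estimators': hp.quniform('n_estimators', 100, 500, 50),
    'max_depth': hp.quniform('max_depth', 1, 7, 1),
}

def objective(params):
    params = cast_int_params(params)
    # A toy loss standing in for the cross-validated F1 score used above.
    loss = (params['max_depth'] - 3) ** 2
    return {'loss': loss, 'status': STATUS_OK}

trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, trials=trials, max_evals=10)
print(cast_int_params(best))
```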
#### File: kaggle-yelp/model/level4_data.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os import path, listdir
import os
import pickle as pkl
import argparse
import re
import numpy as np
import xgboost as xgb
from scipy.special import expit
from utils import *
np.random.seed(998)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--yix', type=int, default=0)
return parser.parse_args()
args = parse_args()
data_dir = '../level3-feature/' + str(args.yix)
X_train = np.load(path.join(data_dir, 'X_train.npy'))
X_test = np.load(path.join(data_dir, 'X_test.npy'))
y_train = np.load(path.join(data_dir, 'y_train.npy'))
print(X_train.shape, X_test.shape, y_train.shape)
X_train_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_train_ext.npy')
X_test_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_test_ext.npy')
print(X_train_ext.shape, X_test_ext.shape)
X_train = np.hstack((X_train, X_train_ext))
X_test = np.hstack((X_test, X_test_ext))
print('Add Extra')
print(X_train.shape, X_test.shape, y_train.shape)
model_dir = '../level3-model-final/' + str(args.yix)
X_train_pred = np.vstack((
np.load(path.join(model_dir, 'outFold.npy')),
np.load(path.join(model_dir, 'outFold_rf.npy')),
np.load(path.join(model_dir, 'outFold_ext.npy'))
)).T
X_test_pred = np.vstack((
np.load(path.join(model_dir, 'pred.npy')),
np.load(path.join(model_dir, 'pred_rf.npy')),
np.load(path.join(model_dir, 'pred_ext.npy'))
)).T
X_train_all = np.hstack((X_train, X_train_pred))
X_test_all = np.hstack((X_test, X_test_pred))
print(X_train_all.shape)
print(X_test_all.shape)
save_dir = path.join("../level4-feature", str(args.yix))
if not path.exists(save_dir):
os.makedirs(save_dir)
np.save(path.join(save_dir, "X_train.npy"), X_train_all)
np.save(path.join(save_dir, "X_test.npy"), X_test_all)
```
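The stacking step above is just column concatenation: out-of-fold predictions become extra training features, and the matching full-model test predictions become extra test features. A tiny self-contained illustration with made-up shapes:
```python
import numpy as np

rng = np.random.RandomState(0)
X_train, X_test = rng.rand(100, 5), rng.rand(40, 5)

# Stand-ins for three level-3 models: out-of-fold predictions for the
# training rows, full-model predictions for the test rows.
oof_preds = [rng.rand(100) for _ in range(3)]
test_preds = [rng.rand(40) for _ in range(3)]

X_train_all = np.hstack((X_train, np.vstack(oof_preds).T))
X_test_all = np.hstack((X_test, np.vstack(test_preds).T))
print(X_train_all.shape, X_test_all.shape)  # (100, 8) (40, 8)
```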
#### File: kaggle-yelp/model/pic_level_ftr1.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import argparse
import os
from os import path
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--yix', type=int, default=0)
return parser.parse_args()
def agg_function(x):
return np.concatenate(([np.mean(x),
np.std(x)],
np.percentile(x, range(0, 101, 10)),
[np.sum(x > 0.5) / x.size,
np.sum(x > 0.55) / x.size,
np.sum(x > 0.6) / x.size,
np.sum(x > 0.65) / x.size,
np.sum(x > 0.7) / x.size,
np.sum(x > 0.45) / x.size,
np.sum(x > 0.4) / x.size,
np.sum(x > 0.35) / x.size,
np.sum(x > 0.3) / x.size,
np.sum(x > 0.25) / x.size,
np.sum(x > 0.2) / x.size,
np.sum(x > 0.15) / x.size,
np.sum(x > 0.1) / x.size,
np.sum(x > 0.5),
np.sum(x > 0.55),
np.sum(x > 0.6),
np.sum(x > 0.65),
np.sum(x > 0.7),
np.sum(x > 0.45),
np.sum(x > 0.4),
np.sum(x > 0.35),
np.sum(x > 0.3),
np.sum(x > 0.25),
np.sum(x > 0.2),
np.sum(x > 0.15),
np.sum(x > 0.1),
x.size]))
args = parse_args()
save_dir = path.join('../pic-feature-final/', str(args.yix))
if not path.exists(save_dir):
os.makedirs(save_dir)
data_dir = path.join('../pic-feature/', str(args.yix))
# for training data
X_train = np.load(path.join(data_dir, 'pic_train.npy'))[:, -3:]
img_df = pd.read_csv('../data/imglist_train.txt', sep='\t', header=None)
img_index = img_df.iloc[:, 0].values
img_feature_df = pd.DataFrame(data=X_train, index=img_index)
photo2biz = pd.read_csv('../data/train_photo_to_biz_ids.csv',
index_col='photo_id')
biz_list = photo2biz['business_id'].unique()
result_list = []
for biz_id in biz_list:
photo_ids = photo2biz[photo2biz['business_id'] == biz_id].index.values
print(biz_id, len(photo_ids))
    preds = img_feature_df.loc[photo_ids, :].values
ft = np.apply_along_axis(agg_function, 0, preds).flatten(order='F')
print(ft.shape)
result_list.append(ft)
result_array = np.asarray(result_list)
np.save(path.join(save_dir, 'train.npy'), result_array)
# for test data
X_test = np.load(path.join(data_dir, 'pic_test.npy'))[:, -3:]
img_df = pd.read_csv('../data/imglist_test.txt', sep='\t', header=None)
img_index = img_df.iloc[:, 0].values
img_feature_df = pd.DataFrame(data=X_test, index=img_index)
photo2biz = pd.read_csv('../data/test_photo_to_biz.csv',
index_col='photo_id')
biz_list = photo2biz['business_id'].unique()
result_list = []
for biz_id in biz_list:
photo_ids = photo2biz[photo2biz['business_id'] == biz_id].index.values
print(biz_id, len(photo_ids))
    preds = img_feature_df.loc[photo_ids, :].values
ft = np.apply_along_axis(agg_function, 0, preds).flatten(order='F')
print(ft.shape)
result_list.append(ft)
result_array = np.asarray(result_list)
np.save(path.join(save_dir, 'test.npy'), result_array)
```
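`np.apply_along_axis(agg_function, 0, preds)` summarizes every prediction column of a business's photo matrix independently, and `flatten(order='F')` then concatenates those per-column summaries. A cut-down version of the mechanism (simplified aggregator, made-up scores):
```python
import numpy as np

def simple_agg(x):
    # Summarize one column of per-photo scores: mean, std, and the
    # fraction of scores above 0.5 (a reduced form of agg_function).
    return np.array([np.mean(x), np.std(x), np.mean(x > 0.5)])

preds = np.array([[0.2, 0.9, 0.4],
                  [0.6, 0.8, 0.1],
                  [0.7, 0.3, 0.5]])  # 3 photos x 3 prediction columns

ft = np.apply_along_axis(simple_agg, 0, preds).flatten(order='F')
print(ft.shape)  # (9,): three summary values per column, column by column
```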
#### File: kaggle-yelp/model/randomForest.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import argparse
from os import path, getcwd
import os
import re
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import f1_score
from utils import *
import logging
import logging.handlers
fmt = ("%(asctime)s - %(filename)s:%(lineno)s - "
"%(name)s - %(levelname)s - %(message)s")
formatter = logging.Formatter(fmt)
handler = logging.FileHandler("randomForest.log")
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setFormatter(formatter)
console.setLevel(logging.WARNING)
logger = logging.getLogger('randomForest')
logger.addHandler(handler)
logger.addHandler(console)
logger.setLevel(logging.DEBUG)
# logger.debug('This is debug message')
# logger.info('This is info message')
# logger.warning('This is warning message')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_folder', type=str, default="../feature/5_75")
parser.add_argument('--data_set', choices=['21k', 'v3', 'colHist'],
default="21k")
return parser.parse_args()
def get_params(args):
data_dir_path = path.abspath(path.join(getcwd(), args.data_folder))
assert path.exists(data_dir_path)
dir_name = path.basename(data_dir_path)
m = re.match(r"(\d+)_(\d+)", dir_name)
reps, prob = int(m.group(1)), int(m.group(2)) / 100
return data_dir_path, reps, prob
def get_data_train(data_dir, args):
X_path = path.join(data_dir, "X_train.npy")
y_path = path.join(data_dir, "y_train.npy")
X = np.load(X_path)
y = np.load(y_path)
if args.data_set == '21k':
sel_range = range(0, 2048)
elif args.data_set == 'colHist':
sel_range = range(2048, 2048 + 772)
else:
sel_range = range(2048 + 772, 6916)
return (X[:, sel_range], y)
########################################################################
# functions for hyperparameters optimization
class Score:
def __init__(self, X, y, y_ix, reps):
self.y = y[:, y_ix]
self.X = X
self.reps = reps
def get_score(self, params):
params["n_estimators"] = int(params["n_estimators"])
params["max_depth"] = int(params["max_depth"])
params["min_samples_split"] = int(params["min_samples_split"])
params["min_samples_leaf"] = int(params["min_samples_leaf"])
params["n_estimators"] = int(params["n_estimators"])
logger.info("Training with params:")
logger.info(params)
# cross validation here
scores = []
for train_ix, test_ix in makeKFold(5, self.y, self.reps):
X_train, y_train = self.X[train_ix, :], self.y[train_ix]
X_test, y_test = self.X[test_ix, :], self.y[test_ix]
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = RandomForestClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y_train, self.reps))
cclf.fit(X_train, y_train, sample_weight)
pred = cclf.predict(X_test)
scores.append(f1_score(y_true=y_test, y_pred=pred))
print(scores)
score = np.mean(scores)
logger.info(score)
return {'loss': -score, 'status': STATUS_OK}
def optimize(trials, X, y, y_ix, reps, max_evals):
space = {
'n_estimators': hp.quniform('n_estimators', 30, 150, 30),
'criterion': hp.choice('criterion', ['gini', 'entropy']),
'max_depth': hp.quniform('max_depth', 1, 5, 1),
'min_samples_split': hp.quniform('min_samples_split', 1, 9, 2),
'min_samples_leaf': hp.quniform('min_samples_leaf', 1, 5, 1),
'bootstrap': False,
'oob_score': False,
'n_jobs': -1
}
s = Score(X, y, y_ix, reps)
best = fmin(s.get_score,
space,
algo=tpe.suggest,
trials=trials,
max_evals=max_evals
)
best["n_estimators"] = int(best["n_estimators"])
best["max_depth"] = int(best["max_depth"])
best["min_samples_split"] = int(best["min_samples_split"])
best["min_samples_leaf"] = int(best["min_samples_leaf"])
best["n_estimators"] = int(best["n_estimators"])
best["criterion"] = ['gini', 'entropy'][best["criterion"]]
del s
return best
def out_fold_pred(params, X, y_array, y_ix, reps):
y = y_array[:, y_ix]
params['bootstrap'] = False
params['oob_score'] = False
params['n_jobs'] = -1
# cross validation here
preds = np.zeros((y_array.shape[0]))
for train_ix, test_ix in makeKFold(5, y, reps):
X_train, y_train = X[train_ix, :], y[train_ix]
X_test = X[test_ix, :]
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = RandomForestClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y_train, reps))
cclf.fit(X_train, y_train, sample_weight)
pred = cclf.predict_proba(X_test)[:, 1]
preds[test_ix] = pred
return preds
def get_model(params, X, y_array, y_ix, reps):
y = y_array[:, y_ix]
params['bootstrap'] = False
params['oob_score'] = False
params['n_jobs'] = -1
clf = RandomForestClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y, reps))
weight = y.shape[0] / (2 * np.bincount(y))
sample_weight = np.array([weight[i] for i in y])
cclf.fit(X, y, sample_weight)
return cclf
if __name__ == "__main__":
args = parse_args()
logger.info("Data Directory: " + args.data_folder)
logger.info("Data set: " + args.data_set)
data_dir, reps, prob = get_params(args)
X, y = get_data_train(data_dir, args)
# save place
save_dir_name = path.basename(__file__)[:-3] + "_" + \
str(reps) + "_" + str(int(100 * prob)) + "_" + args.data_set
save_dir = path.join(path.dirname(path.abspath(__file__)),
save_dir_name)
if not path.isdir(save_dir):
os.mkdir(save_dir)
    # begin training
for y_ix in range(9):
logger.info("training for class " + str(y_ix))
trials = Trials()
params = optimize(trials, X, y, y_ix, reps, 30)
preds = out_fold_pred(params, X, y, y_ix, reps)
model = get_model(params, X, y, y_ix, reps)
pickle.dump(params,
open(path.join(save_dir, "param_" + str(y_ix) + ".pkl"),
'wb'))
np.save(path.join(save_dir, "outFold_" + str(y_ix) + ".npy"), preds)
pickle.dump(model.calibrated_classifiers_,
open(path.join(save_dir, "model_" + str(y_ix) + ".pkl"),
'wb'))
logger.info(str(y_ix) + " completes!")
``` |
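Both random-forest scripts above derive `sample_weight` from `weight = n_samples / (2 * np.bincount(y))`, the usual balanced two-class weighting: each class ends up contributing the same total weight regardless of how rare it is. A quick check of the arithmetic (Python 3 division assumed):
```python
import numpy as np

y = np.array([0, 0, 0, 0, 0, 0, 1, 1])      # 6 negatives, 2 positives
weight = y.shape[0] / (2 * np.bincount(y))   # [8/12, 8/4]
sample_weight = weight[y]                    # same as [weight[i] for i in y]
print(weight)                                # [0.66666667 2.        ]
print(sample_weight.sum())                   # 8.0: each class contributes 4.0
```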
{
"source": "jingxianwen/E3SM",
"score": 3
} |
#### File: lib/CIME/cs_status.py
```python
from __future__ import print_function
from CIME.XML.standard_module_setup import *
from CIME.XML.expected_fails_file import ExpectedFailsFile
from CIME.test_status import TestStatus
import os
import sys
from collections import defaultdict
def cs_status(test_paths, summary=False, fails_only=False,
count_fails_phase_list=None,
expected_fails_filepath=None,
out=sys.stdout):
"""Print the test statuses of all tests in test_paths. The default
is to print to stdout, but this can be overridden with the 'out'
argument.
    If summary is True, then only the overall status of each test is printed.
If fails_only is True, then only test failures are printed (this
includes PENDs as well as FAILs).
If count_fails_phase_list is provided, it should be a list of phases
(from the phases given by test_status.ALL_PHASES). For each phase in
this list: do not give line-by-line output; instead, just report the
total number of tests that have not PASSed this phase (this includes
PENDs and FAILs). (This is typically used with the fails_only
option, but it can also be used without that option.)
If expected_fails_filepath is provided, it should be a string giving
the full path to a file listing expected failures for this test
suite. Expected failures are then labeled as such in the output.
"""
expect(not (summary and fails_only),
"Cannot have both summary and fails_only")
expect(not (summary and count_fails_phase_list),
"Cannot have both summary and count_fails_phase_list")
if count_fails_phase_list is None:
count_fails_phase_list = []
non_pass_counts = dict.fromkeys(count_fails_phase_list, 0)
xfails = _get_xfails(expected_fails_filepath)
test_id_output = defaultdict(str)
test_id_counts = defaultdict(int)
for test_path in test_paths:
test_dir=os.path.dirname(test_path)
ts = TestStatus(test_dir=test_dir)
test_id = os.path.basename(test_dir).split(".")[-1]
if summary:
output = _overall_output(ts, " {status} {test_name}\n")
else:
if fails_only:
output = ''
else:
output = _overall_output(ts, " {test_name} (Overall: {status}) details:\n")
output += ts.phase_statuses_dump(prefix=" ",
skip_passes=fails_only,
skip_phase_list=count_fails_phase_list,
xfails=xfails.get(ts.get_name()))
if count_fails_phase_list:
ts.increment_non_pass_counts(non_pass_counts)
test_id_output[test_id] += output
test_id_counts[test_id] += 1
for test_id in sorted(test_id_output):
count = test_id_counts[test_id]
print("{}: {} test{}".format(test_id, count, 's' if count > 1 else ''), file=out)
print(test_id_output[test_id], file=out)
print(' ', file=out)
if count_fails_phase_list:
print(72*'=', file=out)
print('Non-PASS results for select phases:', file=out)
for phase in count_fails_phase_list:
print('{} non-passes: {}'.format(phase, non_pass_counts[phase]), file=out)
def _get_xfails(expected_fails_filepath):
"""Returns a dictionary of ExpectedFails objects, where the keys are test names
expected_fails_filepath should be either a string giving the path to
the file containing expected failures, or None. If None, then this
returns an empty dictionary (as if expected_fails_filepath were
pointing to a file with no expected failures listed).
"""
if expected_fails_filepath is not None:
expected_fails_file = ExpectedFailsFile(expected_fails_filepath)
xfails = expected_fails_file.get_expected_fails()
else:
xfails = {}
return xfails
def _overall_output(ts, format_str):
"""Returns a string giving the overall test status
Args:
ts: TestStatus object
format_str (string): string giving the format of the output; must
contain place-holders for status and test_name
"""
test_name = ts.get_name()
status = ts.get_overall_test_status()
return format_str.format(status=status, test_name=test_name)
```
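For reference, `cs_status` is normally driven by CIME's `cs.status` wrapper script, which globs the per-test `TestStatus` files for a test id and passes them in. A minimal hand-rolled invocation could look like the sketch below; the test root, glob pattern, and expected-fails file name are illustrative, and CIME's `lib` directory is assumed to be on `PYTHONPATH`.
```python
import glob
import sys
from CIME.cs_status import cs_status

# Report only failures (FAILs and PENDs) for one batch of tests, labelling
# anything listed in a hypothetical expected-fails XML file.
test_paths = glob.glob("/scratch/tests/*.20200101_120000/TestStatus")
cs_status(test_paths,
          fails_only=True,
          expected_fails_filepath="expected_fails.xml",
          out=sys.stdout)
```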
#### File: CIME/Servers/wget.py
```python
from CIME.XML.standard_module_setup import *
from CIME.Servers.generic_server import GenericServer
logger = logging.getLogger(__name__)
class WGET(GenericServer):
def __init__(self, address, user='', passwd=''):
self._args = ''
if user:
self._args += "--user {} ".format(user)
if passwd:
self._args += "--password {} ".format(passwd)
self._server_loc = address
cmd = "wget {} --no-check-certificate --spider {}".format(self._args, address)
err, output, _ = run_cmd(cmd, combine_output=True)
expect(err == 0,"Could not connect to repo via '{}'\nThis is most likely either a proxy, or network issue.\nOutput:\n{}".format(cmd, output))
def fileexists(self, rel_path):
full_url = os.path.join(self._server_loc, rel_path)
stat, out, err = run_cmd("wget {} --no-check-certificate --spider {}".format(self._args, full_url))
if (stat != 0):
logging.warning("FAIL: Repo '{}' does not have file '{}'\nReason:{}\n{}\n".format(self._server_loc, full_url, out, err))
return False
return True
def getfile(self, rel_path, full_path):
full_url = os.path.join(self._server_loc, rel_path)
stat, output, errput = \
run_cmd("wget {} {} -nc --no-check-certificate --output-document {}".format(self._args, full_url, full_path))
if (stat != 0):
logging.warning("wget failed with output: {} and errput {}\n".format(output, errput))
# wget puts an empty file if it fails.
try:
os.remove(full_path)
except OSError:
pass
return False
else:
logging.info("SUCCESS\n")
return True
def getdirectory(self, rel_path, full_path):
full_url = os.path.join(self._server_loc, rel_path)
stat, output, errput = \
run_cmd("wget {} {} -r -N --no-check-certificate --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path)
logger.debug(output)
logger.debug(errput)
if (stat != 0):
logging.warning("wget failed with output: {} and errput {}\n".format(output, errput))
# wget puts an empty file if it fails.
try:
os.remove(full_path)
except OSError:
pass
return False
else:
logging.info("SUCCESS\n")
return True
```
#### File: lib/CIME/simple_compare.py
```python
import os, re
from CIME.utils import expect
###############################################################################
def _normalize_string_value(value, case):
###############################################################################
"""
Some of the strings are inherently prone to diffs, like file
paths, etc. This function attempts to normalize that data so that
it will not cause diffs.
"""
    # Any occurrence of case must be normalized because test-ids might not match
if (case is not None):
case_re = re.compile(r'{}[.]([GC])[.]([^./\s]+)'.format(case))
value = case_re.sub("{}.ACTION.TESTID".format(case), value)
if ("/" in value):
# File path, just return the basename
return os.path.basename(value)
elif ("username" in value):
return ''
elif (".log." in value):
# Remove the part that's prone to diff
components = value.split(".")
return os.path.basename(".".join(components[0:-1]))
else:
return value
###############################################################################
def _skip_comments_and_whitespace(lines, idx):
###############################################################################
"""
Starting at idx, return next valid idx of lines that contains real data
"""
if (idx == len(lines)):
return idx
comment_re = re.compile(r'^[#!]')
lines_slice = lines[idx:]
for line in lines_slice:
line = line.strip()
if (comment_re.match(line) is not None or line == ""):
idx += 1
else:
return idx
return idx
###############################################################################
def _compare_data(gold_lines, comp_lines, case, offset_method=False):
###############################################################################
"""
>>> teststr = '''
... data1
... data2 data3
... data4 data5 data6
...
... # Comment
... data7 data8 data9 data10
... '''
>>> _compare_data(teststr.splitlines(), teststr.splitlines(), None)
('', 0)
>>> teststr2 = '''
... data1
... data2 data30
... data4 data5 data6
... data7 data8 data9 data10
... data00
... '''
>>> results,_ = _compare_data(teststr.splitlines(), teststr2.splitlines(), None)
>>> print(results)
Inequivalent lines data2 data3 != data2 data30
NORMALIZED: data2 data3 != data2 data30
Found extra lines
data00
<BLANKLINE>
>>> teststr3 = '''
... data1
... data4 data5 data6
... data7 data8 data9 data10
... data00
... '''
>>> results,_ = _compare_data(teststr3.splitlines(), teststr2.splitlines(), None, offset_method=True)
>>> print(results)
Inequivalent lines data4 data5 data6 != data2 data30
NORMALIZED: data4 data5 data6 != data2 data30
<BLANKLINE>
"""
comments = ""
cnt = 0
gidx, cidx = 0, 0
gnum, cnum = len(gold_lines), len(comp_lines)
while (gidx < gnum or cidx < cnum):
gidx = _skip_comments_and_whitespace(gold_lines, gidx)
cidx = _skip_comments_and_whitespace(comp_lines, cidx)
if (gidx == gnum):
if (cidx == cnum):
return comments, cnt
else:
comments += "Found extra lines\n"
comments += "\n".join(comp_lines[cidx:]) + "\n"
return comments, cnt
elif (cidx == cnum):
comments += "Missing lines\n"
comments += "\n".join(gold_lines[gidx:1]) + "\n"
return comments, cnt
gold_value = gold_lines[gidx].strip()
gold_value = gold_value.replace('"',"'")
comp_value = comp_lines[cidx].strip()
comp_value = comp_value.replace('"',"'")
norm_gold_value = _normalize_string_value(gold_value, case)
norm_comp_value = _normalize_string_value(comp_value, case)
if (norm_gold_value != norm_comp_value):
comments += "Inequivalent lines {} != {}\n".format(gold_value, comp_value)
comments += " NORMALIZED: {} != {}\n".format(norm_gold_value, norm_comp_value)
cnt += 1
if offset_method and (norm_gold_value != norm_comp_value):
if gnum > cnum:
gidx += 1
else:
cidx += 1
else:
gidx += 1
cidx += 1
return comments, cnt
###############################################################################
def compare_files(gold_file, compare_file, case=None):
###############################################################################
"""
Returns true if files are the same, comments are returned too:
(success, comments)
"""
expect(os.path.exists(gold_file), "File not found: {}".format(gold_file))
expect(os.path.exists(compare_file), "File not found: {}".format(compare_file))
comments, cnt = _compare_data(open(gold_file, "r").readlines(),
open(compare_file, "r").readlines(), case)
if cnt > 0:
comments2, cnt2 = _compare_data(open(gold_file, "r").readlines(),
open(compare_file, "r").readlines(),
case, offset_method=True)
if cnt2 < cnt:
comments = comments2
return comments == "", comments
###############################################################################
def compare_runconfigfiles(gold_file, compare_file, case=None):
###############################################################################
"""
Returns true if files are the same, comments are returned too:
(success, comments)
"""
expect(os.path.exists(gold_file), "File not found: {}".format(gold_file))
expect(os.path.exists(compare_file), "File not found: {}".format(compare_file))
#create dictionary's of the runconfig files and compare them
gold_dict = _parse_runconfig(gold_file)
compare_dict = _parse_runconfig(compare_file)
comments = findDiff(gold_dict, compare_dict, case=case)
comments = comments.replace(" d1", " " + gold_file)
comments = comments.replace(" d2", " " + compare_file)
# this picks up the case that an entry in compare is not in gold
if comments == "":
comments = findDiff(compare_dict, gold_dict, case=case)
comments = comments.replace(" d2", " " + gold_file)
comments = comments.replace(" d1", " " + compare_file)
return comments == "", comments
def _parse_runconfig(filename):
runconfig = {}
inrunseq = False
insubsection = None
subsection_re = re.compile(r'\s*(\S+)::')
group_re = re.compile(r'\s*(\S+)\s*:\s*(\S+)')
var_re = re.compile(r'\s*(\S+)\s*=\s*(\S+)')
with open(filename, "r") as fd:
for line in fd:
# remove comments
line = line.split('#')[0]
subsection_match = subsection_re.match(line)
group_match = group_re.match(line)
var_match = var_re.match(line)
if re.match(r'\s*runSeq\s*::', line):
runconfig['runSeq'] = []
inrunseq = True
elif re.match(r'\s*::\s*', line):
inrunseq = False
elif inrunseq:
runconfig['runSeq'].append(line)
elif subsection_match:
insubsection = subsection_match.group(1)
runconfig[insubsection] = {}
elif group_match:
runconfig[group_match.group(1)] = group_match.group(2)
elif insubsection and var_match:
runconfig[insubsection][var_match.group(1)] = var_match.group(2)
return runconfig
def findDiff(d1, d2, path="", case=None):
comment = ""
for k in d1.keys():
        if k not in d2:
comment += path + ":\n"
comment += k + " as key not in d2\n"
else:
if type(d1[k]) is dict:
if path == "":
path = k
else:
path = path + "->" + k
comment += findDiff(d1[k],d2[k], path=path, case=case)
else:
                if case is not None and case in d1[k]:
pass
elif "username" in k:
pass
elif "logfile" in k:
pass
elif d1[k] != d2[k]:
comment += path+":\n"
comment += " - {} : {}\n".format(k,d1[k])
comment += " + {} : {}\n".format(k,d2[k])
return comment
```
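A short usage sketch for the two comparison entry points above (all file and case names are hypothetical):
```python
from CIME.simple_compare import compare_files, compare_runconfigfiles

# Line-oriented comparison of a generated namelist against its baseline;
# 'case' lets _normalize_string_value strip case-name/test-id differences.
ok, comments = compare_files("baseline/atm_in", "run/atm_in",
                             case="SMS.f19_g16.A.mymach_gnu")
if not ok:
    print(comments)

# drv_in-style run-config files get a structured key/value comparison.
ok, comments = compare_runconfigfiles("baseline/drv_in", "run/drv_in")
```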
#### File: CIME/SystemTests/icp.py
```python
from CIME.XML.standard_module_setup import *
from CIME.SystemTests.system_tests_common import SystemTestsCommon
class ICP(SystemTestsCommon):
def __init__(self, case):
"""
initialize an object interface to file env_test.xml in the case directory
"""
SystemTestsCommon.__init__(self, case)
def build_phase(self, sharedlib_only=False, model_only=False):
self._case.set_value("CICE_AUTO_DECOMP", "false")
def run_phase(self):
self._case.set_value("CONTINUE_RUN",False)
self._case.set_value("REST_OPTION","none")
self._case.set_value("HIST_OPTION","$STOP_OPTION")
self._case.set_value("HIST_N","$STOP_N")
self._case.flush()
        self.run_indv()
```
#### File: CIME/SystemTests/nck.py
```python
from CIME.XML.standard_module_setup import *
from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
logger = logging.getLogger(__name__)
class NCK(SystemTestsCompareTwo):
def __init__(self, case):
self._comp_classes = []
SystemTestsCompareTwo.__init__(self, case,
separate_builds = True,
run_two_suffix = 'multiinst',
run_one_description = 'one instance',
run_two_description = 'two instances')
def _common_setup(self):
# We start by halving the number of tasks for both cases. This ensures
# that we use the same number of tasks per instance in both cases: For
# the two-instance case, we'll double this halved number, so you may
# think that the halving was unnecessary; but it's needed in case the
# original NTASKS was odd. (e.g., for NTASKS originally 15, we want to
# use NTASKS = int(15/2) * 2 = 14 tasks for case two.)
self._comp_classes = self._case.get_values("COMP_CLASSES")
self._comp_classes.remove("CPL")
for comp in self._comp_classes:
ntasks = self._case.get_value("NTASKS_{}".format(comp))
if ( ntasks > 1 ):
self._case.set_value("NTASKS_{}".format(comp), int(ntasks/2))
# the following assures that both cases use the same number of total tasks
rootpe = self._case.get_value("ROOTPE_{}".format(comp))
if ( rootpe > 1 ):
self._case.set_value("ROOTPE_{}".format(comp), int(rootpe+ntasks/2))
def _case_one_setup(self):
for comp in self._comp_classes:
self._case.set_value("NINST_{}".format(comp), 1)
def _case_two_setup(self):
for comp in self._comp_classes:
if (comp == "ESP"):
self._case.set_value("NINST_{}".format(comp), 1)
else:
self._case.set_value("NINST_{}".format(comp), 2)
ntasks = self._case.get_value("NTASKS_{}".format(comp))
rootpe = self._case.get_value("ROOTPE_{}".format(comp))
if ( rootpe > 1 ):
self._case.set_value("ROOTPE_{}".format(comp), int(rootpe-ntasks))
self._case.set_value("NTASKS_{}".format(comp), ntasks*2)
self._case.case_setup(test_mode=True, reset=True)
```
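The halve-then-double arithmetic in `_common_setup` / `_case_two_setup` above is what keeps the per-instance task count identical across the two runs even when the original NTASKS is odd, e.g.:
```python
# NTASKS originally 15: halve first so both cases use 7 tasks per instance,
# then double for the two-instance case.
ntasks_original = 15
ntasks_case_one = ntasks_original // 2   # 7 tasks, 1 instance
ntasks_case_two = ntasks_case_one * 2    # 14 tasks, 2 instances of 7 each
print(ntasks_case_one, ntasks_case_two)  # 7 14
```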
#### File: CIME/SystemTests/ncr.py
```python
from CIME.XML.standard_module_setup import *
from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
logger = logging.getLogger(__name__)
class NCR(SystemTestsCompareTwo):
def __init__(self, case):
"""
initialize an NCR test
"""
SystemTestsCompareTwo.__init__(self, case,
separate_builds = True,
run_two_suffix = "singleinst",
run_one_description = "two instances, each with the same number of tasks",
run_two_description = "default build")
def _comp_classes(self):
# Return the components which we need to set things for
# ESP cannot have more than one instance, so don't set anything for it
comp_classes = self._case.get_values("COMP_CLASSES")
if "CPL" in comp_classes:
comp_classes.remove("CPL")
if "ESP" in comp_classes:
comp_classes.remove("ESP")
return comp_classes
def _common_setup(self):
# Set the default number of tasks
for comp in self._comp_classes():
ntasks = self._case.get_value("NTASKS_{}".format(comp))
if ntasks > 1:
self._case.set_value("NTASKS_{}".format(comp), ntasks // 2)
def _case_one_setup(self):
# Set the number of instances, the ROOTPEs, and the number of tasks
# This case should have twice the number of instances and half the number of tasks
# All tasks should be running concurrently
# Note that this case must be the multiinstance one
# to correctly set the required number of nodes and avoid crashing
ntasks_sum = 0
for comp in self._comp_classes():
self._case.set_value("NINST_{}".format(comp), str(2))
self._case.set_value("ROOTPE_{}".format(comp), ntasks_sum)
ntasks = self._case.get_value("NTASKS_{}".format(comp)) * 2
ntasks_sum += ntasks
self._case.set_value("NTASKS_{}".format(comp), ntasks)
        # test_mode must be False here so the case.test file is updated
        # This ensures that the correct number of nodes are used in case it's larger than in case 2
        self._case.case_setup(test_mode=False, reset=True)
def _case_two_setup(self):
for comp in self._comp_classes():
self._case.set_value("NINST_{}".format(comp), str(1))
self._case.set_value("ROOTPE_{}".format(comp), 0)
```
#### File: CIME/SystemTests/pgn.py
```python
from __future__ import division
import os
import re
import json
import shutil
import logging
from collections import OrderedDict
from distutils import dir_util
import pandas as pd
import numpy as np
import CIME.test_status
import CIME.utils
from CIME.SystemTests.system_tests_common import SystemTestsCommon
from CIME.case.case_setup import case_setup
from CIME.XML.machines import Machines
import evv4esm # pylint: disable=import-error
from evv4esm.extensions import pg # pylint: disable=import-error
from evv4esm.__main__ import main as evv # pylint: disable=import-error
evv_lib_dir = os.path.abspath(os.path.dirname(evv4esm.__file__))
logger = logging.getLogger(__name__)
NUMBER_INITIAL_CONDITIONS = 6
PERTURBATIONS = OrderedDict([('woprt', 0.0),
('posprt', 1.0e-14),
('negprt', -1.0e-14),
])
FCLD_NC = 'cam.h0.cloud.nc'
INIT_COND_FILE_TEMPLATE = \
"SMS_Ly5.ne4_ne4.FC5AV1C-04P2.eos_intel.ne45y.{}.{}.0002-{:02d}-01-00000.nc"
# FIXME: should 'cam' be 'atm' now?
INSTANCE_FILE_TEMPLATE = '{}cam_{:04d}.h0.0001-01-01-00000{}.nc'
class PGN(SystemTestsCommon):
def __init__(self, case):
"""
initialize an object interface to the PGN test
"""
super(PGN, self).__init__(case)
def build_phase(self, sharedlib_only=False, model_only=False):
ninst = NUMBER_INITIAL_CONDITIONS * len(PERTURBATIONS)
logger.debug('PGN_INFO: number of instance: '+str(ninst))
default_ninst = self._case.get_value("NINST_ATM")
if default_ninst == 1: # if multi-instance is not already set
# Only want this to happen once. It will impact the sharedlib build
# so it has to happen here.
if not model_only:
# Lay all of the components out concurrently
logger.debug("PGN_INFO: Updating NINST for multi-instance in "
"env_mach_pes.xml")
for comp in ['ATM', 'OCN', 'WAV', 'GLC', 'ICE', 'ROF', 'LND']:
ntasks = self._case.get_value("NTASKS_{}".format(comp))
self._case.set_value("ROOTPE_{}".format(comp), 0)
self._case.set_value("NINST_{}".format(comp), ninst)
self._case.set_value("NTASKS_{}".format(comp), ntasks*ninst)
self._case.set_value("ROOTPE_CPL", 0)
self._case.set_value("NTASKS_CPL", ntasks*ninst)
self._case.flush()
case_setup(self._case, test_mode=False, reset=True)
self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only)
logger.debug("PGN_INFO: Updating user_nl_* files")
csmdata_root = self._case.get_value("DIN_LOC_ROOT")
csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4_v1_init")
csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_v1_init/b58d55680")
iinst = 1
for icond in range(1, NUMBER_INITIAL_CONDITIONS + 1):
fatm_in = os.path.join(csmdata_atm, INIT_COND_FILE_TEMPLATE.format('cam', 'i', icond))
flnd_in = os.path.join(csmdata_lnd, INIT_COND_FILE_TEMPLATE.format('clm2', 'r', icond))
for iprt in PERTURBATIONS.values():
with open('user_nl_cam_{:04d}'.format(iinst), 'w') as atmnlfile, \
open('user_nl_clm_{:04d}'.format(iinst), 'w') as lndnlfile:
atmnlfile.write("ncdata = '{}' \n".format(fatm_in))
lndnlfile.write("finidat = '{}' \n".format(flnd_in))
atmnlfile.write("avgflag_pertape = 'I' \n")
atmnlfile.write("nhtfrq = 1 \n")
atmnlfile.write("mfilt = 2 \n")
atmnlfile.write("ndens = 1 \n")
atmnlfile.write("pergro_mods = .true. \n")
atmnlfile.write("pergro_test_active = .true. \n")
if iprt != 0.0:
atmnlfile.write("pertlim = {} \n".format(iprt))
iinst += 1
self._case.set_value("STOP_N", "1")
self._case.set_value("STOP_OPTION", "nsteps")
def get_var_list(self):
"""
Get variable list for pergro specific output vars
"""
rundir = self._case.get_value("RUNDIR")
prg_fname = 'pergro_ptend_names.txt'
var_file = os.path.join(rundir, prg_fname)
CIME.utils.expect(os.path.isfile(var_file),
"File {} does not exist in: {}".format(prg_fname, rundir))
with open(var_file, 'r') as fvar:
var_list = fvar.readlines()
return list(map(str.strip, var_list))
def _compare_baseline(self):
"""
Compare baselines in the pergro test sense. That is,
compare PGE from the test simulation with the baseline
cloud
"""
with self._test_status:
self._test_status.set_status(CIME.test_status.BASELINE_PHASE,
CIME.test_status.TEST_FAIL_STATUS)
logger.debug("PGN_INFO:BASELINE COMPARISON STARTS")
run_dir = self._case.get_value("RUNDIR")
case_name = self._case.get_value("CASE")
base_dir = os.path.join(self._case.get_value("BASELINE_ROOT"),
self._case.get_value("BASECMP_CASE"))
var_list = self.get_var_list()
test_name = "{}".format(case_name.split('.')[-1])
evv_config = {
test_name: {
"module": os.path.join(evv_lib_dir, "extensions", "pg.py"),
"test-case": case_name,
"test-name": "Test",
"test-dir": run_dir,
"ref-name": "Baseline",
"ref-dir": base_dir,
"variables": var_list,
"perturbations": PERTURBATIONS,
"pge-cld": FCLD_NC,
"ninit": NUMBER_INITIAL_CONDITIONS,
"init-file-template": INIT_COND_FILE_TEMPLATE,
"instance-file-template": INSTANCE_FILE_TEMPLATE,
}
}
json_file = os.path.join(run_dir, '.'.join([case_name, 'json']))
with open(json_file, 'w') as config_file:
json.dump(evv_config, config_file, indent=4)
evv_out_dir = os.path.join(run_dir, '.'.join([case_name, 'evv']))
evv(['-e', json_file, '-o', evv_out_dir])
with open(os.path.join(evv_out_dir, 'index.json'), 'r') as evv_f:
evv_status = json.load(evv_f)
comments = ""
for evv_elem in evv_status['Data']['Elements']:
if evv_elem['Type'] == 'ValSummary' \
and evv_elem['TableTitle'] == 'Perturbation growth test':
comments = "; ".join("{}: {}".format(key, val) for key, val
in evv_elem['Data'][test_name][''].items())
if evv_elem['Data'][test_name]['']['Test status'].lower() == 'pass':
self._test_status.set_status(CIME.test_status.BASELINE_PHASE,
CIME.test_status.TEST_PASS_STATUS)
break
status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE)
mach_name = self._case.get_value("MACH")
mach_obj = Machines(machine=mach_name)
htmlroot = CIME.utils.get_htmlroot(mach_obj)
urlroot = CIME.utils.get_urlroot(mach_obj)
if htmlroot is not None:
with CIME.utils.SharedArea():
dir_util.copy_tree(evv_out_dir, os.path.join(htmlroot, 'evv', case_name), preserve_mode=False)
if urlroot is None:
urlroot = "[{}_URL]".format(mach_name.capitalize())
viewing = "{}/evv/{}/index.html".format(urlroot, case_name)
else:
viewing = "{}\n" \
" EVV viewing instructions can be found at: " \
" https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/" \
"climate_reproducibility/README.md#test-passfail-and-extended-output" \
"".format(evv_out_dir)
comments = "{} {} for test '{}'.\n" \
" {}\n" \
" EVV results can be viewed at:\n" \
" {}".format(CIME.test_status.BASELINE_PHASE, status, test_name, comments, viewing)
CIME.utils.append_testlog(comments, self._orig_caseroot)
def run_phase(self):
logger.debug("PGN_INFO: RUN PHASE")
self.run_indv()
# Here were are in case directory, we need to go to the run directory
# and rename files
rundir = self._case.get_value("RUNDIR")
casename = self._case.get_value("CASE")
logger.debug("PGN_INFO: Case name is:{}".format(casename))
for icond in range(NUMBER_INITIAL_CONDITIONS):
for iprt, (prt_name, prt_value) in enumerate(PERTURBATIONS.items()):
iinst = pg._sub2instance(icond, iprt, len(PERTURBATIONS))
fname = os.path.join(rundir, INSTANCE_FILE_TEMPLATE.format(casename + '.', iinst, ''))
renamed_fname = re.sub(r'\.nc$', '_{}.nc'.format(prt_name), fname)
logger.debug("PGN_INFO: fname to rename:{}".format(fname))
logger.debug("PGN_INFO: Renamed file:{}".format(renamed_fname))
try:
shutil.move(fname, renamed_fname)
except IOError:
CIME.utils.expect(os.path.isfile(renamed_fname),
"ERROR: File {} does not exist".format(renamed_fname))
logger.debug("PGN_INFO: Renamed file already exists:"
"{}".format(renamed_fname))
logger.debug("PGN_INFO: RUN PHASE ENDS")
def _generate_baseline(self):
super(PGN, self)._generate_baseline()
basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"),
self._case.get_value("BASEGEN_CASE"))
rundir = self._case.get_value("RUNDIR")
casename = self._case.get_value("CASE")
var_list = self.get_var_list()
nvar = len(var_list)
nprt = len(PERTURBATIONS)
rmse_prototype = {}
for icond in range(NUMBER_INITIAL_CONDITIONS):
prt_rmse = {}
for iprt, prt_name in enumerate(PERTURBATIONS):
if prt_name == 'woprt':
continue
iinst_ctrl = pg._sub2instance(icond, 0, nprt)
ifile_ctrl = os.path.join(rundir,
INSTANCE_FILE_TEMPLATE.format(casename + '.', iinst_ctrl, '_woprt'))
iinst_test = pg._sub2instance(icond, iprt, nprt)
ifile_test = os.path.join(rundir,
INSTANCE_FILE_TEMPLATE.format(casename + '.', iinst_test, '_' + prt_name))
prt_rmse[prt_name] = pg.variables_rmse(ifile_test, ifile_ctrl, var_list, 't_')
rmse_prototype[icond] = pd.concat(prt_rmse)
rmse = pd.concat(rmse_prototype)
cld_rmse = np.reshape(rmse.RMSE.values, (NUMBER_INITIAL_CONDITIONS, nprt - 1, nvar))
pg.rmse_writer(os.path.join(rundir, FCLD_NC),
cld_rmse, list(PERTURBATIONS.keys()), var_list, INIT_COND_FILE_TEMPLATE)
logger.debug("PGN_INFO:copy:{} to {}".format(FCLD_NC, basegen_dir))
shutil.copy(os.path.join(rundir, FCLD_NC), basegen_dir)
```
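The bookkeeping in `run_phase` and `_generate_baseline` above leans on the two filename templates defined at the top of the module; expanding them by hand makes the renaming easier to follow (the case name and instance number below are made up):
```python
INIT_COND_FILE_TEMPLATE = \
    "SMS_Ly5.ne4_ne4.FC5AV1C-04P2.eos_intel.ne45y.{}.{}.0002-{:02d}-01-00000.nc"
INSTANCE_FILE_TEMPLATE = '{}cam_{:04d}.h0.0001-01-01-00000{}.nc'

# CAM initial-condition file for ensemble member 3.
print(INIT_COND_FILE_TEMPLATE.format('cam', 'i', 3))

# History file of instance 7 for a hypothetical case, after run_phase has
# renamed it to record which perturbation produced it.
casename = "PGN_P24x1.ne4_ne4.FC5AV1C-04P2.mymach_intel.20200101_120000"
print(INSTANCE_FILE_TEMPLATE.format(casename + '.', 7, '_posprt'))
```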
#### File: CIME/XML/env_run.py
```python
from CIME.XML.standard_module_setup import *
from CIME.XML.env_base import EnvBase
logger = logging.getLogger(__name__)
class EnvRun(EnvBase):
def __init__(self, case_root=None, infile="env_run.xml", components=None, read_only=False):
"""
initialize an object interface to file env_run.xml in the case directory
"""
self._components = components
schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_entry_id.xsd")
EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only)
```
#### File: CIME/XML/namelist_definition.py
```python
import re
import collections
from CIME.namelist import fortran_namelist_base_value, \
is_valid_fortran_namelist_literal, character_literal_to_string, \
expand_literal_list, Namelist, get_fortran_name_only
from CIME.XML.standard_module_setup import *
from CIME.XML.entry_id import EntryID
from CIME.XML.files import Files
logger = logging.getLogger(__name__)
_array_size_re = re.compile(r'^(?P<type>[^(]+)\((?P<size>[^)]+)\)$')
class CaseInsensitiveDict(dict):
"""Basic case insensitive dict with strings only keys.
From https://stackoverflow.com/a/27890005 """
proxy = {}
def __init__(self, data):
dict.__init__(self)
self.proxy = dict((k.lower(), k) for k in data)
for k in data:
self[k] = data[k]
def __contains__(self, k):
return k.lower() in self.proxy
def __delitem__(self, k):
key = self.proxy[k.lower()]
super(CaseInsensitiveDict, self).__delitem__(key)
del self.proxy[k.lower()]
def __getitem__(self, k):
key = self.proxy[k.lower()]
return super(CaseInsensitiveDict, self).__getitem__(key)
def get(self, k, default=None):
return self[k] if k in self else default
def __setitem__(self, k, v):
super(CaseInsensitiveDict, self).__setitem__(k, v)
self.proxy[k.lower()] = k
class NamelistDefinition(EntryID):
"""Class representing variable definitions for a namelist.
This class inherits from `EntryID`, and supports most inherited methods;
however, `set_value` is unsupported.
Additional public methods:
- dict_to_namelist.
- is_valid_value
- validate
"""
def __init__(self, infile, files=None):
"""Construct a `NamelistDefinition` from an XML file."""
# if the file is invalid we may not be able to check the version
# but we need to do it this way until we remove the version 1 files
schema = None
if files is None:
files = Files()
schema = files.get_schema("NAMELIST_DEFINITION_FILE")
expect(os.path.isfile(infile), "File {} does not exist".format(infile))
super(NamelistDefinition, self).__init__(infile, schema=schema)
self._attributes = {}
self._entry_nodes = []
self._entry_ids = []
self._valid_values = {}
self._entry_types = {}
self._group_names = CaseInsensitiveDict({})
self._nodes = {}
def set_nodes(self, skip_groups=None):
"""
populates the object data types for all nodes that are not part of the skip_groups array
returns nodes that do not have attributes of `skip_default_entry` or `per_stream_entry`
"""
default_nodes = []
for node in self.get_children("entry"):
name = self.get(node, "id")
expect(name == name.lower(),"ERROR id field in this file must be lowercase, id={}".format(name))
skip_default_entry = self.get(node, "skip_default_entry") == "true"
per_stream_entry = self.get(node, "per_stream_entry") == "true"
set_node_values = False
if skip_groups:
group_name = self._get_group_name(node)
if not group_name in skip_groups:
self._entry_nodes.append(node)
set_node_values = True
if not skip_default_entry and not per_stream_entry:
default_nodes.append(node)
else:
self._entry_nodes.append(node)
set_node_values = True
if not skip_default_entry and not per_stream_entry:
default_nodes.append(node)
if set_node_values:
self._entry_ids.append(name)
self._nodes[name] = node
self._entry_types[name] = self._get_type(node)
self._valid_values[name] = self._get_valid_values(node)
self._group_names[name] = self._get_group_name(node)
return default_nodes
def _get_group_name(self, node=None):
if self.get_version() == 1.0:
group = self.get(node, 'group')
elif self.get_version() >= 2.0:
group = self.get_element_text("group", root=node)
return(group)
def _get_type(self, node):
if self.get_version() == 1.0:
type_info = self.get(node, 'type')
elif self.get_version() >= 2.0:
type_info = self._get_type_info(node)
return(type_info)
def _get_valid_values(self, node):
# The "valid_values" attribute is not required, and an empty string has
# the same effect as not specifying it.
        # Returns a list from a comma separated string in xml
valid_values = ''
if self.get_version() == 1.0:
valid_values = self.get(node, 'valid_values')
elif self.get_version() >= 2.0:
valid_values = self._get_node_element_info(node, "valid_values")
if valid_values == '':
valid_values = None
if valid_values is not None:
valid_values = valid_values.split(',')
return valid_values
def get_group(self, name):
return self._group_names[name]
def add_attributes(self, attributes):
self._attributes = attributes
def get_entry_nodes(self):
return self._entry_nodes
def get_per_stream_entries(self):
entries = []
nodes = self.get_children("entry")
for node in nodes:
per_stream_entry = self.get(node, "per_stream_entry") == "true"
if per_stream_entry:
entries.append(self.get(node, "id"))
return entries
# Currently we don't use this object to construct new files, and it's no
# good for that purpose anyway, so stop this function from being called.
def set_value(self, vid, value, subgroup=None, ignore_type=True):
"""This function is not implemented."""
raise TypeError("NamelistDefinition does not support `set_value`.")
def get_value_match(self, vid, attributes=None, exact_match=True, entry_node=None):
"""Return the default value for the variable named `vid`.
The return value is a list of strings corresponding to the
comma-separated list of entries for the value (length 1 for scalars). If
there is no default value in the file, this returns `None`.
"""
# Merge internal attributes with those passed in.
all_attributes = {}
if self._attributes is not None:
all_attributes.update(self._attributes)
if attributes is not None:
all_attributes.update(attributes)
if entry_node is None:
entry_node = self._nodes[vid]
value = super(NamelistDefinition, self).get_value_match(vid.lower(),attributes=all_attributes, exact_match=exact_match,
entry_node=entry_node)
if value is None:
value = ''
else:
value = self._split_defaults_text(value)
return value
@staticmethod
def _split_defaults_text(string):
"""Take a comma-separated list in a string, and split it into a list."""
# Some trickiness here; we want to split items on commas, but not inside
# quote-delimited strings. Stripping whitespace is also useful.
value = []
if len(string):
pos = 0
delim = None
for i, char in enumerate(string):
if delim is None:
# If not inside a string...
if char in ('"', "'"):
# if we have a quote character, start a string.
delim = char
elif char == ',':
# if we have a comma, this is a new value.
value.append(string[pos:i].strip())
pos = i+1
else:
# If inside a string, the only thing that can happen is the end
# of the string.
if char == delim:
delim = None
value.append(string[pos:].strip())
return value
def split_type_string(self, name):
"""Split a 'type' attribute string into its component parts.
The `name` argument is the variable name.
This is used for error reporting purposes.
The return value is a tuple consisting of the type itself, a length
(which is an integer for character variables, otherwise `None`), and the
size of the array (which is 1 for scalar variables).
"""
type_string = self._entry_types[name]
# 'char' is frequently used as an abbreviation of 'character'.
type_string = type_string.replace('char', 'character')
# Separate into a size and the rest of the type.
size_match = _array_size_re.search(type_string)
if size_match:
type_string = size_match.group('type')
size_string = size_match.group('size')
try:
size = int(size_string)
except ValueError:
expect(False,
"In namelist definition, variable {} had the non-integer string {!r} specified as an array size.".format(name, size_string))
else:
size = 1
# Separate into a type and an optional length.
type_, star, length = type_string.partition('*')
if star == '*':
# Length allowed only for character variables.
expect(type_ == 'character',
"In namelist definition, length specified for non-character "
"variable {}.".format(name))
# Check that the length is actually an integer, to make the error
# message a bit cleaner if the xml input is bad.
try:
max_len = int(length)
except ValueError:
expect(False,
"In namelist definition, character variable {} had the non-integer string {!r} specified as a length.".format(name, length))
else:
max_len = None
return type_, max_len, size
@staticmethod
def _canonicalize_value(type_, value):
"""Create 'canonical' version of a value for comparison purposes."""
canonical_value = [fortran_namelist_base_value(scalar)
for scalar in value]
canonical_value = [scalar for scalar in canonical_value if scalar != '']
if type_ == 'character':
canonical_value = [character_literal_to_string(scalar)
for scalar in canonical_value]
elif type_ == 'integer':
canonical_value = [int(scalar) for scalar in canonical_value]
return canonical_value
def is_valid_value(self, name, value):
"""Determine whether a value is valid for the named variable.
The `value` argument must be a list of strings formatted as they would
appear in the namelist (even for scalar variables, in which case the
length of the list is always 1).
"""
# Separate into a type, optional length, and optional size.
type_, max_len, size = self.split_type_string(name)
invalid = []
# Check value against type.
for scalar in value:
if not is_valid_fortran_namelist_literal(type_, scalar):
invalid.append(scalar)
if len(invalid) > 0:
logger.warning("Invalid values {}".format(invalid))
return False
# Now that we know that the strings as input are valid Fortran, do some
# canonicalization for further checks.
canonical_value = self._canonicalize_value(type_, value)
# Check maximum length (if applicable).
if max_len is not None:
for scalar in canonical_value:
if len(scalar) > max_len:
return False
# Check valid value constraints (if applicable).
valid_values = self._valid_values[name]
if valid_values is not None:
expect(type_ in ('integer', 'character'),
"Found valid_values attribute for variable {} with type {}, but valid_values only allowed for character and integer variables.".format(name, type_))
if type_ == 'integer':
compare_list = [int(vv) for vv in valid_values]
else:
compare_list = valid_values
for scalar in canonical_value:
if scalar not in compare_list:
invalid.append(scalar)
if len(invalid) > 0:
logger.warning("Invalid values {}".format(invalid))
return False
# Check size of input array.
if len(expand_literal_list(value)) > size:
expect(False, "Value index exceeds variable size for variable {}, allowed array length is {} value array size is {}".format(name, size, len(expand_literal_list(value))))
return True
def _expect_variable_in_definition(self, name, variable_template):
"""Used to get a better error message for an unexpected variable.
        case insensitive match"""
expect(name in self._entry_ids,
(variable_template + " is not in the namelist definition.").format(str(name)))
def _user_modifiable_in_variable_definition(self, name):
# Is name user modifiable?
node = self.get_optional_child("entry", attributes={'id': name})
user_modifiable_only_by_xml = self.get(node, 'modify_via_xml')
if user_modifiable_only_by_xml is not None:
expect(False,
"Cannot change {} in user_nl file: set via xml variable {}".format(name, user_modifiable_only_by_xml))
user_cannot_modify = self.get(node, 'cannot_modify_by_user_nl')
if user_cannot_modify is not None:
expect(False,
"Cannot change {} in user_nl file: {}".format(name, user_cannot_modify))
def _generate_variable_template(self, filename):
# Improve error reporting when a file name is provided.
if filename is None:
variable_template = "Variable {!r}"
else:
# for the next step we want the name of the original user_nl file not the internal one
# We do this by extracting the component name from the filepath string
if "Buildconf" in filename and "namelist_infile" in filename:
msgfn = "user_nl_" + (filename.split(os.sep)[-2])[:-4]
else:
msgfn = filename
variable_template = "Variable {!r} from file " + repr(str(msgfn))
return variable_template
def validate(self, namelist,filename=None):
"""Validate a namelist object against this definition.
The optional `filename` argument can be used to assist in error
reporting when the namelist comes from a specific, known file.
"""
variable_template = self._generate_variable_template(filename)
# Iterate through variables.
for group_name in namelist.get_group_names():
for variable_name in namelist.get_variable_names(group_name):
# Check that the variable is defined...
qualified_variable_name = get_fortran_name_only(variable_name)
self._expect_variable_in_definition(qualified_variable_name, variable_template)
# Check if can actually change this variable via filename change
if filename is not None:
self._user_modifiable_in_variable_definition(qualified_variable_name)
# and has the right group name...
var_group = self.get_group(qualified_variable_name)
expect(var_group == group_name,
(variable_template + " is in a group named {!r}, but should be in {!r}.").format(str(variable_name), str(group_name), str(var_group)))
# and has a valid value.
value = namelist.get_variable_value(group_name, variable_name)
expect(self.is_valid_value(qualified_variable_name, value),
(variable_template + " has invalid value {!r}.").format(str(variable_name), [str(scalar) for scalar in value]))
def dict_to_namelist(self, dict_, filename=None):
"""Converts a dictionary of name-value pairs to a `Namelist`.
The input is assumed to be similar to the output of `parse` when
`groupless=True` is set. This function uses the namelist definition file
to look up the namelist group associated with each variable, and uses
this information to create a true `Namelist` object.
The optional `filename` argument can be used to assist in error
reporting when the namelist comes from a specific, known file.
"""
# Improve error reporting when a file name is provided.
variable_template = self._generate_variable_template(filename)
groups = {}
for variable_name in dict_:
variable_lc = variable_name.lower()
qualified_varname = get_fortran_name_only(variable_lc)
self._expect_variable_in_definition(qualified_varname, variable_template)
group_name = self.get_group(qualified_varname)
expect (group_name is not None, "No group found for var {}".format(variable_lc))
if group_name not in groups:
groups[group_name] = collections.OrderedDict()
groups[group_name][variable_lc] = dict_[variable_name]
return Namelist(groups)
def get_input_pathname(self, name):
node = self._nodes[name]
if self.get_version() == 1.0:
input_pathname = self.get(node, 'input_pathname')
elif self.get_version() >= 2.0:
input_pathname = self._get_node_element_info(node, "input_pathname")
return(input_pathname)
# pylint: disable=arguments-differ
def get_default_value(self, item, attribute=None):
"""Return the default value for the variable named `item`.
The return value is a list of strings corresponding to the
comma-separated list of entries for the value (length 1 for scalars). If
there is no default value in the file, this returns `None`.
"""
# Merge internal attributes with those passed in.
all_attributes = {}
if self._attributes is not None:
all_attributes.update(self._attributes)
if attribute is not None:
all_attributes.update(attribute)
value = self.get_value_match(item.lower(), all_attributes, True)
return self._split_defaults_text(value)
```
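`_split_defaults_text` is the piece that makes comma-separated XML defaults safe to split even when a quoted value itself contains commas; since it is a staticmethod it can be exercised directly (assuming CIME's `lib` directory is importable):
```python
from CIME.XML.namelist_definition import NamelistDefinition

# Commas inside quoted strings are preserved; surrounding whitespace is stripped.
print(NamelistDefinition._split_defaults_text("'a,b', 'c' , 3"))
# -> ["'a,b'", "'c'", '3']
```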
#### File: CIME/XML/stream.py
```python
from CIME.XML.standard_module_setup import *
from CIME.XML.generic_xml import GenericXML
from CIME.XML.files import Files
from CIME.utils import expect
logger = logging.getLogger(__name__)
class Stream(GenericXML):
def __init__(self, infile=None, files=None):
"""
initialize an object
"""
if files is None:
files = Files()
schema = None
GenericXML.__init__(self, infile, schema=schema)
def get_value(self, item, attribute=None, resolved=True, subgroup=None):
"""
Get Value of fields in a stream.xml file
"""
expect(subgroup is None, "This class does not support subgroups")
value = None
node = None
names = item.split('/')
for name in names:
node = self.scan_child(name, root=node)
if node is not None:
value = self.text(node).strip()
if value is None:
# if all else fails
#pylint: disable=assignment-from-none
value = GenericXML.get_value(self, item, attribute, resolved, subgroup)
if resolved:
if value is not None:
value = self.get_resolved_value(value)
elif item in os.environ:
value = os.environ[item]
return value
```
#### File: scripts/Tools/testreporter.py
```python
from standard_script_setup import *
from CIME.XML.env_build import EnvBuild
from CIME.XML.env_case import EnvCase
from CIME.XML.env_test import EnvTest
from CIME.XML.test_reporter import TestReporter
from CIME.utils import expect
from CIME.XML.generic_xml import GenericXML
import glob
###############################################################################
def parse_command_line(args):
###############################################################################
parser = argparse.ArgumentParser()
CIME.utils.setup_standard_logging_options(parser)
# Parse command line options
#parser = argparse.ArgumentParser(description='Arguements for testreporter')
parser.add_argument("--tagname",
help="Name of the tag being tested.")
parser.add_argument("--testid",
help="Test id, ie c2_0_a6g_ing,c2_0_b6g_gnu.")
parser.add_argument("--testroot",
help="Root directory for tests to populate the database.")
parser.add_argument("--testtype",
help="Type of test, prealpha or prebeta.")
parser.add_argument("--dryrun",action="store_true",
help="Do a dry run, database will not be populated.")
parser.add_argument("--dumpxml",action="store_true",
help="Dump XML test results to sceen.")
args = parser.parse_args()
CIME.utils.parse_args_and_handle_standard_logging_options(args)
return args.testroot, args.testid, args.tagname, args.testtype, args.dryrun, args.dumpxml
###############################################################################
def get_testreporter_xml(testroot, testid, tagname, testtype):
###############################################################################
os.chdir(testroot)
#
# Retrieve compiler name and mpi library
#
xml_file=glob.glob("*"+testid+"/env_build.xml")
expect(len(xml_file) > 0, "Tests not found. It's possible your testid, {} is wrong.".format(testid))
envxml=(EnvBuild(".",infile=xml_file[0]))
compiler=envxml.get_value("COMPILER")
mpilib=envxml.get_value("MPILIB")
#
# Retrieve machine name
#
xml_file=glob.glob("*"+testid+"/env_case.xml")
envxml=(EnvCase(".",infile=xml_file[0]))
machine=envxml.get_value("MACH")
#
# Retrieve baseline tag to compare to
#
xml_file=glob.glob("*"+testid+"/env_test.xml")
envxml=(EnvTest(".",infile=xml_file[0]))
baseline = envxml.get_value("BASELINE_NAME_CMP")
#
# Create XML header
#
testxml=TestReporter()
testxml.setup_header(tagname,machine,compiler,mpilib,testroot,testtype,baseline)
#
# Create lists on tests based on the testid in the testroot directory.
#
test_names=glob.glob("*"+testid)
#
# Loop over all tests and parse the test results
#
test_status={}
for test_name in test_names:
if not os.path.isfile(test_name+"/TestStatus"):
continue
test_status['COMMENT']=""
test_status['BASELINE']='----'
test_status['MEMCOMP']='----'
test_status['MEMLEAK']='----'
test_status['NLCOMP']='----'
test_status['STATUS']='----'
test_status['TPUTCOMP']='----'
        #
        # Try to read TestStatus; if it cannot be read, mark the test as failed and move on.
        #
try:
lines = [line.rstrip('\n') for line in open(test_name+"/TestStatus")]
except (IOError, OSError):
test_status['STATUS']="FAIL"
test_status['COMMENT']="TestStatus missing. "
continue
#
# Loop over each line of TestStatus, and check for different types of failures.
#
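        # Illustrative sketch of the expected format (example values only): each
        # TestStatus line starts with a status word followed by the test name and
        # the phase, e.g.
        #     PASS ERS.f19_g16.A.mymachine_gnu RUN
        # so line[0:4] picks out PASS/FAIL/PEND and the phase name appears in the
        # later whitespace-separated fields.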
for line in lines:
if "NLCOMP" in line:
test_status['NLCOMP']=line[0:4]
if "MEMLEAK" in line:
test_status['MEMLEAK']=line[0:4]
if "MEMCOMP" in line:
test_status['MEMCOMP']=line[0:4]
if "BASELINE" in line:
test_status['BASELINE']=line[0:4]
if "TPUTCOMP" in line:
test_status['TPUTCOMP']=line[0:4]
if "FAIL PFS" in line:
test_status['STATUS']="FAIL"
if "INIT" in line:
test_status['INIT']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['STATUS']="SFAIL"
test_status['COMMENT']+="INIT fail! "
break
if "CREATE_NEWCASE" in line:
test_status['CREATE_NEWCASE']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['STATUS']="SFAIL"
test_status['COMMENT']+="CREATE_NEWCASE fail! "
break
if "XML" in line:
test_status['XML']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['STATUS']="SFAIL"
test_status['COMMENT']+="XML fail! "
break
if "SETUP" in line:
test_status['SETUP']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['STATUS']="SFAIL"
test_status['COMMENT']+="SETUP fail! "
break
if "SHAREDLIB_BUILD" in line:
test_status['SHAREDLIB_BUILD']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['STATUS']="CFAIL"
test_status['COMMENT']+="SHAREDLIB_BUILD fail! "
break
if "MODEL_BUILD" in line:
test_status['MODEL_BUILD']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['STATUS']="CFAIL"
test_status['COMMENT']+="MODEL_BUILD fail! "
break
if "SUBMIT" in line:
test_status['STATUS']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['COMMENT']+="SUBMIT fail! "
break
if "RUN" in line:
test_status['STATUS']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['COMMENT']+="RUN fail! "
break
if "COMPARE_base_rest" in line:
test_status['STATUS']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['COMMENT']+="Restart fail! "
break
if "COMPARE_base_hybrid" in line:
test_status['STATUS']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['COMMENT']+="Hybrid fail! "
break
if "COMPARE_base_multiinst" in line:
test_status['STATUS']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['COMMENT']+="Multi instance fail! "
break
if "COMPARE_base_test" in line:
test_status['STATUS']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['COMMENT']+="Base test fail! "
break
if "COMPARE_base_single_thread" in line:
test_status['STATUS']=line[0:4]
if line[0:4] in ("FAIL","PEND"):
test_status['COMMENT']+="Thread test fail! "
break
#
# Do not include time comments. Just a preference to have cleaner comments in the test database
#
try:
if 'time=' not in line and 'GENERATE' not in line:
if 'BASELINE' not in line:
test_status['COMMENT']+=line.split(' ',3)[3]+' '
else:
test_status['COMMENT']+=line.split(' ',4)[4]+' '
except Exception: # Probably want to be more specific here
pass
#
# Fill in the xml with the test results
#
testxml.add_result(test_name,test_status)
return testxml
##############################################################################
def _main_func():
###############################################################################
testroot, testid, tagname, testtype, dryrun, dumpxml = parse_command_line(sys.argv)
testxml = get_testreporter_xml(testroot, testid, tagname, testtype)
#
# Dump xml to a file.
#
if dumpxml:
GenericXML.write(testxml,outfile="TestRecord.xml")
#
# Prompt for username and password, then post the XML string to the test database website
#
if not dryrun:
testxml.push2testdb()
###############################################################################
if __name__ == "__main__":
_main_func()
```
#### File: tools/load_balancing_tool/optimize_model.py
```python
import sys
import copy
import logging
import operator
import importlib
from CIME.utils import expect
try:
import pulp
except ImportError as e:
    sys.stderr.write("pulp library not installed or not found. "
                     "Try pip install [--user] pulp\n")
    raise e
logger = logging.getLogger(__name__)
def solver_factory(data):
"""
load data either from a json file or dictionary
"""
    expect('totaltasks' in data, "totaltasks not found in data")
layout = data['layout']
sp = layout.rsplit('.', 1)
try:
if len(sp) > 1:
layout_module = importlib.import_module(sp[0])
layout = sp[1]
else:
import layouts
layout_module = layouts
    except ImportError:
        expect(False, "cannot import %s\n" % layout)
    try:
        solverclass = getattr(layout_module, layout)
    except AttributeError:
        expect(False, "layout class %s not found in %s\n" %
               (layout, layout_module))
solver = solverclass()
for c in solver.get_required_components():
        assert c in data, "ERROR: component %s not found in data" % c
solver.set_data(data)
return solver
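# Minimal usage sketch (hypothetical data; 'IceLndAtmOcn' is an assumed layout
# class name, not necessarily one shipped with this tool):
#   data = {'totaltasks': 64,
#           'layout': 'IceLndAtmOcn',
#           'ICE': {'ntasks': [2, 4, 8], 'cost': [10.0, 6.0, 4.0],
#                   'nthrds': [1, 1, 1], 'blocksize': 8},
#           ...}                      # plus entries for the other required components
#   solver = solver_factory(data)     # validates the data and calls set_data()
#   solver.optimize()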
class ModelData:
"""
Convert dictionary data entry into usable object
"""
def __init__(self, name, model_dict):
self.name = name
self.blocksize = model_dict['blocksize']
self.nthrds = model_dict['nthrds'][0]
ntasks = copy.deepcopy(model_dict['ntasks'])
cost = copy.deepcopy(model_dict['cost'])
assert len(ntasks) == len(cost), "ntasks data not same length as cost for %s" % name
# sort smallest ntasks to largest
        tup = list(zip(*sorted(zip(cost, ntasks),
                               key=operator.itemgetter(1))))
        self.cost = list(tup[0])
        self.ntasks = list(tup[1])
for j in self.ntasks:
if j > 1 and j % self.blocksize:
logger.warning("WARNING: %s pe %d not divisible by "
"blocksize %d. Results may be invalid\n",
name, j, self.blocksize)
class OptimizeModel(object):
STATE_UNDEFINED = 0
STATE_UNSOLVED = 1
STATE_SOLVED_OK = 2
STATE_SOLVED_BAD = 3
states = ['Undefined', 'Unsolved', 'Solved', 'No Solution']
def __init__(self):
self.models = {}
self.state = self.STATE_UNDEFINED
self.X = {}
self.constraints = []
self.maxtasks = 0
def set_data(self, data_dict):
"""
Add data to the model.
data_dict is dictionary of components with their data
example: {'totaltasks':64
'ICE': {'ntasks': [2,4,8],
                           'cost': [10.0,6.0,4.0],
'nthrds': [1,1,1],
'blocksize': 8}
'LND': {...}
}
data is extrapolated as needed for n=1 and n=totaltasks
sets state to STATE_UNSOLVED
"""
# get deep copy, because we need to divide ntasks by blocksize
self.maxtasks = data_dict['totaltasks']
for key in data_dict:
if isinstance(data_dict[key], dict) and 'ntasks' in data_dict[key]:
self.models[key] = ModelData(key, data_dict[key])
# extrapolate for n=1 and n=maxtasks
for m in self.models.values():
m.extrapolated = [False] * len(m.cost)
# add in data for ntasks=1 if not provided
if m.ntasks[0] > 1:
m.cost.insert(0, m.ntasks[0] * m.cost[0])
m.ntasks.insert(0, 1)
m.extrapolated.insert(0, True)
# add in data for maxtasks if not available
# assume same scaling factor as previous interval
if len(m.ntasks) > 1 and m.ntasks[-1] < self.maxtasks:
if m.cost[-2] <= 0.0:
factor = 1.0
elif len(m.ntasks) > 1:
factor = (1.0 - m.cost[-1]/m.cost[-2]) / \
(1.0 - 1. * m.ntasks[-2] / m.ntasks[-1])
else:
# not much information to go on ...
factor = 1.0
m.cost.append(m.cost[-1] * (1.0 - factor +
factor * m.ntasks[-1] / self.maxtasks))
m.ntasks.append(self.maxtasks)
m.extrapolated.append(True)
self.check_requirements()
self.state = self.STATE_UNSOLVED
def add_model_constraints(self):
"""
Build constraints based on the cost vs ntask models
This should be the same for any layout so is provided in base class
Assumes cost variables are 'Txxx' and ntask variables are 'Nxxx'
"""
assert self.state != self.STATE_UNDEFINED,\
"set_data() must be called before add_model_constraints()"
for k in self.get_required_components():
m = self.models[k]
tk = 'T' + k.lower() # cost(time) key
nk = 'N' + k.lower() # nprocs key
for i in range(0, len(m.cost) - 1):
slope = (m.cost[i+1] - m.cost[i]) / (1. * m.ntasks[i+1] - m.ntasks[i])
self.constraints.append([self.X[tk] - slope * self.X[nk] >= \
m.cost[i] - slope * m.ntasks[i],
"T%s - %f*N%s >= %f" % \
(k.lower(), slope, k.lower(),
m.cost[i] - slope * m.ntasks[i])])
if slope > 0:
logger.warning("WARNING: Nonconvex cost function for model "
"%s. Review costs to ensure data is correct "
"(--graph_models or --print_models)", k)
break
if slope == 0:
break
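    # Worked example of one constraint (illustrative numbers): with cost points
    # (ntasks, cost) = (4, 6.0) and (8, 4.0) for component ICE, the slope is
    # (4.0 - 6.0) / (8 - 4) = -0.5, giving the piecewise-linear lower bound
    #     Tice + 0.5 * Nice >= 6.0 + 0.5 * 4 = 8.0
    # i.e. the feasible region lies on or above each chord of the cost curve.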
def get_required_components(self):
"""
Should be overridden by derived class. Return a list of required
components (capitalized) used in the layout.
Example: return ['ATM', 'LND', 'ICE']
"""
return []
def check_requirements(self):
"""
Check to make sure that each element of the subclass's list of
required components has some data provided.
"""
for r in self.get_required_components():
if r not in self.models:
logger.critical("Data for component %s not available", r)
def write_timings(self, fd=sys.stdout, level=logging.DEBUG):
"""
Print out the data used for the ntasks/cost models.
Can be used to check that the data provided to the
model is reasonable. Also see graph_costs()
"""
assert self.state != self.STATE_UNDEFINED,\
"set_data() must be called before write_timings()"
for k in self.models:
m = self.models[k]
message = "***%s***" % k
if fd is not None:
fd.write("\n" + message + "\n")
logger.log(level, message)
for i in range(len(m.cost)):
extra = ""
if m.extrapolated[i]:
extra = " (extrapolated)"
message = "%4d: %f%s" % \
(m.ntasks[i], m.cost[i], extra)
if fd is not None:
fd.write(message + "\n")
logger.log(level, message)
def graph_costs(self):
"""
Use matplotlib to graph the ntasks/cost data.
This provides a quick visual to check that the
data used for the optimization is reasonable.
If matplotlib is not available, nothing will happen
"""
assert self.state != self.STATE_UNDEFINED,\
"set_data() must be called before graph_costs()"
try:
import matplotlib.pyplot as pyplot
except ImportError:
logger.info("matplotlib not found, skipping graphs")
return
nplots = len(self.models)
        nrows = (nplots + 1) // 2
ncols = 2
fig, ax = pyplot.subplots(nrows, ncols)
row = 0; col = 0
for k in self.models:
m = self.models[k]
p = ax[row, col]
p.loglog(m.ntasks, m.cost, 'k-')
for i in range(len(m.ntasks)):
if not m.extrapolated[i]:
p.plot(m.ntasks[i], m.cost[i], 'bx')
else:
p.plot(m.ntasks[i], m.cost[i], 'rx')
p.set_title(m.name)
p.set_xlabel('ntasks')
p.set_ylabel('cost (s/mday)')
p.set_xlim([1, self.maxtasks])
row += 1
if row == nrows:
row = 0
col += 1
fig.suptitle("log-log plot of Cost/mday vs ntasks for designated "
"components.\nPerfectly scalable components would have a "
"straight line. Blue 'X's designate points\nfrom data, "
"red 'X's designate extrapolated data. Areas above the "
"line plots represent\nthe feasible region. Global "
"optimality of solution depends on the convexity of "
"these line plots.\nClose graph to continue on to solve.")
fig.tight_layout()
fig.subplots_adjust(top=0.75)
logger.info("close graph window to continue")
pyplot.show()
def optimize(self):
"""
Run the optimization.
Must set self.state using LpStatus object:
LpStatusOptimal -> STATE_SOLVED_OK
LpStatusNotSolved -> STATE_UNSOLVED
LpStatusInfeasible -> STATE_SOLVED_BAD
LpStatusUnbounded -> STATE_SOLVED_BAD
LpStatusUndefined -> STATE_UNDEFINED
-- use self.set_state(lpstatus) --
Returns state
If solved, then solution will be stored in self.X dictionary, indexed
by variable name. Suggested convention:
'Tice', 'Tlnd', ... for cost per component
'Nice', 'Nlnd', ... for ntasks per component
'NBice', 'NBlnd', ... for number of blocks per component
The default implementation of get_solution() returns a dictionary
of these variable keys and their values.
"""
raise NotImplementedError
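    # Sketch of what a derived class's optimize() might look like (assumes pulp;
    # the variable names follow the suggested convention above and are illustrative):
    #   prob = pulp.LpProblem("layout", pulp.LpMinimize)
    #   self.X['Tice'] = pulp.LpVariable('Tice', lowBound=0)
    #   self.X['Nice'] = pulp.LpVariable('Nice', lowBound=1, cat='Integer')
    #   self.add_model_constraints()          # fills self.constraints from the cost data
    #   prob += self.X['Tice']                # objective: minimize the ICE cost
    #   for constraint, _desc in self.constraints:
    #       prob += constraint
    #   self.set_state(prob.solve())
    #   return self.state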
def get_solution(self):
"""
Return a dictionary of the solution variables, can be overridden.
Default implementation returns values in self.X
"""
assert self.state == self.STATE_SOLVED_OK,\
"solver failed, no solution available"
retval = {}
if hasattr(self,'X') and isinstance(self.X, dict):
for k in self.X:
retval[k] = self.X[k].varValue
return retval
def set_state(self, lpstatus):
if lpstatus == pulp.constants.LpStatusOptimal:
self.state = self.STATE_SOLVED_OK
elif lpstatus == pulp.constants.LpStatusNotSolved:
self.state = self.STATE_UNSOLVED
elif lpstatus == pulp.constants.LpStatusUndefined:
self.state = self.STATE_UNDEFINED
else:
self.state = self.STATE_SOLVED_BAD
def get_state(self):
return self.state
def get_state_string(self, state):
return self.states[state]
def write_pe_file(self, pefilename):
raise NotImplementedError
def write_xml_changes(self, outfile):
"""
Write out a list of xmlchange commands to implement
the optimal layout
"""
raise NotImplementedError
def write_pe_template(self, pefilename, ntasks, nthrds, roots):
from distutils.spawn import find_executable
from xml.etree import ElementTree as ET
from CIME.utils import run_cmd
logger.info("Writing pe node info to %s", pefilename)
root = ET.Element('config_pes')
grid = ET.SubElement(root, 'grid')
grid.set('name', 'any')
mach = ET.SubElement(grid, 'mach')
mach.set('name', 'any')
pes = ET.SubElement(mach, 'pes')
pes.set('compset', 'any')
pes.set('pesize', '')
ntasks_node = ET.SubElement(pes, 'ntasks')
for k in ntasks:
node = ET.SubElement(ntasks_node, 'ntasks_' + k)
node.text = str(ntasks[k])
nthrds_node = ET.SubElement(pes, 'nthrds')
for k in nthrds:
node = ET.SubElement(nthrds_node, 'nthrds_' + k)
node.text = str(nthrds[k])
rootpe_node = ET.SubElement(pes, 'rootpe')
for k in roots:
node = ET.SubElement(rootpe_node, 'rootpe_' + k)
node.text = str(roots[k])
xmllint = find_executable("xmllint")
if xmllint is not None:
run_cmd("%s --format --output %s -" % (xmllint, pefilename),
input_str=ET.tostring(root))
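        # The XML written above has roughly this shape (values are illustrative):
        #   <config_pes>
        #     <grid name="any"><mach name="any">
        #       <pes compset="any" pesize="">
        #         <ntasks><ntasks_atm>128</ntasks_atm>...</ntasks>
        #         <nthrds><nthrds_atm>2</nthrds_atm>...</nthrds>
        #         <rootpe><rootpe_atm>0</rootpe_atm>...</rootpe>
        #       </pes>
        #     </mach></grid>
        #   </config_pes>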
``` |
{
"source": "jingxianwen/e3sm_diags",
"score": 2
} |
#### File: plot/vcs/zonal_mean_xy_plot.py
```python
from __future__ import print_function
import os
import sys
import vcs
import acme_diags
import acme_diags.plot.vcs as utils
from acme_diags.driver.utils.general import get_output_dir
textcombined_objs = {}
def managetextcombined(tt_name, to_name, vcs_canvas):
"""Caches textcombined objects"""
new_name = "%s:::%s" % (tt_name, to_name)
mytc = textcombined_objs.get(new_name, None)
if mytc is None:
mytc = vcs_canvas.createtextcombined(
Tt_source=tt_name, To_source=to_name)
textcombined_objs[new_name] = mytc
return mytc
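# Illustrative use (the template names refer to text objects defined in the
# plot_set_3.json script loaded below): repeated calls with the same names return
# the cached vcs textcombined object instead of creating a new one each time.
#   title = managetextcombined('main_title', 'main_title', vcs_canvas)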
def plot(ref, test, diff, metrics_dict, parameters):
vcs_canvas = vcs.init(bg=True, geometry=(
parameters.canvas_size_w, parameters.canvas_size_h))
# Line options, see here: https://uvcdat.llnl.gov/documentation/vcs/vcs-10.html
# Other options not in the above link:
# https://uvcdat.llnl.gov/docs/vcs/graphics/unified1D.html
ref_plot_linetype = 0
ref_plot_color = 215 # 6 to 239
ref_plot_width = 3 # 1 to 100
ref_plot_marker = 1
ref_plot_markersize = 1
ref_plot_markercolor = 215
test_plot_linetype = 0
test_plot_color = 1
test_plot_width = 3
test_plot_marker = 1
test_plot_markersize = 1
test_plot_markercolor = 1
diff_plot_linetype = 0
diff_plot_color = 1
diff_plot_width = 3
diff_plot_marker = 1
diff_plot_markersize = 1
diff_plot_markercolor = 1
file_path = os.path.join(acme_diags.INSTALL_PATH, 'zonal_mean_xy')
vcs_canvas.scriptrun(os.path.join(file_path, 'plot_set_3.json'))
utils.set_units(test, parameters.test_units)
utils.set_units(ref, parameters.reference_units)
utils.set_units(diff, parameters.diff_units)
    if hasattr(test, 'long_name'):
        test.long_name = parameters.test_title if parameters.test_title != '' else test.long_name
    if hasattr(ref, 'long_name'):
        ref.long_name = parameters.reference_title if parameters.reference_title != '' else ref.long_name
    if hasattr(diff, 'long_name'):
        diff.long_name = parameters.diff_title if parameters.diff_title != '' else diff.long_name
    test.id = str(
        parameters.test_name_yrs) if parameters.test_name_yrs != '' else test.id
    ref.id = str(
        parameters.ref_name_yrs) if parameters.ref_name_yrs != '' else ref.id
    diff.id = str(
        parameters.diff_name) if parameters.diff_name != '' else diff.id
# use vcs_canvas.show('colormap') to view all colormaps
# 6 to 239 are purple to red in rainbow order
vcs_canvas.setcolormap('rainbow')
ref_test_template = vcs.gettemplate('ref_test_template')
# Turn off the units of the axes in the plots.
ref_test_template.xunits.priority = 0
ref_test_template.yunits.priority = 0
ref_test_yaxis_title = managetextcombined(
'ref_test_yaxis_title', 'ref_test_yaxis_title', vcs_canvas)
ref_test_yaxis_title.angle = 270
ref_test_yaxis_title.halign = 'center'
ref_test_yaxis_title.y = (
ref_test_template.data.y1 + ref_test_template.data.y2) / 2
ref_test_yaxis_title.x = ref_test_template.data.x1 - 0.08
ref_test_yaxis_title.string = test.long_name + ' (' + test.units + ')'
vcs_canvas.plot(ref_test_yaxis_title)
ref_test_template.legend.priority = 0
ref_test_template.title.priority = 0
# the actual box around the plot
ref_test_template.box1.x1 = 0.1223
ref_test_template.box1.x2 = 0.96
ref_test_template.box1.y1 = 0.55
ref_test_template.box1.y2 = 0.90
# data (the lines) need to be offset accordingly
ref_test_template.data.x1 = 0.1223
ref_test_template.data.x2 = 0.96
ref_test_template.data.y1 = 0.55
ref_test_template.data.y2 = 0.90
ref_test_template.units.textorientation = 'defright'
ref_test_template.units.x = 0.96
ref_test_template.units.y = 0.91
# labels on xaxis
ref_test_template.xlabel1.y = (0.55) - 0.02 # no xlabel1.x attribute
# actual ticks on xaxis
ref_test_template.xtic1.y1 = (0.55 - 0.005) + 0.01
ref_test_template.xtic1.y2 = (0.55 - 0.005)
# name of xaxis
# ref_test_template.xname.y += 0.29
# labels on yaxis
ref_test_template.ylabel1.x = 0.11 # no ylabel1.y attribute
# actual ticks on yaxis
ref_test_template.ytic1.x1 = (0.1223 - 0.006) + 0.01
ref_test_template.ytic1.x2 = (0.1223 - 0.006)
diff_template = vcs.gettemplate('diff_template')
# Turn off the units of the axes in the plots.
diff_template.xunits.priority = 0
diff_template.yunits.priority = 0
diff_yaxis_title = managetextcombined(
'diff_yaxis_title', 'diff_yaxis_title', vcs_canvas)
diff_yaxis_title.angle = 270
diff_yaxis_title.halign = 'center'
diff_yaxis_title.y = (diff_template.data.y1 + diff_template.data.y2) / 2
diff_yaxis_title.x = diff_template.data.x1 - 0.08
diff_yaxis_title.string = test.long_name + ' (' + test.units + ')'
vcs_canvas.plot(diff_yaxis_title)
diff_template.units.textorientation = 'defright'
diff_template.units.x += 0.01
diff_template.legend.priority = 0
diff_template.ytic1.x1 = (0.1223 - 0.006) + 0.01
diff_template.ytic1.x2 = (0.1223 - 0.006)
diff_template.ylabel1.x = 0.11 # no ylabel1.y attribute
diff_template.units.textorientation = 'defright'
diff_template.units.x = 0.96
ref_line = vcs_canvas.getxvsy('ref_plot')
ref_line.datawc_y1 = min(ref.min(), test.min())
ref_line.datawc_y2 = max(ref.max(), test.max())
ref_line.datawc_x1 = -90
ref_line.datawc_x2 = 90
ref_line.xticlabels1 = {-90: "90S", -60: "60S",
-30: "30S", 0: "Eq", 30: "30N",
60: "60N", 90: "90N"}
test_line = vcs_canvas.getxvsy('test_plot')
test_line.datawc_y1 = min(ref.min(), test.min())
test_line.datawc_y2 = max(ref.max(), test.max())
test_line.datawc_x1 = -90
test_line.datawc_x2 = 90
diff_line = vcs_canvas.getxvsy('diff_plot')
diff_line.datawc_y1 = diff.min()
diff_line.datawc_y2 = diff.max()
diff_line.datawc_x1 = -90
diff_line.datawc_x2 = 90
diff_line.xticlabels1 = {-90: "90S", -60: "60S",
-30: "30S", 0: "Eq", 30: "30N",
60: "60N", 90: "90N"}
ref_line.linetype = ref_plot_linetype
ref_line.linecolor = ref_plot_color
ref_line.linewidth = ref_plot_width
ref_line.marker = ref_plot_marker
ref_line.markersize = ref_plot_markersize
ref_line.markercolor = ref_plot_markercolor
test_line.linetype = test_plot_linetype
test_line.linecolor = test_plot_color
test_line.linewidth = test_plot_width
test_line.marker = test_plot_marker
test_line.markersize = test_plot_markersize
test_line.markercolor = test_plot_markercolor
diff_line.linetype = diff_plot_linetype
diff_line.linecolor = diff_plot_color
diff_line.linewidth = diff_plot_width
diff_line.marker = diff_plot_marker
diff_line.markersize = diff_plot_markersize
diff_line.markercolor = diff_plot_markercolor
blank_template = vcs_canvas.gettemplate('blank_template')
blank_template.legend.priority = 0
blank_template.data.priority = 1
vcs_canvas.plot(ref, ref_line, ref_test_template)
vcs_canvas.plot(test, test_line, blank_template)
vcs_canvas.plot(diff, diff_line, diff_template)
# Plot the main title
main_title = managetextcombined('main_title', 'main_title', vcs_canvas)
main_title.string = parameters.main_title
# for some reason, this needs to be before a call to vcs_canvas.plot()
vcs_canvas.portrait()
vcs_canvas.plot(main_title)
test_title = managetextcombined('test_title', 'test_title', vcs_canvas)
test_title.string = "Test: " + str(parameters.test_name)
test_title.color = 1
test_title.x = ref_test_template.data.x1 - 0.05
test_title.y = ref_test_template.data.y2 + 0.045
test_title.height = 12
vcs_canvas.plot(test_title)
ref_title = managetextcombined('ref_title', 'ref_title', vcs_canvas)
ref_title.string = "Reference: " + str(parameters.reference_name)
ref_title.color = 215
ref_title.x = ref_test_template.data.x1 - 0.05
ref_title.y = ref_test_template.data.y2 + 0.025
ref_title.height = 12
vcs_canvas.plot(ref_title)
if not parameters.logo:
vcs_canvas.drawlogooff()
fnm = os.path.join(get_output_dir(parameters.current_set,
parameters), parameters.output_file)
for f in parameters.output_format:
f = f.lower().split('.')[-1]
if f == 'png':
vcs_canvas.png(fnm)
elif f == 'pdf':
vcs_canvas.pdf(fnm)
elif f == 'svg':
vcs_canvas.svg(fnm)
# Get the filename that the user has passed in and display that.
# When running in a container, the paths are modified.
fnm = os.path.join(get_output_dir(parameters.current_set, parameters,
ignore_container=True), parameters.output_file)
print('Plot saved in: ' + fnm + '.' + f)
vcs_canvas.clear()
``` |
{
"source": "jingxianwen/rte_rrtmgp_lw_scat",
"score": 3
} |
#### File: examples/rfmip-clear-sky/generate-output-file-templates.py
```python
from netCDF4 import Dataset
import numpy as np
import sys
import time, uuid, argparse
import urllib.request, json
# ---------------------------------------------------------------------------------
# Copy a variable and all its attributes from one netCDF file to another
#
def copyVar(nc_in, nc_out, name, newname=None) :
if newname is None :
newname = name
nc_out.createVariable(newname, nc_in.variables[name].dtype, nc_in.variables[name].dimensions)
nc_out.variables[newname].setncatts(nc_in.variables[name].__dict__)
nc_out.variables[newname][:] = nc_in.variables[name][:]
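# Illustrative call (the variable names follow the RFMIP input file used below):
#   copyVar(atmos_file, out_file, 'pres_level', 'plev')
# copies the variable, its attributes, and its values under a new name.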
# ---------------------------------------------------------------------------------
atmos_file = Dataset('multiple_input4MIPs_radiation_RFMIP_UColorado-RFMIP-1-2_none.nc', mode='r')
# Available from https://www.earthsystemcog.org/projects/rfmip/resources/
# or from https://esgf-node.llnl.gov/search/input4mips/ ; search for "RFMIP"
parser = argparse.ArgumentParser(description='Create CMIP6/ESGF-compliant output files for RFMIP-IRF.')
parser.add_argument('--source_id', type=str, \
default = "LBLRTM-12-8",
help='Source ID, must match CMIP Controlled Vocabulary at https://github.com/WCRP-CMIP/CMIP6_CVs/blob/master/CMIP6_source_id.json')
parser.add_argument('--forcing_index', type=int, \
default = 1, \
help='Forcing index (1 = all available greenhouse gases; 2 = CO2, CH4, N2O, CFC12, CFC11eq; 3 = CO2, CH4, N2O, CFC12eq, HFC-134eq)')
parser.add_argument('--physics_index', type=int, \
default = 1, \
help='Physics index, e.g. for different approximations')
args = parser.parse_args()
#
# Check that source_id is valid
# Use source_id to obtain other text
#
with urllib.request.urlopen("https://raw.githubusercontent.com/PCMDI/cmip6-cmor-tables/master/Tables/CMIP6_CV.json") as url:
cmip6 = json.loads(url.read().decode())
if(args.source_id in cmip6['CV']['source_id'].keys()):
source_id = args.source_id
source = cmip6['CV']['source_id'][source_id]['source']
institution_id = cmip6['CV']['source_id'][source_id]['institution_id'][0]
institution = cmip6['CV']['institution_id'][institution_id]
physics_index = np.int32(args.physics_index)
forcing_index = np.int32(args.forcing_index)
else:
print("source_id {} is not in CMIP6 Controlled Vocabulary".format(args.source_id))
sys.exit(1)
if (forcing_index < 1 ) or (forcing_index > 3):
print('forcing_index must be 1, 2, or 3 (1 = all available greenhouse gases; 2 = CO2, CH4, N2O, CFC12, CFC11eq; 3 = CO2, CH4, N2O, CFC12eq, HFC-134eq)')
sys.exit(1)
if (physics_index < 1 ):
print('physics_index must be positive')
sys.exit(1)
#
# Model/institution specific attributes
#
variant_label = "r1i1p{0}f{1}".format(physics_index,forcing_index)
model_attrs = {
"institution_id" :institution_id,
"institution" :institution,
"source_id" :source_id,
"source" :source_id,
"further_info_url":"https://furtherinfo.es-doc.org/CMIP6." + institution_id + "." + source_id + ".rad-irf.none." + variant_label,
"forcing_index" :np.int32(forcing_index),
"license" :"CMIP6 model data produced by " + institution_id + " is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (https://creativecommons.org/licenses). " +
"Consult https://pcmdi.llnl.gov/CMIP6/TermsOfUse for terms of use governing CMIP6 output, including citation requirements and proper acknowledgment. " +
"Further information about this data, including some limitations, can be found via the further_info_url (recorded as a global attribute in this file) and at https://pcmdi.llnl.gov/." +
"The data producers and data providers make no warranty, either express or implied, including, but not limited to, warranties of merchantability and fitness for a particular purpose." +
"All liabilities arising from the supply of the information (including any liability arising in negligence) are excluded to the fullest extent permitted by law." }
#
# Required attributes, uniform across submissions
#
std_attrs = {
"data_specs_version":"01.00.29",
"physics_index":np.int32(physics_index)}
# Submission attrs
sub_attrs = {
'creation_date':time.strftime("%Y-%m-%dT%H:%M:%SZ",time.gmtime()),
'tracking_id' : '/'.join(['hdl:21.14100',str(uuid.uuid4())]),
"variant_label":variant_label}
# Attributes are taken from https://docs.google.com/document/d/1h0r8RZr_f3-8egBMMh7aqLwy3snpD6_MrDz1q8n5XUk/edit
# Data reference syntax attributes
drs_attrs = {
"activity_id" :"RFMIP", # (from CMIP6_activity_id.json)
"product" :"model-output",
"experiment_id":"rad-irf", # (from CMIP6_experiment_id.json)
"table_id" :"Efx", # (per http://clipc-services.ceda.ac.uk/dreq/u/efc0de22-5629-11e6-9079-ac72891c3257.html)
"frequency" :"fx",
"sub_experiment_id":"none"}
expt_attrs = {
"Conventions" :"CF-1.7 CMIP-6.2",
"mip_era" :"CMIP6",
"experiment" :"offline assessment of radiative transfer parmeterizations in clear skies",
"sub_experiment" :"none",
"product" :"model-output",
"realization_index" :np.int32(1),
"initialization_index":np.int32(1),
"source_type" :"RAD",
"nominal_resolution" :"10 km",
"realm" :"atmos",
"grid_label" :"gn",
"grid" :"columns sampled from ERA-Interim, radiative fluxes computed independently"}
short_names = ['rlu','rsu', 'rld', 'rsd']
stand_names = ['upwelling_longwave_flux_in_air','upwelling_shortwave_flux_in_air',
'downwelling_longwave_flux_in_air','downwelling_shortwave_flux_in_air']
for short, std in zip(short_names, stand_names) :
# File name is constructed per https://docs.google.com/document/d/1h0r8RZr_f3-8egBMMh7aqLwy3snpD6_MrDz1q8n5XUk/edit#
# fixed strings are table_id, experiment_id, grid_label
out_file_name = short + "_Efx_" + source_id + "_rad-irf_" + variant_label + "_gn" + ".nc"
print('Creating ' + out_file_name)
out_file = Dataset(out_file_name, mode='w', FORMAT='NETCDF4_CLASSIC')
out_file.setncatts(drs_attrs)
out_file.setncatts(std_attrs)
out_file.setncatts(expt_attrs)
out_file.setncatts(model_attrs)
out_file.setncatts(sub_attrs)
out_file.setncatts({'variable_id' :short})
d = out_file.createDimension('expt', atmos_file.dimensions['expt'].size)
d = out_file.createDimension('site', atmos_file.dimensions['site'].size)
d = out_file.createDimension('level', atmos_file.dimensions['level'].size)
copyVar(atmos_file, out_file, 'lat')
copyVar(atmos_file, out_file, 'lon')
copyVar(atmos_file, out_file, 'time')
copyVar(atmos_file, out_file, 'pres_level', 'plev')
v = out_file.createVariable(short, 'f4', ('expt', 'site', 'level'))
v.setncatts({'variable_id' :short,
'standard_name':std,
'units' :'W m-2',
'_FillValue' :np.float32(-1.e+03),
'missing_value':np.float32(-1.e+03),
"cell_methods" :"area: point",
'coordinates' :'lon lat time'})
copyVar(atmos_file, out_file, 'profile_weight')
out_file.close()
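# Hypothetical invocation (argument values are examples only):
#   python generate-output-file-templates.py --source_id LBLRTM-12-8 \
#          --forcing_index 1 --physics_index 1
# which writes rlu_Efx_LBLRTM-12-8_rad-irf_r1i1p1f1_gn.nc and the corresponding
# rsu/rld/rsd files.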
``` |
{
"source": "jingxiaoliu/Bridge-damage-segmentation",
"score": 2
} |
#### File: Bridge-damage-segmentation/apis/train_damage_pure.py
```python
import torch, torchvision
import os
import numpy as np
from PIL import Image
import mmcv
from mmcv import Config
from mmseg.datasets.builder import DATASETS
from mmseg.datasets.custom import CustomDataset
from mmseg.apis import set_random_seed
from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.apis import train_segmentor
from mmcv.runner import get_dist_info, init_dist
from datetime import datetime
import argparse
from random import randint
# Training arguments, check the help message for details.
parser = argparse.ArgumentParser()
parser.add_argument("--nw", type=str, required=True,help="Network name.")
parser.add_argument("--conf", type=str, required=True,help="Config path.")
parser.add_argument("--cp", type=str, required=True,help="checkpoint path.")
parser.add_argument("--dr", type=str, required=True,help="Data root.")
parser.add_argument("--local_rank", type=int, help="")
parser.add_argument("--train_split", type=str, required=True, help="Split file for training")
parser.add_argument("--val_split", type=str, required=True, help="Split file for testing")
parser.add_argument("--bs", type=int, required=True,help="Batch size.")
parser.add_argument("--width", type=int, default=640, help='Image width. ')
parser.add_argument("--height", type=int, default=360, help='Image height. ')
parser.add_argument("--distributed", action='store_true')
parser.add_argument("--resume_from", type=str, help="Resume from a previous checkpoint. Pass the checkpoint path as an argument.")
parser.add_argument("--iter", type=int, default=40000, help='Max number of iterations.')
parser.add_argument("--log_iter", type=int, default=10, help="The interval for logging.")
parser.add_argument("--eval_iter", type=int, default=200, help="Validation interval.")
parser.add_argument("--checkpoint_iter", type=int, default=2000, help="Checkpoint interval.")
parser.add_argument("--learning_rate", type=float, help="Learning rate of the optimizer.")
parser.add_argument("--ohem", action='store_true')
parser.add_argument("--multi_loss", action='store_true')
parser.add_argument("--job_name", type=str, default='', help="job name used in sbatch to create folders.")
args = parser.parse_args()
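# Hypothetical launch (paths and values are examples only; for multi-GPU runs the
# script would typically be started via torch.distributed.launch / torchrun so
# that --local_rank is provided):
#   python apis/train_damage_pure.py --nw hrnet --conf configs/hrnet.py \
#          --cp ./checkpoints --dr ./Tokaido_dataset \
#          --train_split splits/train_puretex.txt --val_split splits/val_puretex.txt \
#          --bs 4 --distributed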
# Concrete segmentation dataset: Two classes only.
classes = ('Undefined','Undamaged', 'ConcreteDamage', 'ExposedRebar')
palette = [[0,0,0], [128, 128, 128], [129, 127, 38], [120, 69, 125]]
@DATASETS.register_module()
class TokaidoDataset(CustomDataset):
CLASSES = classes
PALETTE = palette
def __init__(self, split, **kwargs):
super().__init__(img_suffix='.png', seg_map_suffix='.bmp',
split=split, **kwargs)
assert os.path.exists(self.img_dir) and self.split is not None
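# Sketch of the expected split-file contents (an assumption based on mmseg's
# CustomDataset: one image stem per line, without the image/label suffix):
#   image_case00_0001
#   image_case00_0002
#   ...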
# Setup config
data_root = args.dr
num_classes = len(classes)
batch_size = args.bs
image_size = (args.width,args.height)
img_dir = 'images_puretex'
ann_dir = os.path.join('synthetic_puretex', 'labdmg_resize')
train_split = args.train_split
val_split = args.val_split
checkpoint_dir = args.cp
dt_string = datetime.now().strftime("%Y%m%d-%H%M%S")
job_name = args.job_name + "_" + args.nw + "_" + dt_string
network = args.nw
def generate_config(config_path):
cfg = Config.fromfile(config_path)
# class_weight = [0.9939,0.0257,0.9822,0.9981]
# Since we use ony one GPU, BN is used instead of SyncBN
if(args.distributed):
cfg.norm_cfg = dict(type='SyncBN', requires_grad=True)
else:
cfg.norm_cfg = dict(type='BN', requires_grad=True)
# cfg.model.backbone.norm_cfg = cfg.norm_cfg
cfg.model.backbone.pretrained = None
if network == 'resnest' or network == 'pspnet' or network == 'swin' or network == 'vit':
if args.ohem:
cfg.model.decode_head.sampler = dict(type='OHEMPixelSampler')
cfg.model.decode_head.norm_cfg = cfg.norm_cfg
cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg
cfg.model.decode_head.num_classes = num_classes
# cfg.model.decode_head.loss_decode.class_weight = class_weight
cfg.model.auxiliary_head.num_classes = num_classes
# cfg.model.auxiliary_head.loss_decode.class_weight = class_weight
if network == 'swin':
del cfg.model.backbone.pretrain_style
elif network == 'ocrnet':
if args.ohem:
cfg.model.decode_head[0].sampler = dict(type='OHEMPixelSampler')
cfg.model.decode_head[1].sampler = dict(type='OHEMPixelSampler')
if args.multi_loss:
cfg.model.decode_head[0].loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce',
loss_weight=1.0),
dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0)]
cfg.model.decode_head[1].loss_decode = [dict(type='CrossEntropyLoss', loss_name='loss_ce',
loss_weight=1.0),
dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0)]
cfg.model.decode_head[0].norm_cfg = cfg.norm_cfg
cfg.model.decode_head[1].norm_cfg = cfg.norm_cfg
cfg.model.decode_head[0].num_classes = num_classes
# cfg.model.decode_head[0].loss_decode.class_weight = class_weight
cfg.model.decode_head[1].num_classes = num_classes
# cfg.model.decode_head[1].loss_decode.class_weight = class_weight
elif network == 'hrnet':
if args.ohem:
cfg.model.decode_head.sampler = dict(type='OHEMPixelSampler')
cfg.model.decode_head.norm_cfg = cfg.norm_cfg
cfg.model.decode_head.num_classes = num_classes
# cfg.model.decode_head.loss_decode.class_weight = class_weight
# Modify dataset type and path
cfg.dataset_type = 'TokaidoDataset'
cfg.data_root = data_root
cfg.resume_from = args.resume_from
cfg.data.samples_per_gpu = batch_size
cfg.data.workers_per_gpu = 4
cfg.img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
cfg.crop_size = (256, 256)
cfg.train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=image_size, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=cfg.crop_size),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomRotate',prob=0.5,degree=90,pad_val=0,seg_pad_val=0),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='Pad', size=cfg.crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
cfg.test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=image_size,
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
cfg.data.train.type = cfg.dataset_type
cfg.data.train.data_root = cfg.data_root
cfg.data.train.img_dir = img_dir
cfg.data.train.ann_dir = ann_dir
cfg.data.train.pipeline = cfg.train_pipeline
cfg.data.train.split = train_split
cfg.data.val.type = cfg.dataset_type
cfg.data.val.data_root = cfg.data_root
cfg.data.val.img_dir = img_dir
cfg.data.val.ann_dir = ann_dir
cfg.data.val.pipeline = cfg.test_pipeline
cfg.data.val.split = val_split
cfg.data.test.type = cfg.dataset_type
cfg.data.test.data_root = cfg.data_root
cfg.data.test.img_dir = img_dir
cfg.data.test.ann_dir = ann_dir
cfg.data.test.pipeline = cfg.test_pipeline
cfg.data.test.split = val_split
# Set up working dir to save files and logs.
cfg.work_dir = os.path.join(checkpoint_dir, job_name + "_" + network)
if not os.path.exists(cfg.work_dir):
os.makedirs(cfg.work_dir, exist_ok=True)
cfg.runner.max_iters = args.iter
cfg.log_config.interval = args.log_iter
cfg.evaluation.interval = args.eval_iter
cfg.checkpoint_config.interval = args.checkpoint_iter
# Set seed
cfg.seed = randint(0,10000)
set_random_seed(randint(0,10000), deterministic=False)
if(args.distributed):
init_dist('pytorch', **cfg.dist_params)
# gpu_ids is used to calculate iter when resuming checkpoint
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
else:
cfg.gpu_ids = [0]
if args.learning_rate is not None:
cfg.optimizer = dict(type='SGD', lr=args.learning_rate, momentum=0.9, weight_decay=0.0005)
# dump config
cfg.dump(os.path.join(cfg.work_dir,job_name+'.py'))
# Have a look at the final config used for training
print(f'Config:\n{cfg.pretty_text}')
return cfg
def main():
# Build the dataset
base_config = args.conf
cfg = generate_config(base_config)
datasets = [build_dataset(cfg.data.train)]
# Build the detector
model = build_segmentor(
cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
# Add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
# Create work_dir
mmcv.mkdir_or_exist(os.path.abspath(cfg.work_dir))
train_segmentor(model, datasets, cfg, distributed=args.distributed, validate=True,
meta=dict())
if __name__ == "__main__":
main()
```
#### File: Bridge-damage-segmentation/modules/data_prep.py
```python
import os
import argparse
from tqdm import tqdm
import cv2
import numpy as np
import random
from PIL import Image
import glob
parser = argparse.ArgumentParser()
parser.add_argument("--option", type=str, required=True,
                    help="Use resize|split_puretex|splitbycase|mask_imgs to preprocess datasets.")
parser.add_argument("--input", type=str, required=True,
help="Input directory, usually the directory of images.")
parser.add_argument("--output", type=str, required=True,
help="Output directory if there is any output")
parser.add_argument("--data_root", type=str, required=False,
                    help="Tokaido data root")
parser.add_argument("--split_csv", type=str, required=False,
help="Split file")
parser.add_argument("--lbl_dir", type=str, required=False,
help="Label directory")
parser.add_argument("--width", type=int, default=1920)
parser.add_argument("--height", type=int, default=1080)
parser.add_argument("--nearest", type=bool, default=True)
parser.add_argument("--resampling", type=bool, default=False)
parser.add_argument("--test", action='store_true')
args = parser.parse_args()
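# Hypothetical invocations (paths are examples only):
#   python modules/data_prep.py --option resize --input ./labcmp --output ./labcmp_resize \
#          --width 640 --height 360
#   python modules/data_prep.py --option splitbycase --input ./train.csv --output ./splits \
#          --data_root ./Tokaido_dataset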
def mask_imgs(in_dir, out_dir, split_csv, lbl_dir):
if not os.path.exists(in_dir):
        print("Input directory {0} does not exist".format(in_dir))
return
if not os.path.exists(out_dir):
os.makedirs(out_dir)
split_csv = args.split_csv
images = []
with open(split_csv, 'r') as f:
lines = f.readlines()
for line in lines:
images.append(line.strip('\n'))
images = list(images)
print(images[0])
print("Testing images for damage detection: ", len(images))
for i in tqdm(range(len(images)), desc="Masking validation images..."):
img = np.array(cv2.imread(os.path.join(in_dir, images[i]+'_Scene.png'), cv2.IMREAD_UNCHANGED))
lbl = np.tile(np.expand_dims(np.array(Image.open(os.path.join(lbl_dir, images[i]+'.bmp')).resize((1920,1080))),2),(1,1,3)).astype(np.uint8)
if img is None:
print('Wrong path:', os.path.join(in_dir, images[i]))
else:
img[lbl != 4] = 0
cv2.imwrite(os.path.join(out_dir, images[i]+'_Scene.png'), img)
def resize_imgs(in_dir, out_dir, width, height, nearest=True):
if not os.path.exists(in_dir):
        print("Input directory {0} does not exist".format(in_dir))
return
if not os.path.exists(out_dir):
os.makedirs(out_dir)
img_list = os.listdir(in_dir)
img_list.sort()
for img_name in tqdm(img_list, desc="Processing ..."):
img = cv2.imread(os.path.join(in_dir, img_name), cv2.IMREAD_UNCHANGED)
if img is None:
print('Wrong path:', os.path.join(in_dir, img_name))
else:
            out_img = cv2.resize(img, (width, height),
                                 interpolation=cv2.INTER_NEAREST if nearest else cv2.INTER_LINEAR)
cv2.imwrite(os.path.join(out_dir, img_name), out_img)
def splitbycase(in_dir, out_dir, data_root, seed=13, resampling=False):
if not os.path.exists(in_dir):
        print("Input directory {0} does not exist".format(in_dir))
return
if not os.path.exists(out_dir):
os.makedirs(out_dir)
train_images_cmp = []
train_images_dmg = []
with open(in_dir, 'r') as f:
lines = f.readlines()
for line in lines:
words = line.replace("\\", "/").strip("\n").split(",")
# valid image for cmp training
if (words[5] == 'True'):
train_images_cmp.append(os.path.basename(words[0].strip("_Scene.png")))
if (words[6] == 'True'):
train_images_dmg.append(os.path.basename(words[0].strip("_Scene.png")))
train_images_cmp = list(train_images_cmp)
train_images_dmg = list(train_images_dmg)
random.seed(seed)
cases = list(np.arange(0,175,1))
random.shuffle(cases)
linspace = list(np.arange(0,175,10))
# 10-fold
for i in range(10):
train_images_cmp_train = []
train_images_cmp_val = []
train_images_dmg_train = []
train_images_dmg_val = []
case_id = cases[linspace[i]:linspace[i+1]]
for name in train_images_cmp:
if name.split('_')[1][4:] in str(case_id):
train_images_cmp_val.append(name)
else:
train_images_cmp_train.append(name)
for name in train_images_dmg:
if name.split('_')[1][4:] in str(case_id):
train_images_dmg_val.append(name)
else:
train_images_dmg_train.append(name)
with open(os.path.join(out_dir, 'train_cmp'+str(i)+'.txt'), 'w') as f:
# select the ratio portion as training set
n=1
for line in train_images_cmp_train:
repeat = 1
if resampling:
file_name = data_root+'/synthetic/train/labcmp/'+line+'.bmp'
img = np.array(Image.open(file_name))
# if sleeper
er_labels = np.where(img==7)[0]
if len(er_labels) >= 10:
n+=1
repeat = 10
# if non-structural
er_labels1 = np.where(img==5)[0]
if len(er_labels1) >= 10:
n+=1
repeat = 10
for r in range(repeat):
f.writelines(line + '\n')
with open(os.path.join(out_dir, 'val_cmp'+str(i)+'.txt'), 'w') as f:
# select the rest as validation set
f.writelines(line + '\n' for line in train_images_cmp_val)
with open(os.path.join(out_dir, 'train_dmg'+str(i)+'.txt'), 'w') as f:
# select the ratio portion as training set
for line in train_images_dmg_train:
repeat = 1
if resampling:
file_name = data_root+'/synthetic/train/labdmg/'+line+'.bmp'
img = np.array(Image.open(file_name))
# if exposed rebar
er_labels = np.where(img==3)[0]
if len(er_labels) >= 10:
n+=1
repeat = 3
for r in range(repeat):
f.writelines(line + '\n')
with open(os.path.join(out_dir, 'val_dmg'+str(i)+'.txt'), 'w') as f:
# select the rest as validation set
f.writelines(line + '\n' for line in train_images_dmg_val)
def split_puretex(in_dir, out_dir, data_root, test=False, train_ratio=0.9, seed=13, resampling=False):
if not os.path.exists(in_dir):
        print("Input directory {0} does not exist".format(in_dir))
return
if not os.path.exists(out_dir):
os.makedirs(out_dir)
train_images = []
with open(in_dir, 'r') as f:
lines = f.readlines()
for line in lines:
words = line.replace("\\", "/").strip("\n").split(",")
# valid image
train_images.append(os.path.basename(words[0].strip(".png")))
train_images = list(train_images)
if not test:
random.seed(seed)
random.shuffle(train_images)
with open(os.path.join(out_dir, 'train_puretex.txt'), 'w') as f:
# select the ratio portion as training set
train_length = int(len(train_images) * train_ratio)
for line in train_images[:train_length]:
repeat = 1
if resampling:
file_name = data_root+'/synthetic_puretex/labdmg/'+line+'.bmp'
img = np.array(Image.open(file_name))
er_labels = np.where(img==3)[0]
# print(er_labels)
if len(er_labels) >= 10:
repeat = 5
for r in range(repeat):
f.writelines(line + '\n')
with open(os.path.join(out_dir, 'val_puretex.txt'), 'w') as f:
# select the rest as validation set
f.writelines(line + '\n' for line in train_images[train_length:])
else:
with open(os.path.join(out_dir, 'test_puretex.txt'), 'w') as f:
f.writelines(line+'\n' for line in train_images)
def main():
print(args.option)
if(args.option == "resize"):
resize_imgs(args.input, args.output, args.width, args.height)
if(args.option == "split_puretex"):
split_puretex(args.input, args.output, args.data_root, args.test, resampling=args.resampling)
if(args.option == "splitbycase"):
splitbycase(args.input, args.output, args.data_root, resampling=args.resampling)
if(args.option == "mask_imgs"):
mask_imgs(args.input, args.output, args.split_csv, args.lbl_dir)
if __name__ == "__main__":
main()
```
#### File: Bridge-damage-segmentation/modules/viz_label.py
```python
import cv2
import numpy as np
import os
from PIL import Image
import argparse
# dmg coding
Concrete_dmg = [128, 0, 0]
Rebar_dmg = [0, 128, 0]
Not_dmg = [0, 0, 128]
Undefined = [0, 0, 0]
# cmp coding
Nonbridge = [0,128,192]
Slab = [128,0,0]
Beam = [192,192,128]
Column = [128,64,128]
Nonstructure = [60,40,222]
Rail = [128,128,0]
Sleeper = [192,128,128]
Other = [64,64,128]
CMP_CMAP = np.array([Undefined, Nonbridge, Slab, Beam, Column, Nonstructure, Rail, Sleeper, Other], dtype=np.uint8)
DMG_CMAP = np.array([Undefined, Not_dmg, Concrete_dmg, Rebar_dmg], dtype=np.uint8)
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, required=True,
help="Input directory, usually the directory of predictions.")
parser.add_argument("--output", type=str, required=True,
help="Output directory if there is any output")
parser.add_argument("--raw_input", type=str,
help="The directory of original images.")
parser.add_argument("--cmp", action='store_true')
args = parser.parse_args()
def labelViz(img, num_class, cmap):
img = img[:,:,0] if len(img.shape) == 3 else img
img_out = np.zeros(img.shape + (3,), dtype=np.uint8)
for i in range(num_class):
img_out[img == i, :] = cmap[i]
return img_out
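# Minimal usage sketch (values are illustrative): given a label image whose pixels
# are class indices 0..3, labelViz(lbl, 4, DMG_CMAP) returns an HxWx3 uint8 image
# where, e.g., pixels labelled 2 (concrete damage) become [128, 0, 0].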
def main():
in_dir = args.input
out_dir = args.output
ori_dir = args.raw_input
is_cmp = args.cmp
img_list = os.listdir(in_dir)
for img_name in img_list:
img = cv2.imread(os.path.join(in_dir, img_name))
if is_cmp:
viz = labelViz(img, 9, CMP_CMAP)
else:
viz = labelViz(img, 4, DMG_CMAP)
if not os.path.exists(os.path.join(out_dir, 'png')):
os.makedirs(os.path.join(out_dir, 'png'))
cv2.imwrite(os.path.join(out_dir, 'png' ,img_name.replace(".bmp", ".png")), viz)
if not os.path.exists(os.path.join(out_dir, 'ori')):
os.makedirs(os.path.join(out_dir, 'ori'))
img = cv2.imread(os.path.join(ori_dir, img_name.replace(".bmp", "_Scene.png")))
cv2.imwrite(os.path.join(out_dir, 'ori' ,img_name.replace(".bmp", "_Scene.png")), img)
if __name__ == "__main__":
main()
``` |
{
"source": "jingxiaorobin/DeepLearning2019",
"score": 3
} |
#### File: jingxiaorobin/DeepLearning2019/model.py
```python
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
# Architecture
# TODO
# Load pre-trained model
self.load_weights('weights.pth')
def load_weights(self, pretrained_model_path, cuda=True):
# Load pretrained model
pretrained_model = torch.load(f=pretrained_model_path, map_location="cuda" if cuda else "cpu")
# Load pre-trained weights in current model
with torch.no_grad():
self.load_state_dict(pretrained_model, strict=True)
# Debug loading
print('Parameters found in pretrained model:')
pretrained_layers = pretrained_model.keys()
for l in pretrained_layers:
print('\t' + l)
print('')
for name, module in self.state_dict().items():
if name in pretrained_layers:
assert torch.equal(pretrained_model[name].cpu(), module.cpu())
print('{} have been loaded correctly in current model.'.format(name))
else:
raise ValueError("state_dict() keys do not match")
def forward(self, x):
# TODO
raise NotImplementedError
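    # Usage sketch (assumes 'weights.pth' exists next to this file and that the
    # architecture in __init__ has been filled in; the input shape is an assumption):
    #   model = Model()                                # loads and verifies pretrained weights
    #   logits = model(torch.randn(1, 3, 224, 224))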
``` |
{
"source": "jingxinfu/infercnvpy",
"score": 3
} |
#### File: infercnvpy/datasets/__init__.py
```python
import importlib.resources as pkg_resources
from . import data
import scanpy as sc
from anndata import AnnData
from scanpy import settings
from scanpy.readwrite import read
def oligodendroglioma() -> AnnData:
"""The original inferCNV example dataset.
Derived from :cite:`Tirosh2016`.
"""
with pkg_resources.path(data, "oligodendroglioma.h5ad") as p:
return sc.read_h5ad(p)
def maynard2020_3k() -> AnnData:
"""\
Return the dataset from :cite:`Maynard2020` as AnnData object, downsampled
to 3000 cells.
In brief, this data set was processed as follows:
* raw data downloaded from ENA
* gene expression quantified using Salmon and the nf-core/rnaseq pipeline.
* basic quality control (min_counts=20k, max_counts=5M, min_genes=1k, max_mitochondrial_fraction=0.2)
* filtered to 6000 HVG using `sc.pp.highly_variable_genes(..., flavor="seurat_v3")`
* raw counts processed using scVI, providing sample information as batch key.
* cell types manually annotated based on marker genes and leiden clustering and subclustering.
* downsampled to 3000 cells.
`adata.X` contains the `log1p` transformed, cpm-normalized raw counts.
The `scVI` latent representation is stored in `adata.obsm["X_scVI"]`.
A UMAP for the 3000 cells is precomputed.
"""
url = "https://github.com/icbi-lab/infercnvpy/releases/download/d0.1.0/maynard2020_3k.h5ad"
filename = settings.datasetdir / "maynard2020_3k.h5ad"
adata = read(filename, backup_url=url)
return adata
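# Illustrative use (the import alias is assumed to be the installed package name):
#   import infercnvpy as cnv
#   adata = cnv.datasets.maynard2020_3k()   # downloaded/cached under scanpy's datasetdir
#   adata_oligo = cnv.datasets.oligodendroglioma()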
``` |
{
"source": "jingxinfu/TCGAdnloader",
"score": 2
} |
#### File: TCGAdnloader/TCGAdnloader/downloader.py
```python
import subprocess, os,time,gzip
import pandas as pd
import numpy as np
from functools import reduce
from .convertor import mergeToSample, calTNzcore, rmEntrez, tpmToFpkm, mapEm2Gene, formatClin, pick,formatDrug
from .outformat import storeData
import requests,json,re,io
from .setting import CLIN_INFO, Biospecimen_INFO, Biospecimen_MAP, CLIN_MAP, PAM50_PATH, DRUG_MAP
class GdcApi(object):
'''
    API for downloading files from GDC
'''
__slot__ = ["files_endpt", "data_endpt", "cancer", "parental_dir",'cases_endpt']
def __init__(self, cancer, parental_dir, cases_endpt='https://api.gdc.cancer.gov/cases', data_endpt="https://api.gdc.cancer.gov/data", files_endpt="https://api.gdc.cancer.gov/files", **kwargs):
        ''' Initialize instance parameters
        Parameters
        ----------
        cancer : str
            Cancer type
        parental_dir : str
            Path to store data
        data_endpt : str, optional
            Endpoint for file downloading (the default is "https://api.gdc.cancer.gov/data")
        files_endpt : str, optional
            Endpoint for file id searching (the default is "https://api.gdc.cancer.gov/files")
'''
self.files_endpt = files_endpt
self.data_endpt = data_endpt
self.cancer = cancer
self.parental_dir = parental_dir
self.cases_endpt = cases_endpt
def _projFilter(self, data_type,method=None):
dtype_dict = {
"cnv_segment_somatic": "Masked Copy Number Segment",
"cnv_segment_all": "Copy Number Segment",
"masked_somatic_mutation":"Masked Somatic Mutation",
}
filters = {
"op": "and",
"content":[
{
"op": "in",
"content": {
"field": "files.data_type",
"value": [
dtype_dict[data_type]
]
}
},
{
"op": "in",
"content": {
"field": "cases.project.project_id",
"value": [
"TCGA-"+self.cancer.upper()
]
}
},
]
}
# specific for SNV on TCGA (Calling by four different tools)
if method != None:
filters['content'].append({
"op":"in",
"content":{
"field": "files.analysis.workflow_type",
"value":[
"{} Variant Aggregation and Masking".format(method)
]
}
})
params = {
"filters": json.dumps(filters),
"format": "JSON",
"size": "3000"
}
return params
def _nameFilter(self, data_type):
dtype_dict = {
'drug': "nationwidechildrens.org_clinical_drug_{}.txt".format(self.cancer.lower()),
'gistic': '{}.focal_score_by_genes.txt'.format(self.cancer.upper()),
# 'survival': "nationwidechildrens.org_clinical_follow_up_v{0}_{1}.txt".format(CLIN_VERSION[self.cancer], self.cancer.lower()),
'patient': "nationwidechildrens.org_clinical_patient_{}.txt".format(self.cancer.lower()),
'aliquot': "nationwidechildrens.org_biospecimen_aliquot_{}.txt".format(self.cancer.lower()),
'slide': "nationwidechildrens.org_biospecimen_slide_{}.txt".format(self.cancer.lower()),
'sample': "nationwidechildrens.org_biospecimen_sample_{}.txt".format(self.cancer.lower()),
'auxilary': "nationwidechildrens.org_auxiliary_{}.txt".format(self.cancer.lower()),
}
filters = {
"op": "in",
"content": {
"field": "files.file_name",
"value": [
dtype_dict[data_type]
]
}
}
params = {
"filters": json.dumps(filters),
"format": "JSON",
"size": "1"
}
return params
def _fetchFileID(self, data_type, by_name=True,method=None):
        ''' Get file ids matching the upstream filter parameters
Parameters
----------
data_type : str
Data type to be download. eg. gistic
by_name : bool, optional
Whether getting files id by matching file names (the default is True).
If not, we will use project filtering options to get file id list.
Returns
-------
list
A list contains file ids.
'''
if by_name is True:
file_uuid_list = []
params = self._nameFilter(data_type)
response = requests.get(self.files_endpt, params=params)
for file_entry in json.loads(response.content.decode("utf-8"))["data"]["hits"]:
file_uuid_list.append(file_entry["file_id"])
else:
file_uuid_list = []
params = self._projFilter(data_type,method=method)
response = requests.get(self.files_endpt, params=params)
if "message" in json.loads(response.content.decode("utf-8")).keys():
return None, 'Not found'
for file_entry in json.loads(response.content.decode("utf-8"))["data"]["hits"]:
file_uuid_list.append(file_entry["file_id"])
if len(file_uuid_list) == 0:
return None,'Not found'
else:
return file_uuid_list,None
def getTableFromFiles(self, data_type, by_name=True,method=None,**kwargs):
'''
Merging tables downloaded by a list of file ids
'''
try:
file_uuid_list, error = self._fetchFileID(
data_type=data_type, by_name=by_name,method=method)
except requests.exceptions.SSLError:
time.sleep(10)
file_uuid_list, error = self._fetchFileID(
data_type=data_type, by_name=by_name,method=method)
if error != None:
return None, error
ready_to_merge = []
if len(file_uuid_list) == 0 :
return None, 'Cannot find any file.'
for ids in file_uuid_list:
params = {"ids": [ids]}
try:
response = requests.post(self.data_endpt, data=json.dumps(
params), headers={"Content-Type": "application/json"})
except requests.exceptions.SSLError:
time.sleep(10)
response = requests.post(self.data_endpt, data=json.dumps(
params), headers={"Content-Type": "application/json"})
if method != None:
temp_file = self.cancer+'_'+method+"_snv_tmp.gz"
file = open(temp_file, "wb")
file.write(response.content)
file.close()
df = pd.read_table(temp_file, **kwargs)
subprocess.call('rm %s' % temp_file ,shell=True)
else:
df = pd.read_table(io.StringIO(
response.content.decode("utf-8")), **kwargs)
ready_to_merge.append(df)
return pd.concat(ready_to_merge,axis=0),None
def getClinInfo(self, fields):
filters = {
"op": "in",
"content": {
"field": "cases.project.project_id",
"value": [
"TCGA-"+self.cancer.upper()
]
}
}
fields = ','.join(fields)
params = {
"filters": json.dumps(filters),
"fields": fields,
"format": "TSV",
"size": "3000"
}
response = requests.get(self.cases_endpt, params=params)
if response.status_code != 200:
time.sleep(10)
response = requests.get(self.cases_endpt, params=params)
try:
result = pd.read_table(io.StringIO(response.content.decode("utf-8")))
error = None
except:
result=None
error='Not Found!'
return result,error
def clin(self):
'''
Downloading clinical information
'''
surs,stderr = self.getClinInfo(fields=CLIN_INFO)
if stderr == None:
surs.rename(columns=CLIN_MAP,inplace=True)
surs = surs[list(CLIN_MAP.values())]
format_surs = formatClin(surs)
storeData(df=format_surs,parental_dir=self.parental_dir,
sub_folder='Surv',cancer=self.cancer)
stderr = ''
else:
stderr = 'Cannot Found\tsurvival_info\t'+self.cancer+'\n'
return stderr
def biospecimen(self):
'''
        Downloading biospecimen information
'''
stderr = ''
for sub_folder,files in Biospecimen_INFO.items():
read_to_merge = []
for k, v in files.items():
meta, errors = self.getTableFromFiles(data_type=k)
if errors == None:
meta = meta[meta.columns.intersection(v)]
non_info = pd.Index(v).difference(meta.columns)
for c in non_info:
meta[c] = np.nan
meta.replace('[Not Available]', np.nan, inplace=True)
meta.replace('[Not Applicable]', np.nan, inplace=True)
meta.rename(columns=Biospecimen_MAP,inplace=True)
## header process
if 'bcr_sample_barcode' in v:
meta = meta.drop(0, axis=0)
if k == 'sample':
meta['sample'] = meta['sample'].map(lambda x: x[:-1])
meta = meta.drop_duplicates()
meta['patient'] = meta['sample'].map(lambda x: '-'.join(x.split('-')[:3]))
# elif 'hpv_status' in v:
# meta = meta.drop(0,axis=0)
# else:
# meta = meta.drop([0,1],axis=0)
## additional info
if k == 'slide':
meta = meta.set_index('sample')
meta = meta.apply(pd.to_numeric)
meta = mergeToSample(meta,transpose=True)
# if k == "patient" and self.cancer == 'BRCA':
# pam50 = pd.read_table(PAM50_PATH, index_col=0).rename(columns={
# "PAM50 mRNA":'PAM50'})['PAM50'].to_frame()
# meta = meta.merge(pam50, left_on='patient',right_index=True,how='left')
read_to_merge.append(meta)
else:
stderr += 'Cannot Found\t'+sub_folder+'_'+k+'\t'+self.cancer+'\n'
if len(read_to_merge) > 1:
result = reduce(lambda x,y:pd.merge(x,y, how='outer',on='patient'),read_to_merge).drop_duplicates().dropna(axis=1,how='all')
result = result.set_index('patient')
elif len(read_to_merge) == 1:
result = read_to_merge[0]
else:
continue
            ## Store tumor and normal info separately
# if sub_folder == "histology":
# for s in ['tumor','normal']:
# sub_result = pick(result, source=s, transpose=True)
# storeData(sub_result,
# parental_dir=self.parental_dir,
# sub_folder='/'.join([sub_folder,s]), cancer=self.cancer)
# sub_folder += '/origin'
storeData(result,
parental_dir=self.parental_dir,
sub_folder=sub_folder,cancer=self.cancer)
return stderr
def drug(self):
'''
Downloading Drug information
'''
stderr = ''
df, errors = self.getTableFromFiles(data_type='drug')
if errors == None:
df = df.drop([0,1],axis=0)
df = df.loc[:,df.columns.isin(list(DRUG_MAP.keys()))]
df.rename(columns=DRUG_MAP,inplace=True)
df = formatDrug(df)
df.set_index('patient',inplace=True)
storeData(df=df, parental_dir=self.parental_dir,
sub_folder='Drug', cancer=self.cancer)
else:
stderr += 'Cannot Found\tDrug information for \t'+self.cancer+'\n'
return stderr
def drugDownload(self):
if not os.path.isdir(self.parental_dir):
os.makedirs(self.parental_dir)
# asyn download
download_log_file = '/'.join([self.parental_dir, 'drug_finish.log'])
if os.path.isfile(download_log_file):
with open(download_log_file, 'r') as f:
content = f.readlines()
content = [x.strip() for x in content]
else:
content = []
        # begin the download if it has not been completed before
if not self.cancer in content:
with open('/'.join([self.parental_dir, 'drug_stderr.log']), 'a+') as stderrs:
logs = self.drug()
stderrs.write(logs)
with open(download_log_file, 'a+') as f:
f.write(self.cancer+'\n')
def metaDownload(self):
if not os.path.isdir(self.parental_dir):
os.makedirs(self.parental_dir)
# asyn download
download_log_file = '/'.join([self.parental_dir, 'meta_finish.log'])
if os.path.isfile(download_log_file):
with open(download_log_file, 'r') as f:
content = f.readlines()
content = [x.strip() for x in content]
else:
content = []
        # begin the download if it has not been completed before
if not self.cancer in content:
with open('/'.join([self.parental_dir, 'meta_stderr.log']), 'a+') as stderrs:
for n in ['biospecimen']:#, 'clin']:
logs = self.__getattribute__(n)()
stderrs.write(logs)
with open(download_log_file, 'a+') as f:
f.write(self.cancer+'\n')
class Workflow(object):
__slot__ = ['cancer', 'parental_dir', 'workflow']
def __init__(self,cancer,parental_dir,workflow):
self.cancer = cancer
self.parental_dir = parental_dir
self.workflow = workflow
def run(self):
if not os.path.isdir(self.parental_dir):
os.makedirs(self.parental_dir)
# asyn download
download_log_file = '/'.join([self.parental_dir, 'finish.log'])
if os.path.isfile(download_log_file):
with open(download_log_file, 'r') as f:
content = f.readlines()
content = [x.strip() for x in content]
else:
content = []
        # begin the download if it has not been completed before
if not self.cancer in content:
with open('/'.join([self.parental_dir, 'stderr.log']), 'a+') as stderrs:
for n in self.workflow:
logs = self.__getattribute__(n)()
stderrs.write(logs)
with open(download_log_file, 'a+') as f:
f.write(self.cancer+'\n')
class FireBrowseDnloader(Workflow):
__slot__ = ['release_time']
def __init__(self, release_time="2016_01_28", base_url="http://gdac.broadinstitute.org/runs",**kwargs):
super(FireBrowseDnloader, self).__init__(**kwargs)
self.release_time = release_time
self.base_url = base_url
def _fget(self,data_type, store_dir):
        ''' Download level 3 data from FireBrowse
        Parameters
        ----------
        data_type : str
            Level 3 data type provided by FireBrowse
        store_dir : str
            Output directory
        Notes
        -----
        The cancer type, `base_url` (default "http://gdac.broadinstitute.org/runs")
        and `release_time` (default "2016_01_28", the latest release available at
        the time of writing) are read from the instance attributes rather than
        passed as arguments.
        Raises
        ------
        KeyError
            If the input data type is not in the supported list.
        Returns
        -------
        str
            Run message. Returns 'Success' if no error occurs.
        '''
# modifition to adapt CNV data on the function
if data_type == 'cnv_gene_somatic':
release_prefix = 'analyses'
cancer_suffix = '-TP'
if self.cancer == 'SKCM':
cancer_suffix = '-TM'
else:
cancer_suffix = ''
release_prefix = 'stddata'
data_type_dict = {
"rna_raw" : "Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes__data.Level_3",
"rna_norm": "Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.Level_3",
"rppa": "RPPA_AnnotateWithGene.Level_3",
"cnv_gene_somatic": "CopyNumber_Gistic2.Level_4",
"cnv_segment_somatic": "Merge_snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_minus_germline_cnv_hg19__seg.Level_3",
"cnv_segment_all": "Merge_snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_hg19__seg.Level_3",
}
keep_suffix_dict = {
"rna_raw": "rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes__data.data.txt",
"rppa" : "rppa.txt",
"rna_norm": "rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.data.txt",
"cnv_gene_somatic": "by_genes.txt",
"cnv_segment_somatic": "snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_minus_germline_cnv_hg19__seg.seg.txt",
"cnv_segment_all": "snp__genome_wide_snp_6__broad_mit_edu__Level_3__segmented_scna_hg19__seg.seg.txt",
}
if not data_type in data_type_dict.keys():
raise KeyError("""
{0} is not a valid data type, only accept following input: {1}
""".format(data_type,','.join(data_type_dict.keys())))
short_release_time = "".join(self.release_time.split('_'))
release = release_prefix+"__{release_time}"
sub_folder = "data/{cancer}/{short_release_time}"
file_name = "gdac.broadinstitute.org_{cancer}.{data_type}.{short_release_time}00.0.0.tar.gz"
url = "/".join([self.base_url, release, sub_folder, file_name])
url = url.format(**dict(
cancer=self.cancer+cancer_suffix,
data_type=data_type_dict[data_type],
release_time=self.release_time,
short_release_time=short_release_time,
)
)
cmd ="""
set -x
[[ -d {store_dir}_{cancer}_{data_type}_tmp ]] || mkdir -p {store_dir}_{cancer}_{data_type}_tmp
wget -q -O {store_dir}_{cancer}_{data_type}.gz {url}
tar -xvvf {store_dir}_{cancer}_{data_type}.gz -C {store_dir}_{cancer}_{data_type}_tmp --strip-components=1
rm {store_dir}_{cancer}_{data_type}.gz
if [ $(ls {store_dir}_{cancer}_{data_type}_tmp/*{keep_suffix}| wc -l) -gt 1 ];then
[[ -d {store_dir}_{cancer} ]] || mkdir {store_dir}_{cancer}
fi
mv {store_dir}_{cancer}_{data_type}_tmp/*{keep_suffix} {store_dir}_{cancer}
""".format(**dict(
store_dir=store_dir,
cancer=self.cancer,
keep_suffix=keep_suffix_dict[data_type],
url=url,
data_type=data_type
)
)
try:
subprocess.run(cmd, shell=True,check=True)
log = 'Success'
except subprocess.CalledProcessError as e:
cmd = """
set -x
rm {store_dir}_{cancer}_{data_type}.gz
rm -rf {store_dir}_{cancer}_{data_type}_tmp
""".format(**dict(
store_dir=store_dir,
cancer=self.cancer,
data_type=data_type
)
)
subprocess.run(cmd, shell=True, check=True)
return str(e.returncode)
## process data
cmd = """
rm -rf {store_dir}_{cancer}_{data_type}_tmp
""".format(**dict(
store_dir=store_dir,
cancer=self.cancer,
data_type=data_type
)
)
subprocess.run(cmd,shell=True,check=True)
return log
def _splitCountTPM(self, raw_rnaseq_path):
        ''' Split one data frame containing both raw_count and scaled_estimate into two data frames and
        merge the sample level data frames into patient level data frames, keeping tumor and normal samples separate.
        Then, based on the scaled_estimate column, calculate TPM and RPKM information.
        Parameters
        ----------
        raw_rnaseq_path : str
            Path to raw RNAseq data downloaded from FireBrowse
        Returns
        -------
        dict
            A dict that contains three pandas.DataFrame objects: raw count, TPM and RPKM.
            Each data frame is indexed by both Entrez ID and gene symbol, with columns named by TCGA sample barcodes.
'''
df = pd.read_table(raw_rnaseq_path, index_col=0,skiprows=[1])
col_selector = pd.read_table(raw_rnaseq_path, index_col=0, nrows=2)
raw_count = df.loc[:, col_selector.iloc[0, :] =='raw_count']
raw_count = mergeToSample(raw_count)
raw_count = round(raw_count)
## Get fpkm and tpm information from transcript fractions
        transcript_fraction = df.loc[:, col_selector.iloc[0, :] == 'scaled_estimate']
        tpm = transcript_fraction * 10e6
        normalize_factor = transcript_fraction.sum(axis=0)
        fpkm = transcript_fraction * normalize_factor * 10e9
tpm = mergeToSample(tpm)
fpkm = mergeToSample(fpkm)
return dict(count=raw_count,tpm=tpm,fpkm=fpkm)
def _formatGistic(self, gistic_path):
        ''' Format GISTIC results and separate files into segment and gene level
Parameters
----------
gistic_path : str
Path to the folder of gistic output
Returns
-------
dict
Dictionary with files output name as key and pandas.DataFrame as value
'''
f_dict = {
"broad_focal": '{}/all_data_by_genes.txt',
"focal": '{}/focal_data_by_genes.txt',
"threds": '{}/all_thresholded.by_genes.txt'
}
result = {}
for k, v in f_dict.items():
if os.path.isfile(v.format(gistic_path)):
result[k] = pd.read_table(v.format(gistic_path),index_col=0).drop(['Locus ID', 'Cytoband'],axis=1)
return result
def rnaseq(self):
'''
Workflow for downloading RNAseq data from FireBrowse and preprocessing data format.
Parameters
----------
parental_dir : str
Path to parental folder that you want to store the whole RNAseq data
cancer : str
Cancer name you want to download from FireBrowse, it must be a cancer type included in TCGA project.
'''
########################## Raw count and Scale Estimate ##########################
# 1. Fetch raw count and RSEM information from FireBrowse
        # 2. Split the fetched data frame into raw count and RSEM (scaled estimate) parts separately.
        # 3. Merge sample level data into patient level data, but keep tumor and normal samples separate.
# 4. Calculate TPM and RPKM based on RSEM results.
##################################################################################
store_dir = '/'.join([self.parental_dir, 'RNASeq'])
store_dir_raw = '_'.join([store_dir, 'raw'])
store_dir_norm = '_'.join([store_dir, 'norm'])
log = self._fget(data_type='rna_raw',store_dir=store_dir_raw)
if log != 'Success':
return 'Cannot Found\trna_raw\t'+self.cancer+'\n'
raw_rnaseq = self._splitCountTPM(
raw_rnaseq_path='_'.join([store_dir_raw, self.cancer])
)
for name, df in raw_rnaseq.items():
df = rmEntrez(df)
if name in ['fpkm','tpm']:
log_df = np.log2( 1+ df )
tumor_zscore = calTNzcore(log_df, pair_TN=False)
storeData(df=tumor_zscore, parental_dir=store_dir,
sub_folder=name+'/zscore_tumor/', cancer=self.cancer)
try:
paired_zscore = calTNzcore(log_df, pair_TN=True)
storeData(df=paired_zscore, parental_dir=store_dir,
sub_folder=name+'/zscore_paired/', cancer=self.cancer)
except ValueError:
pass
name += '/origin'
storeData(df = df, parental_dir = store_dir,
sub_folder=name, cancer=self.cancer)
subprocess.call(
'rm -rf {}'.format('_'.join([store_dir_raw, self.cancer])), shell=True)
        ########################## Normalized count #####################################
        # 1. Fetch normalized count from FireBrowse
        # 2. Remove the second (annotation) row, which only labels the normalized counts
        # 3. Merge sample level data into patient level data, but keep tumor and normal samples separate.
##################################################################################
log = self._fget(data_type='rna_norm',store_dir=store_dir_norm)
if log != 'Success':
return 'Cannot Found\trna_norm\t'+self.cancer+'\n'
rnaseq_norm = pd.read_table(
'_'.join([store_dir_norm, self.cancer]), index_col=0, skiprows=[1])
rnaseq_norm = mergeToSample(rnaseq_norm)
rnaseq_norm = rmEntrez(rnaseq_norm)
storeData(df=rnaseq_norm, parental_dir=store_dir,
sub_folder='norm_count/origin', cancer=self.cancer)
subprocess.call(
'rm -rf {}'.format('_'.join([store_dir_norm, self.cancer])), shell=True)
return ''
def cnv(self):
'''
Workflow for downloading copy number variation data from FireBrowse and preprocessing data format.
Parameters
----------
parental_dir : str
Path to parental folder that you want to store the whole copy number variation data
cancer : str
Cancer name you want to download from FireBrowse, it must be a cancer type included in TCGA project.
'''
## Gene
store_dir = '/'.join([self.parental_dir, 'CNV/somatic', 'gene'])
log = self._fget( data_type='cnv_gene_somatic',store_dir=store_dir)
if log != 'Success':
return 'Cannot Found\tcnv_gene_somatic\t'+self.cancer+'\n'
cnv_gene = self._formatGistic(
gistic_path='_'.join([store_dir, self.cancer]))
for name, df in cnv_gene.items():
df = mergeToSample(df)
storeData(df=df, parental_dir=store_dir,
sub_folder=name, cancer=self.cancer)
subprocess.call(
'rm -rf {}'.format('_'.join([store_dir, self.cancer])), shell=True)
## Segment
for lv in ['somatic','all']:
store_dir = '/'.join([self.parental_dir, 'CNV/'+lv, 'segment'])
log = self._fget(data_type='cnv_segment_'+lv, store_dir=store_dir)
if log != 'Success':
return 'Cannot Found\t' + 'cnv_segment_'+lv+'\t'+self.cancer+'\n'
if not os.path.exists(store_dir):
os.makedirs(store_dir)
subprocess.call(
'mv {0} {1}'.format('_'.join([store_dir, self.cancer]),
'/'.join([store_dir, self.cancer])
),
shell=True)
return ''
def rppa(self):
'''
Workflow for downloading RPPA data from FireBrowse and preprocessing data format.
Parameters
----------
parental_dir : str
Path to parental folder that you want to store the whole RPPA data
cancer : str
Cancer name you want to download from FireBrowse, it must be a cancer type included in TCGA project.
'''
store_dir = '/'.join([self.parental_dir, 'RPPA'])
log=self._fget(data_type='rppa',store_dir=store_dir)
if log != 'Success':
return 'Cannot Found\trppa\t'+self.cancer+'\n'
rppa = pd.read_table(
'_'.join([store_dir,self.cancer]), index_col=0)
rppa = rmEntrez(rppa)
rppa = mergeToSample(rppa)
storeData(df=rppa, parental_dir=store_dir,
sub_folder='', cancer=self.cancer)
subprocess.call(
'rm -rf {}'.format('_'.join([store_dir, self.cancer])), shell=True)
return ''
def snv(self):
'''
Please use MC3 downloader to fetch the SNV result for all cancer in TCGA,
which is more robust.
'''
return 'GO TO MC3\tsnv\t'+self.cancer+'\n'
class GdcDnloader(GdcApi, Workflow):
__slot__ = ['type_available', 'base_url']
def __init__(self, base_url="https://gdc.xenahubs.net/download/",**kwargs):
Workflow.__init__(self,**kwargs)
GdcApi.__init__(self, cancer=self.cancer,parental_dir=self.parental_dir)
# super(GdcDnloader, self).__init__(data_endpt="https://api.gdc.cancer.gov/data",files_endpt="https://api.gdc.cancer.gov/files",**kwargs)
# data-release-80
self.base_url = base_url
self.type_available = {
'RNASeq': ['fpkm','count','fpkm_uq'],
'SNV': ['MuSE', "MuTect2", "VarScan2", "SomaticSniper"],
'cnv': ['somatic','all']
}
def _fget(self, data_type, store_dir):
'''Download level 3 data from Xenas
Parameters
----------
data_type : str
Data type to be downloaded
store_dir : str
Path to store the data
Raises
------
KeyError
            If the requested data type is not in the supported list
        Returns
        -------
        str
            'Success' if the download succeeded, otherwise the failing return code as a string
'''
data_type_dict = {
'fpkm': "htseq_fpkm",
'count':"htseq_counts",
'fpkm_uq': "htseq_fpkm-uq",
'muse': "muse_snv",
"mutect2": "mutect2_snv",
"VarScan2": "varscan2_snv",
"SomaticSnipe":"somaticsniper_snv",
}
if not data_type in data_type_dict.keys():
raise KeyError("""
{0} is not a valid data type, only accept following input: {1}
""".format(data_type, ','.join(data_type_dict.keys())))
# https: // gdc.xenahubs.net/download/TCGA-CHOL/Xena_Matrices/TCGA-CHOL.htseq_fpkm.tsv.gz
subpath = 'TCGA-{cancer}/Xena_Matrices/TCGA-{cancer}.{data_type}.tsv.gz'
url = "/".join([self.base_url, subpath])
url = url.format(**dict(
cancer=self.cancer,
data_type=data_type_dict[data_type]
)
)
cmd = """
set -x
[[ -d {store_dir} ]] || mkdir -p {store_dir}
wget -q -O {store_dir}/{cancer}.gz {url}
""".format(**dict(
store_dir=store_dir,
cancer=self.cancer,
url=url,
)
)
try:
subprocess.run(cmd, shell=True, check=True)
log = 'Success'
cmd = "set -x; gunzip {store_dir}/{cancer}.gz".format(**dict(store_dir=store_dir,
cancer=self.cancer))
except subprocess.CalledProcessError as e:
log = str(e.returncode)
cmd = "set -x; rm {store_dir}/{cancer}.gz".format(**dict(store_dir=store_dir,
cancer=self.cancer))
subprocess.run(cmd, shell=True, check=True)
return log
def rnaseq(self):
store_parental = '/'.join([self.parental_dir, 'RNASeq'])
for name in self.type_available['RNASeq']:
store_dir = '/'.join([store_parental, name])
log = self._fget(data_type=name, store_dir=store_dir)
if log != 'Success':
return 'Cannot Found\t' + name+'\t'+self.cancer+'\n'
df = pd.read_table('/'.join([store_dir,self.cancer]),index_col=0)
            df = np.exp2(df) - 1  # matrices downloaded from Xena are log2(x+1) transformed
df = mergeToSample(df)
df = mapEm2Gene(df)
if name == 'fpkm':
tpm = tpmToFpkm(df, reverse=True)
for raw_name,raw_df in {'tpm':tpm,'fpkm':df}.items():
log_df = np.log2(1 + raw_df)
tumor_zscore = calTNzcore(log_df, pair_TN=False)
storeData(df=tumor_zscore, parental_dir=store_parental,
sub_folder=raw_name+'/zscore_tumor/', cancer=self.cancer)
try:
paired_zscore = calTNzcore(log_df, pair_TN=True)
storeData(df=paired_zscore, parental_dir=store_parental,
sub_folder=raw_name+'/zscore_paired/', cancer=self.cancer)
except ValueError:
pass
storeData(df=raw_df, parental_dir=store_parental,
sub_folder=raw_name+'/origin', cancer=self.cancer)
else:
if name == 'count':
df = df.round(0)
storeData(df=df, parental_dir=store_parental,
sub_folder=name+'/origin', cancer=self.cancer)
subprocess.call(
'rm -rf {}'.format('/'.join([store_dir, self.cancer])), shell=True)
return ''
def snv(self):
for m in self.type_available['SNV']:
df, errors = self.getTableFromFiles(
data_type='masked_somatic_mutation', by_name=False,method=m,comment='#')
if errors != None:
return 'Cannot Found\t'+m+'\t'+self.cancer+'\n'
else:
# df.rename(columns={"Hugo_Symbol":"gene"},inplace=True)
# df.insert(0, 'sample', df["Tumor_Sample_Barcode"].map(
# lambda x: '-'.join(x.split('-')[:4])[:-1]))
store_parental = '/'.join([self.parental_dir, 'SNV'])
storeData(df=df, parental_dir=store_parental,
sub_folder=m, cancer=self.cancer)
return ''
def cnv(self):
store_parental = '/'.join([self.parental_dir, 'CNV'])
# meta data
## map uuid to barcode
meta, errors = self.getTableFromFiles(data_type='aliquot')
if errors != None:
return 'Cannot Found\tuuid map barcode\t'+self.cancer+'\n'
meta = meta.dropna(
axis=0).set_index('bcr_aliquot_uuid')
meta.index = meta.index.map(lambda x: x.lower())
meta = meta['bcr_sample_barcode'].to_dict()
stderr = ''
# focal data
df,errors = self.getTableFromFiles(data_type='gistic')
if errors == None:
df = df.set_index('Gene Symbol').drop(['Gene ID', 'Cytoband'],axis=1)
df.columns = df.columns.map(meta)
df = mergeToSample(df)
df = mapEm2Gene(df)
storeData(df=df, parental_dir=store_parental,
sub_folder='somatic/gene/focal', cancer=self.cancer)
else:
stderr += 'Cannot Found\tgistic\t'+self.cancer+'\n'
# Segment data
## somatic
df, errors = self.getTableFromFiles(data_type='cnv_segment_somatic', by_name=False)
if errors == None:
df['GDC_Aliquot'] = df['GDC_Aliquot'].map(meta)
storeData(df=df, parental_dir=store_parental,
sub_folder='somatic/segment', cancer=self.cancer,index=False)
else:
stderr += 'Cannot Found\tcnv_segment_somatic\t'+self.cancer+'\n'
# all
df, errors = self.getTableFromFiles(data_type='cnv_segment_all', by_name=False)
if errors == None:
df['GDC_Aliquot'] = df['GDC_Aliquot'].map(meta)
storeData(df=df, parental_dir=store_parental,
sub_folder='all/segment', cancer=self.cancer, index=False)
else:
stderr += 'Cannot Found\tcnv_segment_all\t'+self.cancer +'\n'
return stderr
def rppa(self):
# RPPA data for hg38 is not available.
return 'Not Available\trppa\t'+self.cancer + '\n'
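# --- Editor's hedged usage sketch (not part of the original module) ---
# A minimal way the workflow classes above might be driven, assuming the GdcApi
# constructor defined earlier accepts `cancer` and `parental_dir` as suggested by
# GdcDnloader.__init__; the cancer code and output directory below are
# illustrative assumptions only.
# dl = GdcDnloader(cancer='CHOL', parental_dir='/data/TCGA_GDC',
#                  workflow=['rnaseq', 'cnv', 'snv'])
# dl.run()            # runs each workflow step, logging failures to stderr.log
# dl.metaDownload()   # fetches biospecimen tables through the GDC API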
```
{
"source": "jingxiufenghua/rec-model",
"score": 3
} |
#### File: rec-model/AFM/model.py
```python
import itertools
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Embedding, Dropout, Dense, Input
class AFM(Model):
def __init__(self, feature_columns, mode, att_vector=8, activation='relu', dropout=0.5, embed_reg=1e-6):
"""
AFM
:param feature_columns: A list. sparse column feature information.
:param mode: A string. 'max'(MAX Pooling) or 'avg'(Average Pooling) or 'att'(Attention)
        :param att_vector: A scalar. Dimension of the attention vector.
:param activation: A string. Activation function of attention.
:param dropout: A scalar. Dropout.
:param embed_reg: A scalar. the regularizer of embedding
"""
super(AFM, self).__init__()
self.sparse_feature_columns = feature_columns
self.mode = mode
self.embed_layers = {
'embed_' + str(i): Embedding(input_dim=feat['feat_num'],
input_length=1,
output_dim=feat['embed_dim'],
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
for i, feat in enumerate(self.sparse_feature_columns)
}
if self.mode == 'att':
self.attention_W = Dense(units=att_vector, activation=activation, use_bias=True)
self.attention_dense = Dense(units=1, activation=None)
self.dropout = Dropout(dropout)
self.dense = Dense(units=1, activation=None)
def call(self, inputs):
# Input Layer
sparse_inputs = inputs
# Embedding Layer
embed = [self.embed_layers['embed_{}'.format(i)](sparse_inputs[:, i]) for i in range(sparse_inputs.shape[1])]
embed = tf.transpose(tf.convert_to_tensor(embed), perm=[1, 0, 2]) # (None, len(sparse_inputs), embed_dim)
# Pair-wise Interaction Layer
row = []
col = []
for r, c in itertools.combinations(range(len(self.sparse_feature_columns)), 2):
row.append(r)
col.append(c)
p = tf.gather(embed, row, axis=1) # (None, (len(sparse) * len(sparse) - 1) / 2, k)
q = tf.gather(embed, col, axis=1) # (None, (len(sparse) * len(sparse) - 1) / 2, k)
bi_interaction = p * q # (None, (len(sparse) * len(sparse) - 1) / 2, k)
# mode
if self.mode == 'max':
# MaxPooling Layer
x = tf.reduce_sum(bi_interaction, axis=1) # (None, k)
elif self.mode == 'avg':
# AvgPooling Layer
x = tf.reduce_mean(bi_interaction, axis=1) # (None, k)
else:
# Attention Layer
x = self.attention(bi_interaction) # (None, k)
# Output Layer
outputs = tf.nn.sigmoid(self.dense(x))
return outputs
def summary(self):
sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32)
Model(inputs=sparse_inputs, outputs=self.call(sparse_inputs)).summary()
def attention(self, bi_interaction):
a = self.attention_W(bi_interaction) # (None, (len(sparse) * len(sparse) - 1) / 2, t)
a = self.attention_dense(a) # (None, (len(sparse) * len(sparse) - 1) / 2, 1)
a_score = tf.nn.softmax(a, axis=1) # (None, (len(sparse) * len(sparse) - 1) / 2, 1)
outputs = tf.reduce_sum(bi_interaction * a_score, axis=1) # (None, embed_dim)
return outputs
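# --- Editor's hedged usage sketch (added; feature sizes below are assumptions) ---
# Follows the `test_model()` convention used elsewhere in this repository:
# build toy sparse feature columns, instantiate AFM in attention mode, and
# print the Keras summary.
def test_model():
    feature_columns = [{'feat': 'C{}'.format(i), 'feat_num': 100, 'embed_dim': 8}
                       for i in range(4)]
    model = AFM(feature_columns, mode='att', att_vector=8, dropout=0.5)
    model.summary()
# test_model()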
```
#### File: rec-model/AttRec/model.py
```python
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Embedding, Input
from tensorflow.keras.regularizers import l2
from modules import *
class AttRec(Model):
def __init__(self, feature_columns, maxlen=40, mode='inner', gamma=0.5, w=0.5, embed_reg=1e-6, **kwargs):
"""
AttRec
:param feature_columns: A feature columns list. user + seq
:param maxlen: A scalar. In the paper, maxlen is L, the number of latest items.
:param gamma: A scalar. if mode == 'dist', gamma is the margin.
:param mode: A string. inner or dist.
:param w: A scalar. The weight of short interest.
:param embed_reg: A scalar. The regularizer of embedding.
"""
super(AttRec, self).__init__(**kwargs)
# maxlen
self.maxlen = maxlen
# w
self.w = w
self.gamma = gamma
self.mode = mode
# feature columns
self.user_fea_col, self.item_fea_col = feature_columns
# embed_dim
self.embed_dim = self.item_fea_col['embed_dim']
# user embedding
self.user_embedding = Embedding(input_dim=self.user_fea_col['feat_num'],
input_length=1,
output_dim=self.user_fea_col['embed_dim'],
mask_zero=False,
embeddings_initializer='random_normal',
embeddings_regularizer=l2(embed_reg))
# item embedding
self.item_embedding = Embedding(input_dim=self.item_fea_col['feat_num'],
input_length=1,
output_dim=self.item_fea_col['embed_dim'],
mask_zero=True,
embeddings_initializer='random_normal',
embeddings_regularizer=l2(embed_reg))
# item2 embedding, not share embedding
self.item2_embedding = Embedding(input_dim=self.item_fea_col['feat_num'],
input_length=1,
output_dim=self.item_fea_col['embed_dim'],
mask_zero=True,
embeddings_initializer='random_normal',
embeddings_regularizer=l2(embed_reg))
# self-attention
self.self_attention = SelfAttention_Layer()
def call(self, inputs, **kwargs):
# input
user_inputs, seq_inputs, pos_inputs, neg_inputs = inputs
# mask
# mask = self.item_embedding.compute_mask(seq_inputs)
mask = tf.cast(tf.not_equal(seq_inputs, 0), dtype=tf.float32) # (None, maxlen)
# user info
user_embed = self.user_embedding(tf.squeeze(user_inputs, axis=-1)) # (None, dim)
# seq info
seq_embed = self.item_embedding(seq_inputs) # (None, maxlen, dim)
# item
pos_embed = self.item_embedding(tf.squeeze(pos_inputs, axis=-1)) # (None, dim)
neg_embed = self.item_embedding(tf.squeeze(neg_inputs, axis=-1)) # (None, dim)
# item2 embed
pos_embed2 = self.item2_embedding(tf.squeeze(pos_inputs, axis=-1)) # (None, dim)
neg_embed2 = self.item2_embedding(tf.squeeze(neg_inputs, axis=-1)) # (None, dim)
# short-term interest
short_interest = self.self_attention([seq_embed, seq_embed, seq_embed, mask]) # (None, dim)
# mode
if self.mode == 'inner':
# long-term interest, pos and neg
pos_long_interest = tf.multiply(user_embed, pos_embed2)
neg_long_interest = tf.multiply(user_embed, neg_embed2)
# combine
pos_scores = self.w * tf.reduce_sum(pos_long_interest, axis=-1, keepdims=True) \
+ (1 - self.w) * tf.reduce_sum(tf.multiply(short_interest, pos_embed), axis=-1, keepdims=True)
neg_scores = self.w * tf.reduce_sum(neg_long_interest, axis=-1, keepdims=True) \
+ (1 - self.w) * tf.reduce_sum(tf.multiply(short_interest, neg_embed), axis=-1, keepdims=True)
self.add_loss(tf.reduce_mean(-tf.math.log(tf.nn.sigmoid(pos_scores - neg_scores))))
else:
# clip by norm
user_embed = tf.clip_by_norm(user_embed, 1, -1)
pos_embed = tf.clip_by_norm(pos_embed, 1, -1)
neg_embed = tf.clip_by_norm(neg_embed, 1, -1)
pos_embed2 = tf.clip_by_norm(pos_embed2, 1, -1)
neg_embed2 = tf.clip_by_norm(neg_embed2, 1, -1)
# distance
# long-term interest, pos and neg
pos_long_interest = tf.square(user_embed - pos_embed2) # (None, dim)
neg_long_interest = tf.square(user_embed - neg_embed2) # (None, dim)
# combine. Here is a difference from the original paper.
pos_scores = self.w * tf.reduce_sum(pos_long_interest, axis=-1, keepdims=True) + \
(1 - self.w) * tf.reduce_sum(tf.square(short_interest - pos_embed), axis=-1, keepdims=True)
neg_scores = self.w * tf.reduce_sum(neg_long_interest, axis=-1, keepdims=True) + \
(1 - self.w) * tf.reduce_sum(tf.square(short_interest - neg_embed), axis=-1, keepdims=True)
# minimize loss
# self.add_loss(tf.reduce_sum(tf.maximum(pos_scores - neg_scores + self.gamma, 0)))
self.add_loss(tf.reduce_sum(tf.nn.relu(pos_scores - neg_scores + self.gamma)))
return pos_scores, neg_scores
def summary(self):
seq_inputs = Input(shape=(self.maxlen,), dtype=tf.int32)
user_inputs = Input(shape=(1, ), dtype=tf.int32)
pos_inputs = Input(shape=(1, ), dtype=tf.int32)
neg_inputs = Input(shape=(1, ), dtype=tf.int32)
Model(inputs=[user_inputs, seq_inputs, pos_inputs, neg_inputs],
outputs=self.call([user_inputs, seq_inputs, pos_inputs, neg_inputs])).summary()
def test_model():
user_features = {'feat': 'user_id', 'feat_num': 100, 'embed_dim': 8}
seq_features = {'feat': 'item_id', 'feat_num': 100, 'embed_dim': 8}
features = [user_features, seq_features]
model = AttRec(features, mode='dist')
model.summary()
# test_model()
```
#### File: rec-model/BPR/model.py
```python
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Embedding, Input
from tensorflow.keras.regularizers import l2
class BPR(Model):
def __init__(self, feature_columns, mode='inner', embed_reg=1e-6):
"""
BPR
:param feature_columns: A list. user feature columns + item feature columns
        :param mode: A string. 'inner' or 'dist'.
:param embed_reg: A scalar. The regularizer of embedding.
"""
super(BPR, self).__init__()
# feature columns
self.user_fea_col, self.item_fea_col = feature_columns
# mode
self.mode = mode
# user embedding
self.user_embedding = Embedding(input_dim=self.user_fea_col['feat_num'],
input_length=1,
output_dim=self.user_fea_col['embed_dim'],
mask_zero=False,
embeddings_initializer='random_normal',
embeddings_regularizer=l2(embed_reg))
# item embedding
self.item_embedding = Embedding(input_dim=self.item_fea_col['feat_num'],
input_length=1,
output_dim=self.item_fea_col['embed_dim'],
mask_zero=True,
embeddings_initializer='random_normal',
embeddings_regularizer=l2(embed_reg))
def call(self, inputs):
user_inputs, pos_inputs, neg_inputs = inputs # (None, 1), (None, 1)
# user info
user_embed = self.user_embedding(user_inputs) # (None, 1, dim)
# item
pos_embed = self.item_embedding(pos_inputs) # (None, 1, dim)
neg_embed = self.item_embedding(neg_inputs) # (None, 1, dim)
if self.mode == 'inner':
# calculate positive item scores and negative item scores
pos_scores = tf.reduce_sum(tf.multiply(user_embed, pos_embed), axis=-1) # (None, 1)
neg_scores = tf.reduce_sum(tf.multiply(user_embed, neg_embed), axis=-1) # (None, 1)
# add loss. Computes softplus: log(exp(features) + 1)
# self.add_loss(tf.reduce_mean(tf.math.softplus(neg_scores - pos_scores)))
self.add_loss(tf.reduce_mean(-tf.math.log(tf.nn.sigmoid(pos_scores - neg_scores))))
else:
# clip by norm
# user_embed = tf.clip_by_norm(user_embed, 1, -1)
# pos_embed = tf.clip_by_norm(pos_embed, 1, -1)
# neg_embed = tf.clip_by_norm(neg_embed, 1, -1)
pos_scores = tf.reduce_sum(tf.square(user_embed - pos_embed), axis=-1)
neg_scores = tf.reduce_sum(tf.square(user_embed - neg_embed), axis=-1)
self.add_loss(tf.reduce_sum(tf.nn.relu(pos_scores - neg_scores + 0.5)))
logits = tf.concat([pos_scores, neg_scores], axis=-1)
return logits
def summary(self):
user_inputs = Input(shape=(1, ), dtype=tf.int32)
pos_inputs = Input(shape=(1, ), dtype=tf.int32)
neg_inputs = Input(shape=(1, ), dtype=tf.int32)
Model(inputs=[user_inputs, pos_inputs, neg_inputs],
outputs=self.call([user_inputs, pos_inputs, neg_inputs])).summary()
def test_model():
user_features = {'feat': 'user_id', 'feat_num': 100, 'embed_dim': 8}
item_features = {'feat': 'item_id', 'feat_num': 100, 'embed_dim': 8}
features = [user_features, item_features]
model = BPR(features)
model.summary()
# test_model()
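# --- Editor's hedged training sketch (added; the shapes and optimizer are assumptions) ---
# Because the pairwise loss is registered through `self.add_loss`, the model can
# be trained without explicit labels: compile with an optimizer only and feed
# (user, positive item, negative item) index arrays.
def train_sketch():
    import numpy as np
    user_features = {'feat': 'user_id', 'feat_num': 100, 'embed_dim': 8}
    item_features = {'feat': 'item_id', 'feat_num': 100, 'embed_dim': 8}
    model = BPR([user_features, item_features])
    n = 1024
    user = np.random.randint(0, 100, size=(n, 1))
    pos = np.random.randint(1, 100, size=(n, 1))   # item index 0 is reserved for padding
    neg = np.random.randint(1, 100, size=(n, 1))
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-3))
    model.fit([user, pos, neg], epochs=1, batch_size=256, verbose=0)
# train_sketch()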
```
#### File: rec-model/Caser/utils.py
```python
import pandas as pd
import numpy as np
import random
from tqdm import tqdm
from tensorflow.keras.preprocessing.sequence import pad_sequences
def sparseFeature(feat, feat_num, embed_dim=4):
"""
create dictionary for sparse feature
:param feat: feature name
:param feat_num: the total number of sparse features that do not repeat
:param embed_dim: embedding dimension
:return:
"""
return {'feat': feat, 'feat_num': feat_num, 'embed_dim': embed_dim}
def create_implicit_ml_1m_dataset(file, trans_score=2, embed_dim=8, maxlen=40):
"""
:param file: A string. dataset path.
    :param trans_score: A scalar. Ratings greater than or equal to this value are kept as positive (implicit) interactions.
    :param embed_dim: A scalar. latent factor.
    :param maxlen: A scalar. maxlen.
    :return: feature_columns, (train_X, train_y), (val_X, val_y), (test_X, test_y)
"""
print('==========Data Preprocess Start=============')
data_df = pd.read_csv(file, sep="::", engine='python',
names=['user_id', 'item_id', 'label', 'Timestamp'])
# implicit dataset
data_df = data_df[data_df.label >= trans_score]
# sort
data_df = data_df.sort_values(by=['user_id', 'Timestamp'])
train_data, val_data, test_data = [], [], []
item_id_max = data_df['item_id'].max()
for user_id, df in tqdm(data_df[['user_id', 'item_id']].groupby('user_id')):
pos_list = df['item_id'].tolist()
def gen_neg():
neg = pos_list[0]
while neg in pos_list:
neg = random.randint(1, item_id_max)
return neg
neg_list = [gen_neg() for i in range(len(pos_list) + 100)]
for i in range(1, len(pos_list)):
hist_i = pos_list[:i]
if i == len(pos_list) - 1:
test_data.append([user_id, hist_i, pos_list[i], 1])
for neg in neg_list[i:]:
test_data.append([user_id, hist_i, neg, 0])
elif i == len(pos_list) - 2:
val_data.append([user_id, hist_i, pos_list[i], 1])
val_data.append([user_id, hist_i, neg_list[i], 0])
else:
train_data.append([user_id, hist_i, pos_list[i], 1])
train_data.append([user_id, hist_i, neg_list[i], 0])
# item feature columns
user_num, item_num = data_df['user_id'].max() + 1, data_df['item_id'].max() + 1
feature_columns = [sparseFeature('user_id', user_num, embed_dim),
sparseFeature('item_id', item_num, embed_dim)]
# shuffle
random.shuffle(train_data)
random.shuffle(val_data)
# random.shuffle(test_data)
# create dataframe
train = pd.DataFrame(train_data, columns=['user_id', 'hist', 'target_item', 'label'])
val = pd.DataFrame(val_data, columns=['user_id', 'hist', 'target_item', 'label'])
test = pd.DataFrame(test_data, columns=['user_id', 'hist', 'target_item', 'label'])
print('==================Padding===================')
train_X = [train['user_id'].values, pad_sequences(train['hist'], maxlen=maxlen), train['target_item'].values]
train_y = train['label'].values
val_X = [val['user_id'].values, pad_sequences(val['hist'], maxlen=maxlen), val['target_item'].values]
val_y = val['label'].values
test_X = [test['user_id'].values, pad_sequences(test['hist'], maxlen=maxlen), test['target_item'].values]
test_y = test['label'].values.tolist()
print('============Data Preprocess End=============')
return feature_columns, (train_X, train_y), (val_X, val_y), (test_X, test_y)
# create_implicit_ml_1m_dataset('../dataset/ml-1m/ratings.dat')
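# --- Editor's note on the returned structures (hedged; the path above is an example) ---
# feature_columns: [user sparse feature dict, item sparse feature dict]
# train_X / val_X / test_X: [user_id array, padded history matrix of shape (n, maxlen),
#                            target item array]; the matching y arrays hold the binary labels.
# For each user the last interaction (plus 100 sampled negatives) goes to the test split,
# the second-to-last to validation, and the rest to training.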
```
#### File: rec-model/data_process/utils.py
```python
def sparseFeature(feat, feat_num, embed_dim=4):
"""
create dictionary for sparse feature
:param feat: feature name
:param feat_num: the total number of sparse features that do not repeat
:param embed_dim: embedding dimension
:return:
"""
return {'feat_name': feat, 'feat_num': feat_num, 'embed_dim': embed_dim}
def denseFeature(feat):
"""
create dictionary for dense feature
:param feat: dense feature name
:return:
"""
return {'feat_name': feat}
```
#### File: rec-model/Deep_Crossing/model.py
```python
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Embedding, Dense, Dropout, Input
from modules import Residual_Units
class Deep_Crossing(Model):
def __init__(self, feature_columns, hidden_units, res_dropout=0., embed_reg=1e-6):
"""
Deep&Crossing
:param feature_columns: A list. sparse column feature information.
:param hidden_units: A list. Neural network hidden units.
:param res_dropout: A scalar. Dropout of resnet.
:param embed_reg: A scalar. The regularizer of embedding.
"""
super(Deep_Crossing, self).__init__()
self.sparse_feature_columns = feature_columns
self.embed_layers = {
'embed_' + str(i): Embedding(input_dim=feat['feat_num'],
input_length=1,
output_dim=feat['embed_dim'],
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
for i, feat in enumerate(self.sparse_feature_columns)
}
# the total length of embedding layers
embed_layers_len = sum([feat['embed_dim'] for feat in self.sparse_feature_columns])
self.res_network = [Residual_Units(unit, embed_layers_len) for unit in hidden_units]
self.res_dropout = Dropout(res_dropout)
self.dense = Dense(1, activation=None)
def call(self, inputs):
sparse_inputs = inputs
sparse_embed = tf.concat([self.embed_layers['embed_{}'.format(i)](sparse_inputs[:, i])
for i in range(sparse_inputs.shape[1])], axis=-1)
r = sparse_embed
for res in self.res_network:
r = res(r)
r = self.res_dropout(r)
outputs = tf.nn.sigmoid(self.dense(r))
return outputs
def summary(self):
sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32)
Model(inputs=sparse_inputs, outputs=self.call(sparse_inputs)).summary()
```
#### File: rec-model/DeepFM/modules.py
```python
import tensorflow as tf
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Dropout, Dense, Layer
class FM(Layer):
"""
Wide part
"""
def __init__(self, feature_length, w_reg=1e-6):
"""
Factorization Machine
In DeepFM, only the first order feature and second order feature intersect are included.
:param feature_length: A scalar. The length of features.
:param w_reg: A scalar. The regularization coefficient of parameter w.
"""
super(FM, self).__init__()
self.feature_length = feature_length
self.w_reg = w_reg
def build(self, input_shape):
self.w = self.add_weight(name='w', shape=(self.feature_length, 1),
initializer='random_normal',
regularizer=l2(self.w_reg),
trainable=True)
def call(self, inputs, **kwargs):
"""
:param inputs: A dict with shape `(batch_size, {'sparse_inputs', 'embed_inputs'})`:
sparse_inputs is 2D tensor with shape `(batch_size, sum(field_num))`
embed_inputs is 3D tensor with shape `(batch_size, fields, embed_dim)`
"""
sparse_inputs, embed_inputs = inputs['sparse_inputs'], inputs['embed_inputs']
# first order
first_order = tf.reduce_sum(tf.nn.embedding_lookup(self.w, sparse_inputs), axis=1) # (batch_size, 1)
# second order
square_sum = tf.square(tf.reduce_sum(embed_inputs, axis=1, keepdims=True)) # (batch_size, 1, embed_dim)
sum_square = tf.reduce_sum(tf.square(embed_inputs), axis=1, keepdims=True) # (batch_size, 1, embed_dim)
second_order = 0.5 * tf.reduce_sum(square_sum - sum_square, axis=2) # (batch_size, 1)
return first_order + second_order
class DNN(Layer):
"""
Deep part
"""
def __init__(self, hidden_units, activation='relu', dnn_dropout=0.):
"""
DNN part
        :param hidden_units: A list like `[unit1, unit2,...,]`. Numbers of units in each hidden layer.
:param activation: A string. Activation function.
:param dnn_dropout: A scalar. dropout number.
"""
super(DNN, self).__init__()
self.dnn_network = [Dense(units=unit, activation=activation) for unit in hidden_units]
self.dropout = Dropout(dnn_dropout)
def call(self, inputs, **kwargs):
x = inputs
for dnn in self.dnn_network:
x = dnn(x)
x = self.dropout(x)
return x
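# --- Editor's hedged shape check (added; the sizes below are illustrative assumptions) ---
# The FM layer expects a dict input: `sparse_inputs` holds global feature indices
# (each field value's offset into the single weight vector of length `feature_length`),
# and `embed_inputs` holds the matching embedding vectors.
def _fm_shape_check():
    batch, fields, embed_dim, feature_length = 4, 3, 8, 30
    fm = FM(feature_length=feature_length)
    sparse_inputs = tf.random.uniform((batch, fields), maxval=feature_length, dtype=tf.int32)
    embed_inputs = tf.random.normal((batch, fields, embed_dim))
    out = fm({'sparse_inputs': sparse_inputs, 'embed_inputs': embed_inputs})
    print(out.shape)  # expected: (4, 1)
# _fm_shape_check()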
```
#### File: DIN/preprocess/1_convert_pd.py
```python
import pickle
import pandas as pd
def to_df(file_path):
"""
    Convert the raw review file into a DataFrame
    :param file_path: path to the raw file
:return:
"""
with open(file_path, 'r') as fin:
df = {}
i = 0
for line in fin:
df[i] = eval(line)
i += 1
df = pd.DataFrame.from_dict(df, orient='index')
return df
reviews_df = to_df('../raw_data/reviews_Electronics_5.json')
# change the column order
# reviews2_df = pd.read_json('../raw_data/reviews_Electronics_5.json', lines=True)
with open('../raw_data/reviews.pkl', 'wb') as f:
pickle.dump(reviews_df, f, pickle.HIGHEST_PROTOCOL)
meta_df = to_df('../raw_data/meta_Electronics.json')
meta_df = meta_df[meta_df['asin'].isin(reviews_df['asin'].unique())]
meta_df = meta_df.reset_index(drop=True)
with open('../raw_data/meta.pkl', 'wb') as f:
pickle.dump(meta_df, f, pickle.HIGHEST_PROTOCOL)
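# --- Editor's hedged note ---
# `eval(line)` is used in to_df because some releases of the raw Amazon review
# files use Python-style single-quoted dicts rather than strict JSON. For that
# loose format, `ast.literal_eval(line)` is a safer drop-in; for strict JSON the
# commented pd.read_json(..., lines=True) call above works directly.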
```
#### File: rec-model/PNN/model.py
```python
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Embedding, Dense, Layer, Dropout, Input
from modules import DNN
class PNN(Model):
def __init__(self, feature_columns, hidden_units, mode='in', dnn_dropout=0.,
activation='relu', embed_reg=1e-6, w_z_reg=1e-6, w_p_reg=1e-6, l_b_reg=1e-6):
"""
Product-based Neural Networks
:param feature_columns: A list. sparse column feature information.
:param hidden_units: A list. Neural network hidden units.
        :param mode: A string. 'in' (IPNN) or 'out' (OPNN).
:param activation: A string. Activation function of dnn.
:param dnn_dropout: A scalar. Dropout of dnn.
:param embed_reg: A scalar. The regularizer of embedding.
:param w_z_reg: A scalar. The regularizer of w_z_ in product layer
:param w_p_reg: A scalar. The regularizer of w_p in product layer
:param l_b_reg: A scalar. The regularizer of l_b in product layer
"""
super(PNN, self).__init__()
# inner product or outer product
self.mode = mode
self.sparse_feature_columns = feature_columns
# the number of feature fields
self.field_num = len(self.sparse_feature_columns)
self.embed_dim = self.sparse_feature_columns[0]['embed_dim']
# The embedding dimension of each feature field must be the same
self.embed_layers = {
'embed_' + str(i): Embedding(input_dim=feat['feat_num'],
input_length=1,
output_dim=feat['embed_dim'],
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
for i, feat in enumerate(self.sparse_feature_columns)
}
# parameters
self.w_z = self.add_weight(name='w_z',
shape=(self.field_num, self.embed_dim, hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_z_reg),
trainable=True
)
if mode == 'in':
self.w_p = self.add_weight(name='w_p',
shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
hidden_units[0]),
initializer='random_uniform',
                                       regularizer=l2(w_p_reg),
trainable=True)
# out
else:
self.w_p = self.add_weight(name='w_p',
shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
self.embed_dim, hidden_units[0]),
initializer='random_uniform',
regularizer=l2(w_p_reg),
trainable=True)
self.l_b = self.add_weight(name='l_b', shape=(hidden_units[0], ),
initializer='random_uniform',
regularizer=l2(l_b_reg),
trainable=True)
# dnn
self.dnn_network = DNN(hidden_units[1:], activation, dnn_dropout)
self.dense_final = Dense(1)
def call(self, inputs):
sparse_inputs = inputs
sparse_embed = [self.embed_layers['embed_{}'.format(i)](sparse_inputs[:, i])
for i in range(sparse_inputs.shape[1])]
sparse_embed = tf.transpose(tf.convert_to_tensor(sparse_embed), [1, 0, 2]) # (None, field_num, embed_dim)
# product layer
row = []
col = []
for i in range(len(self.sparse_feature_columns) - 1):
for j in range(i + 1, len(self.sparse_feature_columns)):
row.append(i)
col.append(j)
p = tf.gather(sparse_embed, row, axis=1)
q = tf.gather(sparse_embed, col, axis=1)
if self.mode == 'in':
l_p = tf.tensordot(p*q, self.w_p, axes=2) # (None, hidden[0])
else: # out
u = tf.expand_dims(q, 2) # (None, field_num(field_num-1)/2, 1, emb_dim)
v = tf.expand_dims(p, 2) # (None, field_num(field_num-1)/2, 1, emb_dim)
l_p = tf.tensordot(tf.matmul(tf.transpose(u, [0, 1, 3, 2]), v), self.w_p, axes=3) # (None, hidden[0])
l_z = tf.tensordot(sparse_embed, self.w_z, axes=2) # (None, hidden[0])
l_1 = tf.nn.relu(tf.concat([l_z + l_p + self.l_b], axis=-1))
# dnn layer
dnn_x = self.dnn_network(l_1)
outputs = tf.nn.sigmoid(self.dense_final(dnn_x))
return outputs
def summary(self):
sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32)
Model(inputs=sparse_inputs, outputs=self.call(sparse_inputs)).summary()
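# --- Editor's hedged usage sketch (added; feature sizes below are assumptions) ---
# With 4 fields the product layer builds 4 * (4 - 1) / 2 = 6 field pairs; in 'in'
# mode w_p has shape (6, embed_dim, hidden_units[0]) and each pairwise product is
# contracted over its last two axes by tf.tensordot.
def test_model():
    feature_columns = [{'feat': 'C{}'.format(i), 'feat_num': 100, 'embed_dim': 8}
                       for i in range(4)]
    model = PNN(feature_columns, hidden_units=[64, 32], mode='in')
    model.summary()
# test_model()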
```
#### File: rec-model/SASRec/model.py
```python
import tensorflow as tf
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Layer, Dense, LayerNormalization, Dropout, Embedding, Input
from modules import *
class SASRec(tf.keras.Model):
def __init__(self, item_fea_col, blocks=1, num_heads=1, ffn_hidden_unit=128,
dropout=0., maxlen=40, norm_training=True, causality=False, embed_reg=1e-6):
"""
SASRec model
:param item_fea_col: A dict contains 'feat_name', 'feat_num' and 'embed_dim'.
:param blocks: A scalar. The Number of blocks.
:param num_heads: A scalar. Number of heads.
:param ffn_hidden_unit: A scalar. Number of hidden unit in FFN
        :param dropout: A scalar. Dropout rate.
        :param maxlen: A scalar. Maximum length of the input sequence.
        :param norm_training: Boolean. If True, use layer normalization, default True
        :param causality: Boolean. If True, use causal masking, default False
:param embed_reg: A scalar. The regularizer of embedding
"""
super(SASRec, self).__init__()
# sequence length
self.maxlen = maxlen
# item feature columns
self.item_fea_col = item_fea_col
# embed_dim
self.embed_dim = self.item_fea_col['embed_dim']
# d_model must be the same as embedding_dim, because of residual connection
self.d_model = self.embed_dim
# item embedding
self.item_embedding = Embedding(input_dim=self.item_fea_col['feat_num'],
input_length=1,
output_dim=self.item_fea_col['embed_dim'],
mask_zero=True,
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
self.pos_embedding = Embedding(input_dim=self.maxlen,
input_length=1,
output_dim=self.embed_dim,
mask_zero=False,
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
self.dropout = Dropout(dropout)
# attention block
self.encoder_layer = [EncoderLayer(self.d_model, num_heads, ffn_hidden_unit,
dropout, norm_training, causality) for b in range(blocks)]
def call(self, inputs, training=None):
# inputs
seq_inputs, pos_inputs, neg_inputs = inputs # (None, maxlen), (None, 1), (None, 1)
# mask
mask = tf.expand_dims(tf.cast(tf.not_equal(seq_inputs, 0), dtype=tf.float32), axis=-1) # (None, maxlen, 1)
# seq info
seq_embed = self.item_embedding(seq_inputs) # (None, maxlen, dim)
# pos encoding
# pos_encoding = positional_encoding(seq_inputs, self.embed_dim)
pos_encoding = tf.expand_dims(self.pos_embedding(tf.range(self.maxlen)), axis=0)
seq_embed += pos_encoding
seq_embed = self.dropout(seq_embed)
att_outputs = seq_embed # (None, maxlen, dim)
att_outputs *= mask
# self-attention
for block in self.encoder_layer:
att_outputs = block([att_outputs, mask]) # (None, seq_len, dim)
att_outputs *= mask
# user_info = tf.reduce_mean(att_outputs, axis=1) # (None, dim)
user_info = tf.expand_dims(att_outputs[:, -1], axis=1) # (None, 1, dim)
# item info
pos_info = self.item_embedding(pos_inputs) # (None, 1, dim)
neg_info = self.item_embedding(neg_inputs) # (None, 1/100, dim)
pos_logits = tf.reduce_sum(user_info * pos_info, axis=-1) # (None, 1)
neg_logits = tf.reduce_sum(user_info * neg_info, axis=-1) # (None, 1)
# loss
losses = tf.reduce_mean(- tf.math.log(tf.nn.sigmoid(pos_logits)) -
tf.math.log(1 - tf.nn.sigmoid(neg_logits))) / 2
self.add_loss(losses)
logits = tf.concat([pos_logits, neg_logits], axis=-1)
return logits
def summary(self):
seq_inputs = Input(shape=(self.maxlen,), dtype=tf.int32)
pos_inputs = Input(shape=(1,), dtype=tf.int32)
neg_inputs = Input(shape=(1,), dtype=tf.int32)
tf.keras.Model(inputs=[seq_inputs, pos_inputs, neg_inputs],
outputs=self.call([seq_inputs, pos_inputs, neg_inputs])).summary()
def test_model():
item_fea_col = {'feat': 'item_id', 'feat_num': 100, 'embed_dim': 8}
model = SASRec(item_fea_col, num_heads=8)
model.summary()
# test_model()
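# --- Editor's hedged evaluation sketch (shapes below are assumptions) ---
# A common test protocol scores one positive against N sampled negatives: pass the
# negatives as `neg_inputs` with shape (None, N) so neg_logits is (None, N) and the
# returned logits are (None, 1 + N); column 0 is the positive item.
# logits = model([seq_batch, pos_batch, neg_batch])              # (batch, 1 + N)
# ranks = tf.argsort(tf.argsort(logits, axis=-1, direction='DESCENDING'), axis=-1)[:, 0]
# hit_at_10 = tf.reduce_mean(tf.cast(ranks < 10, tf.float32))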
```
{
"source": "jingxlim/CircuitSeeker",
"score": 2
} |
#### File: CircuitSeeker/CircuitSeeker/mocorr.py
```python
from glob import glob
import numpy as np
import os
import CircuitSeeker.fileio as csio
import CircuitSeeker.distributed as csd
import dask.array as da
import dask.bag as db
import dask.delayed as delayed
import SimpleITK as sitk
from scipy.ndimage import percentile_filter, gaussian_filter1d
import zarr
from numcodecs import Blosc
def ensureArray(reference, dataset_path):
"""
"""
if not isinstance(reference, np.ndarray):
if not isinstance(reference, str):
raise ValueError("image references must be ndarrays or filepaths")
reference = csio.readImage(reference, dataset_path)[...] # hdf5 arrays are lazy
return reference
def rigidAlign(
fixed, moving,
fixed_vox, moving_vox,
dataset_path=None,
metric_sample_percentage=0.1,
shrink_factors=[2,1],
smooth_sigmas=[1,0],
minStep=0.1,
learningRate=1.0,
numberOfIterations=50,
target_spacing=2.0,
):
"""
Returns rigid transform parameters aligning `fixed` coords to `moving` coords
`fixed` and `moving` must be numpy arrays or file paths
`fixed_vox` and `moving_vox` must be fixed and moving image voxel spacings as numpy arrays
if `fixed` and/or `moving` are hdf5 filepaths, you must specify `dataset_path`
remaining arguments adjust the rigid registration algorithm
Images are skip sub-sampled before registration. The skip stride is determined by
`target_spacing` which is the target voxel spacing after skip sub-sampling.
Images are never up-sampled so axes with spacing greater than `target_spacing` are
not skip sub-sampled.
"""
# get moving/fixed images as ndarrays
fixed = ensureArray(fixed, dataset_path)
moving = ensureArray(moving, dataset_path)
# determine skip sample factors
    fss = np.maximum(np.round(target_spacing / fixed_vox), 1).astype(int)
    mss = np.maximum(np.round(target_spacing / moving_vox), 1).astype(int)
# skip sample the images
fixed = fixed[::fss[0], ::fss[1], ::fss[2]]
moving = moving[::mss[0], ::mss[1], ::mss[2]]
fixed_vox = fixed_vox * fss
moving_vox = moving_vox * mss
# convert to sitk images, set spacing
fixed = sitk.GetImageFromArray(fixed)
moving = sitk.GetImageFromArray(moving)
fixed.SetSpacing(fixed_vox[::-1]) # numpy z,y,x --> itk x,y,z
moving.SetSpacing(moving_vox[::-1])
# set up registration object
irm = sitk.ImageRegistrationMethod()
ncores = int(os.environ["LSB_DJOB_NUMPROC"]) # LSF specific!
irm.SetNumberOfThreads(2*ncores)
irm.SetInterpolator(sitk.sitkLinear)
# metric, built for speed
irm.SetMetricAsMeanSquares()
irm.SetMetricSamplingStrategy(irm.RANDOM)
irm.SetMetricSamplingPercentage(metric_sample_percentage)
# optimizer, built for simplicity
max_step = np.min(fixed_vox)
irm.SetOptimizerAsRegularStepGradientDescent(
minStep=minStep, learningRate=learningRate,
numberOfIterations=numberOfIterations,
maximumStepSizeInPhysicalUnits=max_step
)
irm.SetOptimizerScalesFromPhysicalShift()
# pyramid
irm.SetShrinkFactorsPerLevel(shrinkFactors=shrink_factors)
irm.SetSmoothingSigmasPerLevel(smoothingSigmas=smooth_sigmas)
irm.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
# initialize
irm.SetInitialTransform(sitk.Euler3DTransform())
# execute, convert to numpy and return
transform = irm.Execute(sitk.Cast(fixed, sitk.sitkFloat32),
sitk.Cast(moving, sitk.sitkFloat32),
)
return transform.GetParameters()
def applyTransform(
moving,
moving_vox,
params,
dataset_path=None):
"""
"""
# get the moving image as a numpy array
moving = ensureArray(moving, dataset_path)
# use sitk transform and interpolation to apply transform
moving = sitk.GetImageFromArray(moving)
moving.SetSpacing(moving_vox[::-1]) # numpy z,y,x --> itk x,y,z
transform = _parametersToEuler3DTransform(params)
transformed = sitk.Resample(moving, moving, transform,
sitk.sitkLinear, 0.0, moving.GetPixelID()
)
# return as numpy array
return sitk.GetArrayFromImage(transformed)
# useful format conversions for rigid transforms
def _euler3DTransformToParameters(euler):
"""
"""
return np.array(( euler.GetAngleX(),
euler.GetAngleY(),
euler.GetAngleZ() ) +
euler.GetTranslation()
)
def _parametersToEuler3DTransform(params):
"""
"""
transform = sitk.Euler3DTransform()
transform.SetRotation(*params[:3])
transform.SetTranslation(params[3:])
return transform
def _parametersToRigidMatrix(params):
"""
"""
transform = _parametersToEuler3DTransform(params)
matrix = np.eye(4)
matrix[:3, :3] = np.array(transform.GetMatrix()).reshape((3,3))
matrix[:3, -1] = np.array(transform.GetTranslation())
return matrix
# TODO: refactor motionCorrect
def motionCorrect(
folder, prefix, suffix,
fixed, fixed_vox, moving_vox,
write_path, dataset_path=None,
distributed_state=None, sigma=7,
transforms_dir=None,
**kwargs,
):
"""
"""
# set up the distributed environment
ds = distributed_state
if distributed_state is None:
ds = csd.distributedState()
# writing large compressed chunks locks GIL for a long time
ds.modifyConfig({'distributed.comm.timeouts.connect':'60s',
'distributed.comm.timeouts.tcp':'180s',}
)
ds.initializeLSFCluster(job_extra=["-P scicompsoft"])
ds.initializeClient()
# create (lazy) dask bag from all frames
frames = csio.daskBagOfFilePaths(folder, prefix, suffix)
nframes = frames.npartitions
# scale cluster carefully
if 'max_workers' in kwargs.keys():
max_workers = kwargs['max_workers']
else:
max_workers = 1250
ds.scaleCluster(njobs=min(nframes, max_workers))
# align all
dfixed = delayed(fixed)
dfixed_vox = delayed(fixed_vox)
dmoving_vox = delayed(moving_vox)
ddataset_path = delayed(dataset_path)
params = frames.map(lambda b,w,x,y,z: rigidAlign(w,b,x,y, dataset_path=z),
w=dfixed, x=dfixed_vox, y=dmoving_vox, z=ddataset_path,
).compute()
params = np.array(list(params))
# (weak) outlier removal and smoothing
params = percentile_filter(params, 50, footprint=np.ones((3,1)))
params = gaussian_filter1d(params, sigma, axis=0)
# write transforms as matrices
if transforms_dir is not None:
paths = list(frames)
for ind, p in enumerate(params):
transform = _parametersToRigidMatrix(p)
basename = os.path.splitext(os.path.basename(paths[ind]))[0]
path = os.path.join(transforms_dir, basename) + '_rigid.mat'
np.savetxt(path, transform)
# apply transforms to all images
params = db.from_sequence(params, npartitions=nframes)
transformed = frames.map(lambda b,x,y,z: applyTransform(b,x,y, dataset_path=z),
x=dmoving_vox, y=params, z=ddataset_path,
).to_delayed()
# convert to a (lazy) 4D dask array
sh = transformed[0][0].shape.compute()
dd = transformed[0][0].dtype.compute()
arrays = [da.from_delayed(t[0], sh, dtype=dd) for t in transformed]
transformed = da.stack(arrays, axis=0)
# write in parallel as 4D array to zarr file
compressor = Blosc(cname='zstd', clevel=9, shuffle=Blosc.BITSHUFFLE)
transformed_disk = zarr.open(write_path, 'w',
shape=transformed.shape, chunks=(256, 10, 256, 256),
dtype=transformed.dtype, compressor=compressor
)
da.to_zarr(transformed, transformed_disk)
# release resources
if distributed_state is None:
ds.closeClient()
# return reference to data on disk
return transformed_disk
def distributedImageMean(
folder, prefix, suffix, dataset_path=None,
distributed_state=None, write_path=None,
):
"""
Returns mean over images matching `folder/prefix*suffix`
If images are hdf5 you must specify `dataset_path`
To additionally write the mean image to disk, specify `write_path`
Computations are distributed, to supply your own dask scheduler and cluster set
`distributed_state` to an existing `CircuitSeeker.distribued.distributedState` object
otherwise a new cluster will be created
"""
# set up the distributed environment
ds = distributed_state
if distributed_state is None:
ds = csd.distributedState()
ds.initializeLSFCluster(job_extra=["-P scicompsoft"])
ds.initializeClient()
# hdf5 files use dask.array
if csio.testPathExtensionForHDF5(suffix):
frames = csio.daskArrayBackedByHDF5(folder, prefix, suffix, dataset_path)
nframes = frames.shape[0]
ds.scaleCluster(njobs=nframes)
frames_mean = frames.mean(axis=0).compute()
frames_mean = np.round(frames_mean).astype(frames[0].dtype)
# other types use dask.bag
else:
frames = csio.daskBagOfFilePaths(folder, prefix, suffix)
nframes = frames.npartitions
ds.scaleCluster(njobs=nframes)
frames_mean = frames.map(csio.readImage).reduction(sum, sum).compute()
dtype = frames_mean.dtype
        frames_mean = np.round(frames_mean / float(nframes)).astype(dtype)
# release resources
if distributed_state is None:
ds.closeClient()
# write result
if write_path is not None:
if csio.testPathExtensionForHDF5(write_path):
csio.writeHDF5(write_path, dataset_path, frames_mean)
else:
csio.writeImage(write_path, frames_mean)
# return reference to mean image
return frames_mean
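# --- Editor's hedged usage sketch (paths, dataset path and voxel spacings are assumptions) ---
# rigidAlign and applyTransform above can be used directly on a single pair of
# volumes; note that rigidAlign reads LSB_DJOB_NUMPROC, so it assumes an LSF job.
# import numpy as np
# fixed_vox = np.array([1.0, 0.406, 0.406])    # z, y, x spacing
# moving_vox = np.array([1.0, 0.406, 0.406])
# params = rigidAlign('fixed.h5', 'moving.h5', fixed_vox, moving_vox,
#                     dataset_path='/default')
# aligned = applyTransform('moving.h5', moving_vox, params, dataset_path='/default')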
```
{
"source": "jingxlim/fish",
"score": 2
} |
#### File: fish/scripts/save_dff.py
```python
def get_sc(app_name):
from pyspark import SparkConf, SparkContext
conf = SparkConf().setAppName(app_name)
sc = SparkContext(conf=conf)
return sc
def get_background_offset(raw_path):
from numpy import median
from glymur import jp2k
background_im_fname = raw_path + "Background_0.tif"
background_im = jp2k.Jp2k(background_im_fname)[:]
return median(background_im)
def prepare_images(files, context, median_filter_size, background_offset):
from thunder import images as tdims
from fish.util.fileio import read_image
images = tdims.fromlist(files, accessor=read_image, engine=context)
images = images.map(lambda v: (v - background_offset).clip(1, None))
images = images.median_filter(size=median_filter_size)
return images
def get_params(path):
import json
with open(path, "r") as f:
params = json.load(f)
return params
def motion_correction(images, reg_path, overwrite=False):
from scipy.ndimage.interpolation import shift
from os.path import exists
from os import makedirs
from skimage.io import imsave
from fish.image.alignment import estimate_translation
from numpy import save, array, zeros, vstack, load, arange
from scipy.ndimage.filters import median_filter
ref_range = arange(-5, 5) + images.shape[0] // 2
medfilt_window = 200
if not exists(reg_path):
makedirs(reg_path)
overwrite = True
try:
affs = load(reg_path + "regparams_affine.npy")
print("Registration params found")
except FileNotFoundError:
print("Registration params not found, performing registration")
overwrite = True
if overwrite:
ref = images[ref_range].mean().toarray().astype("float32")
imsave(reg_path + "anat_reference.tif", ref)
reg = images.map(lambda v: estimate_translation(ref.max(0), v.max(0))).toarray()
affs = array([r.affine for r in reg])
save(reg_path + "regparams_affine.npy", affs)
x_trans = median_filter(affs[:, -2, -1], size=medfilt_window)
y_trans = median_filter(affs[:, 0, -1], size=medfilt_window)
z_trans = zeros(x_trans.shape)
trans = vstack([z_trans, y_trans, x_trans])
shifter = lambda v: shift(v[1], -trans[:, v[0][0]], cval=0).astype("float32")
images_transformed = images.map(shifter, with_keys=True)
return images_transformed
def apply_dff(images, dff_fun, out_dtype):
from numpy import array
from skimage.exposure import rescale_intensity as rescale
images_dff = images.map_as_series(
dff_fun, value_size=images.shape[0], dtype=images.dtype
)
bounds = images_dff.map(lambda v: array([v.min(), v.max()])).toarray()
mn, mx = bounds.min(), bounds.max()
images_rescaled = images_dff.map(
lambda v: rescale(v, in_range=(mn, mx), out_range=out_dtype).astype(out_dtype)
)
dff_lim = (mn, mx)
return images_rescaled, dff_lim
def rdd_to_tif(kv, path):
from skimage.io import imsave
key = kv[0][0]
val = kv[1]
fname = "t_{:06d}.tif".format(key)
imsave(path + fname, val, imagej=True)
def save_images(images, out_path, multifile, exp_name):
# save the images
if multifile:
from os import makedirs
from os.path import exists
# make a folder for all these images
subdir = out_path + "dff/"
if not exists(subdir):
makedirs(subdir)
images.tordd().foreach(lambda v: rdd_to_tif(v, subdir))
else:
from skimage.io import imsave
imsave(out_path + exp_name + ".tif", images.toarray(), imagej=True)
def parse_args():
from argparse import ArgumentParser
parser = ArgumentParser(
description="Generate a df/f volume from raw light sheet data, and save as .tif files."
)
parser.add_argument("raw_path", help="A path to a directory of raw files.")
parser.add_argument(
"param_path", help="A path to a json file containing dff params."
)
parser.add_argument("output_path", help="A path to a directory to contain output.")
args = parser.parse_args()
return args
def generate_dff_images(raw_path, param_path, output_path, sc):
from fish.image.zds import ZDS
from fish.image.vol import dff
from skimage.transform import downscale_local_mean
from functools import partial
import json
from os.path import exists
from os import makedirs
dset = ZDS(raw_path)
# deal with YuMu's convention of renaming the raw data folder
if dset.exp_name == "raw":
dset.exp_name = dset.metadata["data_header"]
params = get_params(param_path)
if not exists(output_path):
makedirs(output_path)
reg_path = output_path + "reg/"
dff_fun = partial(
dff,
window=params["baseline_window"] * dset.metadata["volume_rate"],
percentile=params["baseline_percentile"],
baseline_offset=params["baseline_offset"],
downsample=params["baseline_downsampling"],
)
downsample_fun = partial(
downscale_local_mean, factors=tuple(params["spatial_downsampling"])
)
background_offset = get_background_offset(raw_path)
median_filter_size = (1, 3, 3)
print("Preparing images...")
ims = prepare_images(dset.files, sc, median_filter_size, background_offset)
print("Registering images...")
ims_registered = motion_correction(
ims, reg_path, overwrite=params["overwrite_registration"]
)
ims_ds = ims_registered.map(downsample_fun)
print("Estimating dff...")
ims_dff, dff_lim = apply_dff(ims_ds, dff_fun, params["out_dtype"])
print("Saving images...")
save_images(
ims_dff, output_path, multifile=params["save_multifile"], exp_name=dset.exp_name
)
metadata = params.copy()
metadata["dff_lims"] = [float(dff_lim[0]), float(dff_lim[1])]
metadata_fname = output_path + "dff_metadata.json"
with open(metadata_fname, "w") as fp:
json.dump(metadata, fp)
return 1
if __name__ == "__main__":
args = parse_args()
sc = get_sc("dff_movie")
generate_dff_images(args.raw_path, args.param_path, args.output_path, sc)
```
#### File: fish/scripts/stack_conversion.py
```python
from glob import glob
import os
import volTools as volt
import fileTools as ftools
from pyspark import SparkConf, SparkContext
conf = SparkConf().setAppName("image_conversion")
sc = SparkContext(conf=conf)
to_convert = [
"/nobackup/ahrens/davis/data/raw/20160608/6dpf_cy171xcy221_f1_omr_1_20160608_170933/",
"/nobackup/ahrens/davis/data/raw/20160608/6dpf_cy171xcy221_f2_omr_1_20160608_190404/",
"/nobackup/ahrens/davis/data/raw/20160608/6dpf_cy171xcy221_f2_omr_1_20160608_190404/",
"/nobackup/ahrens/davis/data/raw/20160614/5dpf_cy171xcy221_f1_caudal_omr_1_20160614_183344/",
"/nobackup/ahrens/davis/data/raw/20160614/5dpf_cy171xcy221_f1_caudal_omr_2_20160614_185018/",
]
def image_conversion(raw_dir, source_format="stack", dest_format="klb"):
"""
Find all files in a directory with a specified format, parallelize this list over a cluster using spark, and convert each file to a new format.
raw_dir : string
Directory containing files to be converted
source_format : string, default is 'stack'
The input format of the files to be converted. Supported formats are 'stack' and 'tif'.
dest_format : string, default is 'klb'
The output format of the converted files. Supported formats are 'klb' and 'hdf5'
"""
from glob import glob
# Data files start with `TM`
source_glob = "{0}TM*.{1}".format(raw_dir, source_format)
dest_glob = "{0}TM*.{1}".format(raw_dir, dest_format)
print("Source directory: {0}".format(raw_dir))
fnames = glob(source_glob)
fname_rdd = sc.parallelize(fnames, numSlices=256)
old_source = fnames
old_dest = glob(dest_glob)
print("pre-conversion: number of {0} files: {1}".format(dest_format, len(old_dest)))
print(
"pre-conversion: number of {0} files: {1}".format(
source_format, len(old_source)
)
)
convert_fun = lambda f: ftools.image_conversion(f, dest_format, wipe=True)
if len(old_source) == 0:
print("No {0} files found!".format(source_format))
else:
fname_rdd.foreach(convert_fun)
new_dest = glob(dest_glob)
new_source = glob(source_glob)
print(
"post-conversion: number of {0} files: {1}".format(dest_format, len(new_dest))
)
print(
"post-conversion: number of {0} files: {1}".format(
source_format, len(new_source)
)
)
for r in to_convert:
try:
image_conversion(r)
    except Exception as exc:
        print("Something went wrong processing {0}: {1}".format(r, exc))
```
#### File: fish/util/fileio.py
```python
def _tif_reader(tif_path, roi=None):
from skimage.io import imread
if roi is not None:
raise NotImplementedError
return imread(tif_path)
def _tif_writer(tif_path, image):
from skimage.io import imsave
imsave(tif_path, image)
def _stack_reader(stack_path, roi=None, hi_res_zstep=1.0):
from numpy import fromfile, memmap
from os.path import sep, split, isfile
from fish.image.zds import get_metadata
#
for filename in ["ch0.xml", "ch0_cam0.xml", "ch0_cam1.xml"]:
param_file = split(stack_path)[0] + sep + filename
if isfile(param_file): break
dims = get_metadata(param_file)["dimensions"][::-1]
    if 'Hi_Res.stack' in stack_path:
        z_step = get_metadata(param_file)["z_step"]
        # thickness = (n_slices - 1) * z_step; divide by the hi-res z step and add 1 (int for reshape)
        dims[0] = int((dims[0] - 1) * (z_step / hi_res_zstep)) + 1
    if roi is not None:
        im = memmap(stack_path, dtype="uint16", shape=dims, mode="r")[roi]
        return im
    else:
        try:
            im = fromfile(stack_path, dtype="uint16").reshape(dims)
            return im
        except:
            print(stack_path)
            raise
def _stack_writer(stack_path, image):
raise NotImplementedError
def _klb_reader(klb_path, roi=None):
from pyklb import readfull
if roi is not None:
raise NotImplementedError
# pyklb whines if it doesn't get a python string
return readfull(str(klb_path))
def _klb_writer(klb_path, image):
from pyklb import writefull
writefull(image, str(klb_path))
def _h5_reader(h5_path, dset_name='default', roi=None):
from h5py import File
if roi is None:
roi = slice(None)
with File(h5_path, 'r', libver='latest') as f:
return f[dset_name][roi]
def _h5_writer(h5_path, data):
from h5py import File
from os import remove
from os.path import exists
if exists(h5_path):
remove(h5_path)
with File(h5_path, "w") as f:
f.create_dataset(
"default", data=data, compression="gzip", chunks=True, shuffle=True
)
f.close()
def _jp2_reader(jp2_path, roi=None):
from glymur import Jp2k
    # Jp2k.read() is deprecated; slicing the Jp2k object reads the full image
    im = Jp2k(jp2_path)[:]
    return im if roi is None else im[roi]
def _jp2_writer(jp2_path, image):
raise NotImplementedError
readers = dict()
readers['stack'] = _stack_reader
readers['tif'] = _tif_reader
readers['klb'] = _klb_reader
readers['h5'] = _h5_reader
readers['hdf5'] = _h5_reader
readers['jp2'] = _jp2_reader
writers = dict()
writers["stack"] = _stack_writer
writers["tif"] = _tif_writer
writers["klb"] = _klb_writer
writers["h5"] = _h5_writer
writers["jp2"] = _jp2_writer
def read_image(fname, roi=None, dset_name='default', parallelism=1):
"""
    Load .stack, .tif, .klb, .h5/.hdf5, or .jp2 data and return as a numpy array
    fname : string, path to image file
    roi : tuple of slice objects. For data in hdf5 format, passing an roi allows the rapid loading of a chunk of data.
    dset_name : string, name of the dataset to read from hdf5 files (default 'default')
    parallelism : int, defines the number of cores to use for loading multiple images. Set to -1 to use all cores.
"""
from functools import partial
from numpy import array, ndarray
from multiprocessing import Pool, cpu_count
if isinstance(fname, str):
fmt = fname.split('.')[-1]
        if fmt == 'h5' or fmt == 'hdf5':  # split('.') drops the dot, so compare without it
reader = partial(readers[fmt], roi=roi, dset_name=dset_name)
else:
reader = partial(readers[fmt], roi=roi)
result = reader(fname)
elif isinstance(fname, (tuple, list, ndarray)):
fmt = fname[0].split('.')[-1]
        if fmt == 'h5' or fmt == 'hdf5':  # split('.') drops the dot, so compare without it
reader = partial(readers[fmt], roi=roi, dset_name=dset_name)
else:
reader = partial(readers[fmt], roi=roi)
if parallelism == 1:
result = array([reader(f) for f in fname])
else:
if parallelism == -1:
num_cores = cpu_count()
else:
num_cores = min(parallelism, cpu_count())
with Pool(num_cores) as pool:
result = array(pool.map(reader, fname))
else:
raise TypeError(
"First argument must be string for a one file or (tuple, list, ndarray) for many files"
)
return result
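# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical examples of read_image: a single file, and a list of files read in
# parallel. File names are placeholders; for HDF5 inputs an roi (tuple of slices)
# restricts what is loaded from disk.
def _example_read_image():
    single = read_image('t_000000.klb')                          # one volume
    stack = read_image(['t_000000.h5', 't_000001.h5'],
                       roi=(slice(0, 10), slice(None), slice(None)),
                       dset_name='default', parallelism=-1)      # use all cores
    return single, stack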
def write_image(fname, data):
"""
Write a numpy array as .stack, .tif, .klb, or .h5 file
fname : string, path to image file
data : numpy array to be saved to disk
"""
# Get the file extension for this file, assuming it is the last continuous string after the last period
fmt = fname.split(".")[-1]
return writers[fmt](fname, data)
def write_txy_image2tiff(arr, savename):
import imageio
imageio.mimwrite(savename,arr)
def to_dask(fnames,dset_name='default'):
"""
    Return a dask array constructed from a collection of ndarrays distributed across multiple files.
fnames : iterable of sorted filenames
"""
from dask.array import from_delayed, from_array, stack
from h5py import File
from dask.delayed import delayed
from numpy import memmap
fmt = fnames[0].split('.')[-1]
s = read_image(fnames[0],dset_name=dset_name)
def delf(fn):
return File(fn, mode='r', libver='latest')[dset_name][:]
if fmt == 'h5' or fmt == 'hdf5':
result = stack([from_delayed(delayed(delf)(fn), s.shape, s.dtype) for fn in fnames])
return result
elif fmt == "stack":
from os.path import split, sep
mems = [memmap(fn, dtype=s.dtype, shape=s.shape, mode="r") for fn in fnames]
result = stack([from_array(mem, chunks=s.shape) for mem in mems])
return result
elif fmt in ("tif", "jp2"):
rdr = delayed(read_image)
result = stack(
[from_delayed(rdr(fn), shape=s.shape, dtype=s.dtype) for fn in fnames]
)
return result
else:
raise NotImplementedError("{0} files not supported at this time".format(fmt))
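# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical example: build a lazy 4D (t, z, y, x) dask array from a sorted list
# of per-timepoint files, then compute a mean volume without loading everything
# into memory at once. The glob pattern is a placeholder.
def _example_to_dask():
    from glob import glob
    fnames = sorted(glob('/path/to/data/TM*.h5'))  # placeholder path
    darr = to_dask(fnames, dset_name='default')
    mean_volume = darr.mean(axis=0).compute()
    return mean_volume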
def image_conversion(source_path, dest_fmt, wipe=False):
"""
Convert image from one format to another, optionally erasing the source image
    source_path : string
        Path to the image to be converted.
    dest_fmt : string
        Target format extension, e.g. 'klb'.
wipe : bool
If True, delete the source image after successful conversion
"""
from numpy import array_equal
from os import remove
# the name of the file before format extension
source_name = source_path.split(".")[0]
dest_path = source_name + "." + dest_fmt
source_image = read_image(source_path)
write_image(dest_path, source_image)
if wipe:
check_image = read_image(dest_path)
if array_equal(check_image, source_image):
remove(source_path)
else:
print('{0} and {1} differ... something went wrong!'.format(source_path, dest_path))
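# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical call converting a .stack file to .klb and deleting the source only
# after the round-trip read matches the original. The path is a placeholder.
def _example_image_conversion():
    image_conversion('/path/to/TM000000.stack', 'klb', wipe=True)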
def resample_image(source_path, dest_fmt, indices=[], wipe=False):
"""
Resample the image to desired dimensions, optionally erasing the source image
    source_path : string
        Path to the image to be resampled.
    indices : list
        List of lists specifying the start and stop in each dimension.
        The dimensions of the list have to correspond to the
        dimensions of the image.
For example, for a tzyx image of size (18000, 26, 1024, 2048),
you could try something like this:
z_start = 0
z_stop = 13
y_start = 300
y_stop = 900
x_start = 0
x_stop = 600
indices = [[z_start,z_stop],
[y_start,y_stop],
[x_start,x_stop]]
This should give you an image of size (18000, 13, 600, 600).
wipe : bool
If True, delete the source image after successful conversion
"""
from numpy import array_equal
from os import remove
# unpack indices
z_start = indices[0][0]
z_stop = indices[0][1]
y_start = indices[1][0]
y_stop = indices[1][1]
x_start = indices[2][0]
x_stop = indices[2][1]
# the name of the file before format extension
source_name = source_path.split('.')[0]
dest_name = source_name.replace('/im/','/forebrain/')
dest_path = dest_name + '.' + dest_fmt
source_image = read_image(source_path)
resampled_image = source_image[z_start:z_stop, \
y_start:y_stop, \
x_start:x_stop]
write_image(dest_path, resampled_image)
print(dest_path + ' written')
if wipe:
check_image = read_image(dest_path)
        if array_equal(check_image, resampled_image):  # verify the written crop before deleting the source
remove(source_path)
else:
print('{0} and {1} differ... something went wrong!'.format(source_path, dest_path))
``` |
{
"source": "jingxlim/segmentation",
"score": 2
} |
#### File: segmentation/python/z2_brain_mask.py
```python
def z2():
global thr_prob
thr_prob0 = np.copy(thr_prob)
for frame_i in range(imageframe_nmbr):
if os.path.isfile(output_dir + 'brain_mask' + str(frame_i) + '.hdf5'):
try:
mask_reset = eval(input('Reset brain_mask? [0, no]; 1, yes. '))
except SyntaxError:
mask_reset = 0
if not mask_reset:
continue
# get image mean
def get_img_hdf(name_i):
image_filename = image_dir(name_i, frame_i) + 'image_aligned.hdf5'
with h5py.File(image_filename, 'r') as file_handle:
return file_handle['V3D'][()].T
image_dims = get_img_hdf(image_names[0]).shape
assert(np.allclose(image_dims, (lx//ds, ly//ds, lz)))
class accum_param(pyspark.accumulators.AccumulatorParam):
'''define accumulator class'''
def zero(self, val0):
return np.zeros(val0.shape, dtype='float32')
def addInPlace(self, val1, val2):
return val1 + val2
image_accumulator = \
sc.accumulator(np.zeros(image_dims, dtype='float32'), accum_param())
sc.parallelize(image_names).foreach(
lambda name_i: image_accumulator.add(get_img_hdf(name_i)))
image_mean = 1.0 * image_accumulator.value / lt
# get medium and fine resolution peaks
def medin_filt(img, ftp):
return ndimage.filters.median_filter(img, footprint=ftp)
image_peak = image_mean > medin_filt(image_mean, cell_ball)
image_peak_fine = image_mean > medin_filt(image_mean, cell_ball_fine)
# compute power and probability
        # fit the mixture on a random subsample of voxel log-powers for speed
        Powr = np.log10(np.random.permutation(image_mean.ravel())[:100000, None])
gmm = mixture.GaussianMixture(n_components=2, max_iter=100, n_init=100).fit(Powr)
Prob = gmm.predict_proba(Powr)
Prob = Prob[:, np.argmax(Powr[np.argmax(Prob, 0)])]
# get and save brain mask
thr_prob = np.copy(thr_prob0)
mask_flag = (thr_prob != 0)
while 1:
plt.figure(1, (12, 4))
plt.subplot(121); _ = plt.hist(Powr, 100); plt.title('10^(Pixel power histogram)')
plt.subplot(122); _ = plt.hist(Prob, 100); plt.title('Probability threshold')
plt.show()
if not thr_prob:
try:
thr_prob = eval(input('Enter probability threshold [default 0.5]: '))
except SyntaxError:
thr_prob = 0.5
thr_prob = np.ravel(thr_prob)
if len(thr_prob) == 1:
ix = np.argmin(np.abs(Prob - thr_prob))
thr_mask = 10 ** Powr[ix][0]
if np.isinf(thr_prob):
thr_mask = thr_prob
elif len(thr_prob) == 2:
thr_mask = thr_prob[1]
thr_prob = thr_prob[0]
print('Proceeding with fluorescence threshold of %f.' %thr_mask)
else:
continue
# remove all disconnected components less than 5000 cubic microliters in size
small_obj = np.round(5000 * (resn_x * ds * resn_y * ds * resn_z)).astype(int)
brain_mask = (image_mean > thr_mask)
brain_mask = morphology.remove_small_objects(brain_mask, small_obj)
for i in range(lz):
plt.figure(1, (12, 6))
plt.subplot(121); plt.imshow((image_mean * ( brain_mask))[:, :, i].T, cmap='hot')
plt.subplot(122); plt.imshow((image_peak * (1 + brain_mask))[:, :, i].T, cmap='hot')
plt.show()
if not mask_flag:
try:
mask_flag = eval(input('Is thr_prob = %.4f (thr_mask = %.1f) accurate? [1, yes]; 0, no. ' %(thr_prob, thr_mask)))
except SyntaxError:
mask_flag = 1
if not mask_flag:
thr_prob = 0
else:
break
plt.close('all')
with h5py.File(output_dir + 'brain_mask' + str(frame_i) + '.hdf5', 'w') as file_handle:
file_handle['brain_mask'] = brain_mask.T
file_handle['image_mean'] = image_mean.T
file_handle['image_peak'] = image_peak.T
file_handle['image_peak_fine'] = image_peak_fine.T
file_handle['thr_prob'] = thr_prob
file_handle['thr_mask'] = thr_mask
file_handle['background'] = np.median(image_mean[brain_mask==0])
z2()
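# --- Illustrative sketch (not part of the original script) ---
# The thresholding step above fits a 2-component Gaussian mixture to the log10
# pixel power and converts a probability threshold into a fluorescence threshold.
# This minimal, self-contained sketch shows the same idea on synthetic data;
# the variable names and synthetic distributions are assumptions, not values
# from this pipeline.
def _example_gmm_threshold():
    import numpy as np
    from sklearn import mixture
    rng = np.random.default_rng(0)
    image_mean = np.concatenate([rng.lognormal(1.0, 0.3, 50000),   # dim background
                                 rng.lognormal(3.0, 0.3, 50000)])  # bright tissue
    powr = np.log10(image_mean)[:, None]
    gmm = mixture.GaussianMixture(n_components=2).fit(powr)
    prob = gmm.predict_proba(powr)
    prob = prob[:, np.argmax(powr[np.argmax(prob, 0)])]  # column of the brighter component
    ix = np.argmin(np.abs(prob - 0.5))                   # probability threshold of 0.5
    thr_mask = 10 ** powr[ix][0]                         # corresponding fluorescence threshold
    return thr_mask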
```
#### File: src/segmentation/mika_helper.py
```python
import numpy as np
import matplotlib.pyplot as plt
import h5py
from analysis_toolbox.spim_helper import spim
def load_segmented_data(
# spim attributes
im_dir, impro_dir=[], ephys_file='',
channel_labels=[], debug=False,
# segmented attributes
cell_file='', cleaned_file='', component_file='',
parameters_file='', mask_file=''):
"""
Keyword Arguments:
    im_dir -- directory containing the raw imaging data
    impro_dir -- list of directories with processed images (default [])
    ephys_file -- path to the ephys recording (default '')
    channel_labels -- labels for the ephys channels (default [])
    debug -- print extra diagnostic output (default False)
"""
spim_dset = segmented(
# spim attributes
im_dir, impro_dir=impro_dir, ephys_file=ephys_file,
channel_labels=channel_labels, debug=debug,
# segmented attributes
cell_file=cell_file,
cleaned_file=cleaned_file,
component_file=component_file,
parameters_file=parameters_file,
mask_file=mask_file)
spim_dset.setup() ## already done from the imaging class
try: spim_dset.open_raw_images() ## from the spim subclass
except: pass
spim_dset.load_segmented_files() ## from the segmented subclass
## if processed image file path is provided, load processed images
if len(impro_dir) != 0: spim_dset.open_processed_images() ## from the spim subclass
## if ephys file is provided, load ephys file
if len(ephys_file) != 0:
spim_dset.load_and_match_ephys_data() ## function of the same name as the spim subclass, but redefined here
return spim_dset
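# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical call assembling a `segmented` dataset from raw images, an ephys
# recording, and the HDF5 outputs of the segmentation pipeline. All file paths
# and file names below are placeholders.
def _example_load_segmented_data():
    dset = load_segmented_data(
        im_dir='/path/to/raw_images/',
        ephys_file='/path/to/ephys_recording',
        cell_file='/path/to/cells.hdf5',
        cleaned_file='/path/to/cells_clean.hdf5',
        component_file='/path/to/components.hdf5',
        mask_file='/path/to/brain_mask0.hdf5',
    )
    return dset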
class segmented(spim):
def __init__(self,
# spim attributes
im_dir, impro_dir=[], ephys_file='',
channel_labels=[], debug=False,
# segmented attributes
cell_file='', cleaned_file='', component_file='',
parameters_file='', mask_file=''):
######################
# imaging attributes #
######################
super().__init__(self) ## set path, ephys_file and debug
## Initialize imaging class
self.path = im_dir
self.ephys_file = ephys_file
self.debug = debug
self.setup() ## set savepath, expt_id, expt_date, expt_name
###################
# spim attributes #
###################
self.ppaths = impro_dir
self.channel_labels = channel_labels
########################
# segmented attributes #
########################
self.cell_file = cell_file
self.cleaned_file = cleaned_file
self.component_file = component_file
self.parameters_file = parameters_file
self.mask_file = mask_file
###################################################################
# Data I/O #
###################################################################
def load_segmented_files(self, debug=False):
self.load_mask_file()
self.load_cell_file()
self.load_cleaned_file()
self.load_component_file()
print('Segmented imaging data loaded!')
def load_mask_file(self, debug=False):
try:
f = h5py.File(self.mask_file,'r')
self.background = f['background'][()]
self.blok_lidx = f['blok_lidx'][()]
self.blok_nmbr = f['blok_nmbr'][()]
self.brain_mask = f['brain_mask'][()]
self.image_mean = f['image_mean'][()]
self.image_peak = f['image_peak'][()]
self.image_peak_fine = f['image_peak_fine'][()]
self.thr_mask = f['thr_mask'][()]
self.thr_prob = f['thr_prob'][()]
f.close()
except: print("Problems loading %s" % self.mask_file)
def load_cell_file(self, debug=False):
try:
f = h5py.File(self.cell_file,'r')
self.Cmpn_position = f['Cmpn_position'][()]
self.Cmpn_spcesers = f['Cmpn_spcesers'][()]
self.Cmpn_timesers = f['Cmpn_timesers'][()]
self.dims = f['dims'][()]
self.freq = int(f['freq'][()])
self.resn = f['resn'][()]
f.close()
except: print("Problems loading %s" % self.cell_file)
def load_cleaned_file(self, debug=False):
try:
f = h5py.File(self.cleaned_file,'r')
self.Cell_X = f['Cell_X'][()]
self.Cell_Y = f['Cell_Y'][()]
self.Cell_Z = f['Cell_Z'][()]
self.Cell_baseline1 = f['Cell_baseline1'][()]
self.Cell_spcesers = f['Cell_spcesers'][()]
self.Cell_timesers0 = f['Cell_timesers0'][()]
self.Cell_timesers1 = f['Cell_timesers1'][()]
self.Labels = f['Labels'][()]
self.Volume = f['Volume'][()]
self.background = f['background'][()]
self.freq = int(f['freq'][()])
self.n = int(f['n'][()])
self.x = int(f['x'][()])
self.y = int(f['y'][()])
self.z = int(f['z'][()])
f.close()
except: print("Problems loading %s" % self.cleaned_file)
def load_component_file(self, debug=False):
try:
f = h5py.File(self.component_file,'r')
self.H0 = f['H0'][()]
self.W0 = f['W0'][()].transpose()
try:
self.H1 = f['H1'][()]
self.W1 = f['W1'][()].transpose()
except:
pass
f.close()
except: print("Problems loading %s" % self.component_file)
###################################################################
# Preprocessing #
###################################################################
def check_congruence(self, debug=False):
"""
Determine if the segmented data is accurately derived from the
raw data. Since we commonly downsample in x and y, check for
coherence in the number of planes (i.e. z) and the number of
stacks (i.e. t).
If segmented data is congruent, rely on that data to make
calculations for imaging times and for aligning images to ephys
data, applying the same treatment to the raw images and
processed images.
"""
nstacks = self.Cell_timesers1.shape[1] == self.im.shape[0]
z = self.z == self.im.shape[1]
if np.logical_and(nstacks,z): return True
else: return False
def load_and_match_ephys_data(self, debug=False):
"""
_WARNING: Head_
Redefined method. Many parts copied from segmented.load_and_match_ephys_data().
Remember to update that method whenever you update this one.
(Not the best way; think of a better way eventually.)
"""
print("Aligning ephys and im data")
print("==========================")
self.open_ephys_data() ## from spim class
self.apply_main_image() ## from spim class
        if self.check_congruence():  # call the check; the bare method object is always truthy
self.match_ephys_im_data() ## WARNING 1
self.calculate_DFF()
def match_ephys_im_data(self, debug=False):
"""
_WARNING: 1_
Redefined method. Many parts copied from segmented.match_ephys_im_data().
Remember to update that method whenever you update this one.
(Not the best way; think of a better way eventually.)
"""
print("Aligning ephys and im data")
print("==========================")
self.compute_imagingtimes_segments(debug=self.debug)
## remove first segment of experiment (that mika clipped because
## it helps with segmentation)
## Remove values from the START ##
if len(self.cell_file) != 0: self.remove_clipped_values()
self.aligned = self.check_align_segments()
## Remove values from the END ##
while np.logical_not(self.aligned):
self.cleanup_artefacts_segments(debug=self.debug)
## Remove values from the END ##
if self.aligned:
self.remove_lastframe(debug=self.debug)
        ## print out final shapes for sanity check
print('Main image: %s' % (self.im.shape,))
print('Raw image: %s' % (self.im_raw.shape,))
if len(self.im_pro) != 0:
for i,im_pro in enumerate(self.im_pro):
print('Processed image %i: %s' % (i,im_pro.shape,))
print("Computed imaging rate: %f" % self.im_rate)
def compute_imagingtimes_segments(self, debug=False):
t = self.Cell_timesers1.shape[1]
z = self.z
self.compute_imagingtimes(t,z,debug=debug)
def check_align_segments(self, debug=False):
t = self.Cell_timesers1.shape[1]
return self.check_align(t)
@classmethod
def dataset_keys(cls, out_file):
import h5py
h5py_file = h5py.File(out_file,'r')
print(list(h5py_file.keys()))
h5py_file.close()
def remove_clipped_values(self, debug=False):
"""
[Mika segmentation specific]
Mika: Many recordings have big events at the beginning. These
events are hard to correct and can also cause problems with
downstream component detection. To overcome this problem, I
have now set the signal at the onset of each recording (equal
in length to baseline_tau) to a constant value.
"""
# find where the signal stops to be different
artificialstop_imidx = np.where(np.around(self.H0[0,:],decimals=3) \
!= np.around(self.H0[0,0],decimals=3))[0][0]
if artificialstop_imidx != 1:
print("Artificial initial constant value detected. "
"Truncating first %i data points" % artificialstop_imidx)
else: artificialstop_imidx = 0
if debug:
self.ep.plot_stackstarts(xlim_pos='end')
overlay_fig, overlay_ax = self.overlay_im_ephys()
## zoom into end of artefact
overlay_ax.axvline(x=artificialstop_imidx/self.im_rate,color='k',ls='--')
overlay_ax.set_xlim([artificialstop_imidx/self.im_rate-5,artificialstop_imidx/self.im_rate+5])
overlay_ax.set_xlabel('Time [s]')
# truncate imaging data
        ## drop the clipped frames from the START, consistent with the cell,
        ## component and ephys truncation below
        self.im = self.im[artificialstop_imidx:,:,:,:]
        self.im_raw = self.im_raw[artificialstop_imidx:,:,:,:]
        if self.im_eq: # processed images can be treated the same way
            for i,im_pro in enumerate(self.im_pro):
                self.im_pro[i] = self.im_pro[i][artificialstop_imidx:,:,:,:]
# truncate cell data
self.Cell_timesers0 = self.Cell_timesers0[:,artificialstop_imidx:]
self.Cell_baseline1 = self.Cell_baseline1[:,artificialstop_imidx:]
self.Cell_timesers1 = self.Cell_timesers1[:,artificialstop_imidx:]
# truncate component data (not necessarily included in all analysis)
try: self.H0 = self.H0[:,artificialstop_imidx:]
except: pass
try: self.H1 = self.H1[:,artificialstop_imidx:]
except: pass
# truncate ephys data
artificialstop_ephysidx = self.image_starttimes[artificialstop_imidx]
self.ep.replace_ephys(self.ep.ep[:,artificialstop_ephysidx:])
# recalculate imaging times and check for alignment
        self.compute_imagingtimes_segments(debug=self.debug)
        self.aligned = self.check_align_segments()
def cleanup_artefacts_segments(self, debug=False):
t = self.Cell_timesers1.shape[1]
self.cleanup_artefacts(t)
def cleanup_artefacts(self, t, debug=False):
"""
_WARNING: 1A_
Redefined method. Many parts copied from spim.cleanup_artefacts().
Remember to update that method whenever you update this one.
(Not the best way; think of a better way eventually.)
"""
num_lsim = t
num_epim = self.image_starts.sum()
n_imdiff = num_epim - num_lsim
print(f'Number of light sheet images: {num_lsim}')
print(f'Number of ephys images: {num_epim}')
if n_imdiff > 0:
print('More images in ephys. Truncating ephys...')
diff_idx = self.image_starttimes[-n_imdiff]
self.ep.replace_ephys(self.ep.ep[:,:diff_idx])
self.compute_imagingtimes_segments(debug=self.debug)
elif n_imdiff < 0:
print('More images in imaging. Truncating imaging...')
# truncate imaging data
self.im = self.im[:n_imdiff,:,:,:]
if self.im_raw is not None:
self.im_raw = self.im_raw[:n_imdiff,:,:,:]
if self.im_eq:
for i,im_pro in enumerate(self.im_pro):
self.im_pro[i] = self.im_pro[i][:n_imdiff,:,:,:]
# truncate cell data
self.Cell_timesers0 = self.Cell_timesers0[:,:n_imdiff]
self.Cell_baseline1 = self.Cell_baseline1[:,:n_imdiff]
self.Cell_timesers1 = self.Cell_timesers1[:,:n_imdiff]
# truncate component data (not necessarily included in all analysis)
try: self.H0 = self.H0[:,:n_imdiff]
except: pass
try: self.H1 = self.H1[:,:n_imdiff]
except: pass
        self.aligned = self.check_align(t)
def remove_lastframe(self, debug=False):
"""
There could be the same number of images in both ephys and imaging but
the ends are not aligned.
_WARNING: 1B_
This method is not inherited but redefined in segmented.remove_lastframe().
Remember to update that method whenever you update this one.
(Not the best way; think of a better way eventually.)
"""
print('Ephys and imaging aligned; remove last frame from both...')
# truncate images
self.im = self.im[:-1,:,:,:]
if self.im_raw is not None:
self.im_raw = self.im_raw[:-1,:,:,:]
if self.im_eq:
for i,im_pro in enumerate(self.im_pro):
self.im_pro[i] = self.im_pro[i][:-1,:,:,:]
# truncate components
try: self.H0 = self.H0[:,:-1]
except: pass
try: self.H1 = self.H1[:,:-1]
except: pass
# truncate cells
self.Cell_timesers0 = self.Cell_timesers0[:,:-1]
self.Cell_baseline1 = self.Cell_baseline1[:,:-1]
self.Cell_timesers1 = self.Cell_timesers1[:,:-1]
# truncate ephys
diff_idx = self.image_starttimes[-1]
self.ep.replace_ephys(self.ep.ep[:,:diff_idx])
self.compute_imagingtimes_segments(debug=self.debug)
self.aligned = self.check_align_segments()
###################################################################
# Analysis #
###################################################################
def overlay_im_ephys(self):
overlay_fig, overlay_ax = plt.subplots(figsize=(9,3))
overlay_ax.plot(np.linspace(0, self.H0[0].shape[0]/self.im_rate, num=self.H0[0].shape[0]),
self.H0[0])
overlay_ax.plot(np.linspace(0, self.H0[0].shape[0]/self.im_rate, num=self.H0[0].shape[0]),
self.H0[0],'.')
overlay_ax.plot(np.linspace(0, self.image_starts.shape[0]/self.ephys_rate, num=self.image_starts.shape[0]),
self.image_starts)
return overlay_fig, overlay_ax
def find_cell(self, cell_num, mask=1):
cell_volume = np.zeros((self.z, self.y, self.x))
for j in range(np.count_nonzero(self.Cell_X[cell_num, :] > 0)):
if mask:
cell_volume[int(self.Cell_Z[cell_num, j]),
int(self.Cell_Y[cell_num, j]),
int(self.Cell_X[cell_num, j])] = mask
else:
cell_volume[int(self.Cell_Z[cell_num, j]),
int(self.Cell_Y[cell_num, j]),
int(self.Cell_X[cell_num, j])] = \
self.Cell_spcesers[cell_num, j]
return cell_volume
def plot_volume(self, nrows, ncols, save_name=None):
"""
Plot all cells segmented using self.Volume.
"""
from analysis_toolbox.utils import get_transparent_cm
trans_inferno = get_transparent_cm('hot',tvmax=1,gradient=False)
nplanes = self.Volume.shape[2]
assert nrows*ncols >= nplanes
vol_fig, vol_ax = plt.subplots(nrows,ncols,figsize=(ncols*4,nrows*3),
squeeze=False)
vol_ax = vol_ax.flatten()
for nplane in range(nplanes):
vol_ax[nplane].imshow(self.image_mean[nplane,:,:], cmap='gray',
vmin=np.percentile(np.ravel(self.image_mean[nplane,:,:]),1),
vmax=np.percentile(np.ravel(self.image_mean[nplane,:,:]),99.9))
vax = vol_ax[nplane].imshow(self.Volume[:,:,nplane].transpose(),
vmax=np.percentile(np.ravel(self.Volume[:,:,:]),99.9),
cmap=trans_inferno)
vol_fig.colorbar(vax,ax=vol_ax[nplane])
vol_ax[nplane].set_title('Plane %i' % nplane)
vol_fig.tight_layout()
if save_name:
vol_fig.savefig(save_name)
return vol_fig, vol_ax
def plot_allcells_map(self, label=None, cmap=None, save_name=None, parallelize=False, show_plot=False, alpha=1):
cells = np.arange(self.n)
cell_volume, vol_fig, vol_ax = self.plot_cell_map(cells,label=label,
cmap=cmap, save_name=save_name,
parallelize=parallelize,
show_plot=show_plot, alpha=alpha)
return cell_volume, vol_fig, vol_ax
def plot_cell_map(self, cells, nrows, ncols, label=None, cmap=None,
save_name=None, parallelize=False,
show_plot=False, alpha=1):
"""
"""
from tqdm import tqdm
if parallelize:
import multiprocessing as mp
num_processes = min(mp.cpu_count(), self.n)
# divide clusters into all processes
cells_list = np.array_split(cells,num_processes)
label_list = np.array_split(label,num_processes)
output=mp.Queue()
processes = [mp.Process(target=self.collapse_multiple_cells,
args=(cells_list[proc],label_list[proc]),
kwargs={"save_name": save_name,
"output": output}) \
for proc in range(num_processes)]
print("Starting %i processes..." % num_processes)
for p in processes: p.start()
for p in processes: p.join()
result = [output.get() for p in processes]
cell_volume = result ## TODO: has to be some combination of result
else:
cell_volume = self.collapse_multiple_cells(cells,label,save_name=save_name)
if show_plot:
vol_fig, vol_ax = self.overlay_volume(cell_volume, nrows, ncols, cmap=cmap, alpha=alpha, save_name=save_name)
return cell_volume, vol_fig, vol_ax
else:
return cell_volume
def overlay_volume(self, volume, nrows, ncols, cmap=None, alpha=1, save_name=None):
nplanes = self.z
assert nrows*ncols >= nplanes
vol_fig, vol_ax = plt.subplots(nrows, ncols, figsize=(ncols*4,nrows*3),
squeeze=False)
vol_ax = vol_ax.flatten()
for nplane in range(nplanes):
vol_ax[nplane].imshow(self.image_mean[nplane,:,:], cmap='gray',
vmin=np.percentile(np.ravel(self.image_mean[nplane,:,:]),1),
vmax=np.percentile(np.ravel(self.image_mean[nplane,:,:]),99.9))
vax = vol_ax[nplane].imshow(volume[nplane,:,:], cmap=cmap, alpha=alpha)
vol_fig.colorbar(vax,ax=vol_ax[nplane])
vol_ax[nplane].set_title('Plane %i' % nplane)
vol_fig.tight_layout()
if save_name:
vol_fig.savefig(save_name)
return vol_fig, vol_ax
def collapse_multiple_cells(self, cell_list, label_list, save_name=None, output=None):
from tqdm import tqdm
from analysis_toolbox.utils import now_str
# create empty volume to fill
cell_volume = np.zeros(self.Volume.shape).T
for cell, label in tqdm(zip(cell_list, label_list),total=len(cell_list)):
volume = self.find_cell(cell, mask=label)
zloc, yloc, xloc = np.where(volume != 0)
cell_volume[zloc,yloc,xloc] = volume[zloc,yloc,xloc]
if save_name: np.save(save_name+now_str(), cell_volume)
if output: output.put(cell_volume)
else: return cell_volume
def plot_cells(self, num_cells=10, mask=0, zoom_pad=25, save_name=None):
from analysis_toolbox.utils import get_transparent_cm
import random
trans_inferno = get_transparent_cm('hot',tvmax=1,gradient=False)
ts_fig, ts_ax = plt.subplots(num_cells,2,figsize=(8.5,num_cells*3),
gridspec_kw = {'width_ratios':[1,3]})
for neuron in range(num_cells):
randcell = random.randint(0,self.n-1)
cell_volume = self.find_cell(randcell, mask=mask)
cell_z = np.where(np.any(cell_volume,axis=(1,2)))[0][0]
try:
ts_ax[neuron,0].imshow(self.image_mean[cell_z],
cmap='gray',
vmax=np.percentile(np.ravel(self.image_mean),99.0))
except:
pass
cell_im = ts_ax[neuron,0].imshow(cell_volume[cell_z],cmap=trans_inferno)
ts_ax[neuron,0].set_title(f'Plane {cell_z}')
if zoom_pad:
max_X = (self.Cell_X[randcell][self.Cell_X[randcell] > 0]).max()
min_X = (self.Cell_X[randcell][self.Cell_X[randcell] > 0]).min()
max_Y = (self.Cell_Y[randcell][self.Cell_Y[randcell] > 0]).max()
min_Y = (self.Cell_Y[randcell][self.Cell_Y[randcell] > 0]).min()
ts_ax[neuron,0].set_xlim([min_X-zoom_pad,max_X+zoom_pad])
ts_ax[neuron,0].set_ylim([min_Y-zoom_pad,max_Y+zoom_pad])
ts_fig.colorbar(cell_im,ax=ts_ax[neuron,0])
dff = (self.Cell_timesers1[randcell,:] - self.Cell_baseline1[randcell,:])/ \
(self.Cell_baseline1[randcell,:] - self.background)
t = len(dff)
ts_ax[neuron,1].plot(np.linspace(0,t/self.im_rate,num=t),
self.Cell_timesers0[randcell,:],
alpha=0.5,label='F')
ts_ax[neuron,1].plot(np.linspace(0,t/self.im_rate,num=t),
self.Cell_timesers1[randcell,:],
alpha=0.5,label='detrended F')
ts_ax[neuron,1].plot(np.linspace(0,t/self.im_rate,num=t),
self.Cell_baseline1[randcell,:],
alpha=0.5,label='baseline')
ts_ax[neuron,1].set_ylim([np.percentile(self.Cell_timesers0[randcell,:],0.1),
np.percentile(self.Cell_timesers0[randcell,:],99.9)])
ts_dff_ax = ts_ax[neuron,1].twinx()
ts_dff_ax.plot(np.linspace(0,t/self.im_rate,num=t),dff,
alpha=0.5,label='$\Delta F / F$',
color='#17becf')
ts_dff_ax.set_ylim([np.percentile(dff,0.1),
np.percentile(dff,99.9)])
ts_ax[neuron,1].legend(loc='lower left',mode='expand',
bbox_to_anchor=(0,1.02,1,0.2),
ncol=3)
xlim_win = 500 # seconds
randslice = random.randint(0,int(t-xlim_win*self.im_rate))
ts_ax[neuron,1].set_xlim(randslice/self.im_rate,
(randslice+xlim_win*self.im_rate)/ \
self.im_rate)
ts_ax[neuron,1].set_ylabel('$F$')
ts_ax[neuron,1].set_xlabel('t [s]')
ts_dff_ax.set_ylabel('$\Delta F / F$')
ts_dff_ax.legend(loc='lower right')
ts_fig.tight_layout()
if save_name:
ts_fig.savefig(save_name)
return ts_fig, ts_ax
def find_component(self, comp_spcesers, comp_num, mask=False):
cell_volume = np.zeros((self.z, self.y, self.x))
for cell in range(np.count_nonzero(np.nonzero(comp_spcesers[comp_num, :]))):
for j in range(np.count_nonzero(self.Cell_X[cell, :] > 0)):
if mask:
cell_volume[int(self.Cell_Z[cell, j]), int(self.Cell_Y[cell, j]),
int(self.Cell_X[cell, j])] = 1
else:
cell_volume[int(self.Cell_Z[cell, j]), int(self.Cell_Y[cell, j]),
int(self.Cell_X[cell, j])] = comp_spcesers[comp_num,cell]
return cell_volume
def mika_visualize_components(self,comp_spcesers, comp_timesers, \
save_name=None):
import datetime
# loop over components
for h in range(comp_spcesers.shape[0]):
# construct component volumes
S = np.zeros((self.x, self.y, self.z))
for i in range(self.n):
for j in range(np.count_nonzero(np.isfinite(self.Cell_X[i]))):
xij, yij, zij = int(self.Cell_X[i, j]), \
int(self.Cell_Y[i, j]), \
int(self.Cell_Z[i, j])
S[xij, yij, zij] = np.maximum(S[xij, yij, zij],
comp_spcesers[h,i])
# visualize component maximal projections
clust_fig, clust_ax = plt.subplots(2,1)
clust_ax[0].imshow(S.max(2).T)
# visualize component timeseries
clust_ax[1].plot(comp_timesers[h])
if save_name:
from datetime import datetime
save_names = save_name.split('.')
clust_fig.savefig(save_names[0] + '.'+ str(h) + '.'+ \
datetime.now().strftime("%Y-%m-%d_%H-%M") + \
'.' + save_names[-1])
def visualize_component(self, comp_num, comp_timesers, comp_spcesers, \
save_name=None, close_fig=False):
from analysis_toolbox.utils import get_transparent_cm
trans_inferno = get_transparent_cm('hot',tvmax=1,gradient=False)
clust_volume = self.find_component(comp_spcesers, comp_num)
nplanes = clust_volume.shape[0]
clust_fig, clust_ax = plt.subplots(nplanes+2,1,
figsize=(8,2*2 + nplanes*3))
clust_ax[0].plot(np.linspace(0,len(comp_timesers[comp_num])/self.im_rate,
num=len(comp_timesers[comp_num])),
comp_timesers[comp_num])
clust_ax[0].set_ylabel('$\Delta F / F$')
clust_ax[0].set_xlabel('Time (s)')
clust_ax[0].set_title('Calcium dynamics')
clust_ax[1].plot(np.linspace(0,len(comp_timesers[comp_num])/self.im_rate,
num=len(comp_timesers[comp_num])),
comp_timesers[comp_num])
clust_ax[1].set_ylabel('$\Delta F / F$')
clust_ax[1].set_xlabel('Time (s)')
clust_ax[1].set_title('Calcium dynamics')
clust_ax[1].set_ylim(top=np.percentile(comp_timesers[comp_num],99.9))
slice_win = 10 # in seconds
        rand_slice = np.random.randint(int(len(comp_timesers[comp_num])/self.im_rate - slice_win))
clust_ax[1].set_xlim([rand_slice, rand_slice+slice_win])
for nplane in range(nplanes):
clust_ax[nplane+2].imshow(self.image_mean[nplane,:,:],cmap='gray')
cax = clust_ax[nplane+2].imshow(clust_volume[nplane,:,:],
vmax=np.percentile(np.ravel(clust_volume),99.9),
cmap=trans_inferno)
clust_fig.colorbar(cax,ax=clust_ax[nplane+2])
clust_fig.suptitle(self.expt_name)
clust_fig.tight_layout()
clust_fig.subplots_adjust(top = 0.9)
if save_name:
from datetime import datetime
save_names = save_name.split('.')
clust_fig.savefig(save_names[0]+'-'+ \
datetime.now().strftime("%Y-%m-%d_%H-%M")+ \
'.'+save_names[1])
if close_fig: plt.close(clust_fig)
return clust_fig, clust_ax
def visualize_multiple_components(self, component_list, comp_spcesers, comp_timesers,
save_name=False, close_fig=False):
for i in component_list:
self.visualize_component(i, comp_timesers, comp_spcesers,
save_name=save_name, close_fig=close_fig)
def visualize_components(self, component_list, comp_spcesers, comp_timesers,
save_name='visualize_cluster', close_fig=False, parallelize=False):
if parallelize:
import multiprocessing as mp
num_processes = min(mp.cpu_count(),len(component_list))
# divide clusters into all processes
components_list = np.array_split(component_list,num_processes)
processes = [mp.Process(target=self.visualize_multiple_components,
args=(components_list[proc],comp_spcesers, comp_timesers),
kwargs={"save_name": self.savepath+'visualize_cluster-'+str(components_list[proc])+'.png',
"close_fig": True}) \
for proc in range(num_processes)]
print("Starting %i processes..." % num_processes)
for p in processes: p.start()
for p in processes: p.join()
print("Done!")
else:
self.visualize_multiple_components(component_list, comp_spcesers, comp_timesers,
save_name=save_name, close_fig=close_fig)
def compute_triggers(self, triggers, time_window, trigger_savename=False):
# how many stacks before and after the trigger are you interested to see?
window = np.arange(round(-time_window*self.im_rate),round(time_window*self.im_rate))
triggers_arr = triggers.reshape(-1,1) + window
triggers_around = triggers_arr[(triggers_arr < self.nstacks).all(axis=1),:]
if trigger_savename:
np.save(self.savepath + trigger_savename + '.npy', triggers_around)
return window, triggers_around
def compute_triggered(self, triggers_around, comp_timesers, statistic='mean'):
triggered = comp_timesers[:,triggers_around]
if statistic == 'mean':
triggered_center = triggered.mean(axis=1)
elif statistic == 'median':
triggered_center = np.median(triggered, axis=1)
elif statistic == 'both':
triggered_center = (triggered.mean(axis=1), np.median(triggered, axis=1))
return triggered, triggered_center
def visualize_triggered(self, comp_num, window, triggered, triggered_mean, triggered_median,
comp_spcesers, comp_timesers, plot_trials=False):
from scipy.stats import sem
from math_helper import compute_fft
roi_fig, roi_ax = plt.subplots(1, 6, figsize=(16, 3))
clust_volume = self.find_component(comp_spcesers, comp_num)
vmax = np.max(np.array([np.max(clust_volume.max(0)),np.max(clust_volume.max(1))]))
vmin = np.min(np.array([np.min(clust_volume.max(0)),np.min(clust_volume.max(1))]))
# Plot brain and ROI (xy projection)
roi_ax[0].imshow(self.image_mean.max(0).T,cmap='gray')
roi_ax[0].imshow(clust_volume.max(0).T,alpha=0.5,vmin=vmin,vmax=vmax)
roi_ax[0].axis('off')
# Plot brain and ROI (yz projection)
roi_ax[1].imshow(self.image_mean.max(1).T,cmap='gray',aspect='auto')
clust_imshow = roi_ax[1].imshow(clust_volume.max(1).T,alpha=0.5,aspect='auto',vmin=vmin,vmax=vmax)
roi_ax[1].axis('off')
roi_fig.colorbar(clust_imshow, ax=roi_ax[1])
if plot_trials:
ntriggers = triggered[comp_num].shape[0]
t_axis = np.tile(window/self.im_rate,(ntriggers,1)).transpose()
roi_ax[2].plot(t_axis,triggered[comp_num].transpose(), color='#1f77b4', alpha=0.05)
roi_ax[3].plot(t_axis,triggered[comp_num].transpose(), color='#1f77b4', alpha=0.05)
roi_ax[2].plot(window/self.im_rate, triggered_mean[comp_num], color='#d62728', zorder=1e6)
roi_ax[3].plot(window/self.im_rate, triggered_mean[comp_num], color='#d62728', zorder=1e6)
roi_ax[2].plot(window/self.im_rate, triggered_median[comp_num], color='#E377C2', zorder=1e6)
roi_ax[3].plot(window/self.im_rate, triggered_median[comp_num], color='#E377C2', zorder=1e6)
# Plot error bars
if plot_trials:
error = sem(triggered[comp_num].transpose(),axis=1)
roi_ax[2].fill_between(window/self.im_rate, triggered_mean[comp_num]+error, triggered_mean[comp_num]-error,
color='#d62728',alpha=0.5,zorder=1e6-1)
roi_ax[3].fill_between(window/self.im_rate, triggered_mean[comp_num]+error, triggered_mean[comp_num]-error,
color='#d62728',alpha=0.5,zorder=1e6-1)
roi_ax[2].axvline(x=0,color='k',ls='--')
roi_ax[2].set_ylabel(r'$\Delta F/ F$')
roi_ax[2].set_xlabel('Time (s)')
roi_ax[2].set_xlim([window.min()/self.im_rate,window.max()/self.im_rate])
roi_ax[3].axvline(x=0,color='k',ls='--')
roi_ax[3].set_ylabel(r'$\Delta F/ F$')
roi_ax[3].set_xlabel('Time (s)')
roi_ax[3].set_xlim([window.min()/self.im_rate,window.max()/self.im_rate])
if plot_trials:
roi_ax[3].set_ylim([np.min(np.array([(triggered_mean[comp_num]-error).min(),triggered_median[comp_num].min()])),
np.max(np.array([(triggered_mean[comp_num]+error).max(),triggered_median[comp_num].max()]))])
# Plot raw calcium trace
roi_ax[4].plot(np.linspace(0,self.nstacks/self.im_rate,self.nstacks), comp_timesers[comp_num])
roi_ax[4].set_ylabel(r'$\Delta F/ F$')
roi_ax[4].set_xlabel('Time (s)')
slice_win = 10 # in seconds
        rand_slice = np.random.randint(int(self.nstacks/self.im_rate - slice_win))
        roi_ax[4].set_xlim([rand_slice, rand_slice+slice_win])
# roi_ax[3].set_ylim([np.percentile(timeseries[clust],0.1), np.percentile(timeseries[clust],99.8)])
# Overlay swim power
roi_ax2 = roi_ax[4].twinx()
roi_ax2.plot(np.linspace(0,self.swim_power.shape[0]/self.ephys_rate,num=self.swim_power.shape[0]),
self.swim_power,color='#ff7f0e')
# roi_ax2.set_xlim([swim_power[0]*rand_slice/ephys_rate, swim_power[0]*rand_slice/ephys_rate])
roi_ax2.axis('off')
# Overlay flashes
roi_ax3 = roi_ax[4].twinx()
roi_ax3.plot(np.linspace(0, self.ep.channel4.shape[0]/self.ephys_rate,num= self.ep.channel4.shape[0]),
self.ep.channel4,color='#17becf')
roi_ax3.axis('off')
Y, angle, frq = compute_fft(comp_timesers[comp_num], self.im_rate)
roi_ax[5].plot(frq[1:],abs(Y)[1:])
roi_ax[5].set_xlabel('Freq (Hz)')
roi_ax[5].set_ylabel(r'|$\gamma$(freq)|')
# roi_ax[5].set_xlim([-0.001,0.5])
roi_fig.suptitle(str(self.expt_date.date())+'_'+self.expt_name)
roi_fig.tight_layout()
roi_fig.subplots_adjust(top = 0.8)
return roi_fig
def visualize_multiple_triggered(self, component_list, window, triggered, triggered_mean, triggered_median,
comp_spcesers, comp_timesers,
plot_trials=False, save_name=False, close_fig=True, output=None):
import datetime
num_comp = len(component_list)
delta = []
for comp_num in component_list:
print('Plotting ROI %i of %i ...' % (comp_num,num_comp))
roi_fig = self.visualize_triggered(comp_num, window, triggered, triggered_mean, triggered_median,
comp_spcesers, comp_timesers, plot_trials=plot_trials)
from scipy.stats import wilcoxon
t_stat,t_prob = wilcoxon(triggered[comp_num][:,np.logical_and(window/self.im_rate <= 0,
window/self.im_rate >= -1.)].mean(1),
triggered[comp_num][:,np.logical_and(window/self.im_rate > 0,
window/self.im_rate <= 1.)].mean(1))
print(t_stat,t_prob)
# save components with large change
if t_prob < 1e-10:
mark = 'o'
delta.append(comp_num)
else:
mark = 'x'
if save_name:
roi_fig.savefig(self.savepath+save_name+'-'+str(plot_trials)+'-'+str(comp_num)+
'-'+datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")+'-'+mark+'.png')
if close_fig:
plt.close(roi_fig)
if output: output.put(delta)
else: return delta
def visualize_triggereds(self, component_list, window, triggered, triggered_mean, triggered_median,
comp_spcesers, comp_timesers,
plot_trials=False, save_name='visualize_triggered_comp', close_fig=True,
parallelize=False):
if parallelize:
import multiprocessing as mp
num_processes = min(mp.cpu_count(),len(component_list))
# divide clusters into all processes
components_list = np.array_split(component_list,num_processes)
output=mp.Queue()
processes = [mp.Process(target=self.visualize_multiple_triggered,
args=(components_list[proc], window, triggered, triggered_mean, triggered_median,
comp_spcesers, comp_timesers),
kwargs={"plot_trials": plot_trials,
"save_name": save_name,
"close_fig": True, "output": output}) \
for proc in range(num_processes)]
print("Starting %i processes..." % num_processes)
for p in processes: p.start()
for p in processes: p.join()
result = [output.get() for p in processes]
print("Done!")
return result
else:
result = self.visualize_multiple_triggered(component_list, window, triggered, triggered_mean, triggered_median,
comp_spcesers, comp_timesers, plot_trials=plot_trials,
save_name=save_name, close_fig=close_fig, output=None)
return result
def calculate_DFF(self, bg_multiplier=0.8, debug=False):
self.dff = (self.Cell_timesers1 - self.Cell_baseline1) / \
(self.Cell_baseline1 - self.background * bg_multiplier)
def check_NMF(self, comp_spcsers, comp_timesers, weight_percentile=99.5, save_name='component_ts'):
from colors import tableau20
import datetime
import random
dff = (self.Cell_timesers1 - self.Cell_baseline1) / (self.Cell_baseline1 - self.background * 0.8)
nclust = comp_spcsers.shape[0]
from analysis_toolbox.utils import get_transparent_cm
trans_inferno = get_transparent_cm('hot',tvmax=1,gradient=False)
clust_fig, clust_ax = plt.subplots(nclust, 6, figsize=(20,nclust*2),
gridspec_kw = {'width_ratios':[1,1,3,3,3,3]})
for clust in range(nclust):
clust_volume = self.find_component(comp_spcsers, clust)
vmax = np.max(np.array([np.max(clust_volume.max(0)),np.max(clust_volume.max(1))]))
vmin = np.min(np.array([np.min(clust_volume.max(0)),np.min(clust_volume.max(1))]))
# Plot brain and ROI (xy projection)
clust_ax[clust,0].imshow(self.image_mean.max(0).T,cmap='gray')
clust_ax[clust,0].imshow(clust_volume.max(0).T,cmap=trans_inferno,vmin=vmin,vmax=vmax)
clust_ax[clust,0].axis('off')
# Plot brain and ROI (zy projection)
clust_ax[clust,1].imshow(self.image_mean.max(1).T,cmap='gray',aspect='auto')
clust_imshow = clust_ax[clust,1].imshow(clust_volume.max(1).T,cmap=trans_inferno,vmin=vmin,vmax=vmax,aspect='auto')
clust_ax[clust,1].axis('off')
clust_fig.colorbar(clust_imshow, ax=clust_ax[clust,1])
# plot all weights
clust_ax[clust,2].plot(comp_spcsers[clust])
perc = weight_percentile
perct = np.percentile(comp_spcsers[clust],perc)
clust_ax[clust,2].axhline(y=perct, color='r', label=str(perct))
clust_ax[clust,2].text(25000,perct+0.1,"%.1f percentile: %.1f" % (perc, perct))
clust_ax[clust,2].set_ylabel('Weight')
clust_ax[clust,2].set_xlabel('Cell # (unsorted)')
# plot distribution of weights
clust_ax[clust,3].hist(np.ravel(comp_spcsers[clust]),bins=200)
clust_ax[clust,3].axvline(x=perct, color='r', label=str(perct))
clust_ax[clust,3].text(perct-0.6,10**3, "%.1f percentile: %.1f" % (perc, perct))
clust_ax[clust,3].set_yscale('log')
clust_ax[clust,3].set_xlabel('Weight')
clust_ax[clust,3].set_ylabel(r'$\log(Counts)$')
# plot comparison of time series
clust_ax[clust,4].plot(np.linspace(0,len(comp_timesers[clust])/self.im_rate,num=len(comp_timesers[clust])),
comp_timesers[clust])
# find highly weighted cells
clust_cells = np.where(comp_spcsers[clust] > perct)[0]
for cell in clust_cells:
clust_ax[clust,4].plot(np.linspace(0,len(dff[cell])/self.im_rate,num=len(dff[cell])),
dff[cell], alpha=0.4)
win_size = 10 # in seconds
randslice = random.randint(0, int(len(comp_timesers[clust])/self.im_rate - win_size))
clust_ax[clust,4].set_xlim([randslice, randslice+win_size])
clust_ax[clust,4].set_ylim([np.min([-0.1,np.percentile(comp_timesers[clust],0.1)]),np.percentile(comp_timesers[clust],99.5)])
clust_ax[clust,4].set_ylabel('$\Delta F / F$')
clust_ax[clust,4].set_xlabel('Time [s]')
# find the standard deviation
dff_std = np.std(dff[clust_cells],0)
clust_ax[clust,5].plot(np.linspace(0,len(comp_timesers[clust])/self.im_rate,num=len(comp_timesers[clust])),
comp_timesers[clust],color=tableau20[0])
clust_ax[clust,5].fill_between(np.linspace(0,len(comp_timesers[clust])/self.im_rate,num=len(comp_timesers[clust])),
comp_timesers[clust]-dff_std, comp_timesers[clust]+dff_std, alpha=0.8, color=tableau20[1])
clust_ax[clust,5].set_xlim([randslice, randslice+win_size])
clust_ax[clust,5].set_ylim([np.min([-0.1,np.percentile(comp_timesers[clust],0.1)]),np.percentile(comp_timesers[clust],99.5)])
clust_ax[clust,5].set_ylabel('$\Delta F / F$')
clust_ax[clust,5].set_xlabel('Time [s]')
clust_fig.tight_layout()
clust_fig.savefig(self.savepath+save_name+'-'+datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")+'.png')
######################################################################
### mika_helper.py ends here
``` |
{
"source": "jingxlim/voluseg",
"score": 2
} |
#### File: voluseg/_steps/step4d.py
```python
def nnmf_sparse(V0, XYZ0, W0, B0, S0, tolfun=1e-4, miniter=10, maxiter=100,
timeseries_mean=1.0, timepoints=None, verbosity=1):
'''
cell detection via nonnegative matrix factorization with sparseness projection
V0 = voxel_timeseries_valid
XYZ0 = voxel_xyz_valid
W0 = cell_weight_init_valid
B0 = cell_neighborhood_valid
S0 = cell_sparseness
'''
import os
import numpy as np
from scipy import stats
from scipy import linalg
from skimage import measure
from voluseg._tools.sparseness_projection import sparseness_projection
os.environ['MKL_NUM_THREADS'] = '1'
# CAUTION: variable is modified in-place to save memory
V0 *= (timeseries_mean / V0.mean(1)[:, None]) # normalize voxel timeseries
if timepoints is not None:
V = V0[:, timepoints].astype(float) # copy input signal
else:
V = V0.astype(float) # copy input signal
XYZ = XYZ0.astype(int)
W = W0.astype(float)
B = B0.astype(bool)
S = S0.copy()
# get dimensions
n, t = V.shape
n_, c = W.shape
assert(n_ == n)
H = np.zeros((c, t)) # zero timeseries array
dnorm_prev = np.full(2, np.inf) # last two d-norms
for ii in range(maxiter):
# save current states
H_ = H.copy()
# Alternate least squares with regularization
H = np.maximum(linalg.lstsq(W, V)[0], 0)
H *= (timeseries_mean / H.mean(1)[:, None]) # normalize component timeseries
W = np.maximum(linalg.lstsq(V.T, H.T)[0], 0)
W[np.logical_not(B)] = 0 # restrict component boundaries
for ci in range(c):
W_ci = W[B[:, ci], ci]
if np.any(W_ci) and (S[ci] > 0):
# get relative dimensions of component
XYZ_ci = XYZ[B[:, ci]] - XYZ[B[:, ci]].min(0)
# enforce component sparseness and percentile threshold
W_ci = sparseness_projection(W_ci, S[ci], at_least_as_sparse=True)
# retain largest connected component (mode)
L_ci = np.zeros(np.ptp(XYZ_ci, 0) + 1, dtype=bool)
L_ci[tuple(zip(*XYZ_ci))] = W_ci > 0
L_ci = measure.label(L_ci, connectivity=3)
lci_mode = stats.mode(L_ci[L_ci>0]).mode[0]
W_ci[L_ci[tuple(zip(*XYZ_ci))] != lci_mode] = 0
W[B[:, ci], ci] = W_ci
# Get norm of difference and check for convergence
dnorm = np.sqrt(np.mean(np.square(V - W.dot(H)))) / timeseries_mean
diffh = np.sqrt(np.mean(np.square(H - H_ ))) / timeseries_mean
if ((dnorm_prev.max(0) - dnorm) < tolfun) & (diffh < tolfun):
if (ii >= miniter):
break
dnorm_prev[1] = dnorm_prev[0]
dnorm_prev[0] = dnorm
if verbosity:
print((ii, dnorm, diffh))
# Perform final regression on full input timeseries
H = np.maximum(linalg.lstsq(W, V0)[0], 0)
H *= (timeseries_mean / H.mean(1)[:, None]) # normalize component timeseries
return (W, H, dnorm)
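# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, synthetic call to nnmf_sparse. Shapes follow the docstring above:
# V0 is (voxels x timepoints), XYZ0 holds voxel coordinates, W0 and B0 are
# (voxels x cells), S0 is one sparseness value per cell. All sizes and values
# here are made up for illustration.
def _example_nnmf_sparse():
    import numpy as np
    rng = np.random.default_rng(0)
    n_voxels, n_timepoints, n_cells = 200, 50, 3
    V0 = rng.random((n_voxels, n_timepoints)) + 1.0
    XYZ0 = np.stack(np.unravel_index(np.arange(n_voxels), (20, 10, 1)), axis=1)
    W0 = rng.random((n_voxels, n_cells))
    B0 = np.ones((n_voxels, n_cells), dtype=bool)
    S0 = np.full(n_cells, 0.5)
    W, H, dnorm = nnmf_sparse(V0, XYZ0, W0, B0, S0, maxiter=20, verbosity=0)
    return W, H, dnorm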
```
#### File: voluseg/_tools/evenly_parallelize.py
```python
def evenly_parallelize(input_list):
'''return evenly partitioned spark resilient distributed dataset (RDD)'''
import numpy as np
from pyspark.sql.session import SparkSession
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
n_input = len(input_list)
n_parts = sc.parallelize(input_list).getNumPartitions()
partitions = np.floor(np.linspace(0, n_parts, n_input, endpoint=False)).astype(int)
return sc.parallelize(zip(partitions, input_list)).partitionBy(n_parts)
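# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical example: distribute a list of volume filenames across Spark
# partitions; each RDD element is a (partition_index, filename) pair. The
# filenames are placeholders and an active Spark context is assumed.
def _example_evenly_parallelize():
    input_list = ['vol0.h5', 'vol1.h5', 'vol2.h5', 'vol3.h5']  # placeholder names
    rdd = evenly_parallelize(input_list)
    names = rdd.map(lambda pair: pair[1]).collect()
    return names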
```
#### File: voluseg/_tools/sparseness_projection.py
```python
def sparseness_projection(Si, s, at_least_as_sparse=False):
'''Hoyer sparseness projection'''
import numpy as np
assert(Si.ndim == 1)
S = np.copy(Si) # copy input signal
if s <= 0:
return np.maximum(S, 0) # enforce nonnegativity
d = S.size
L2 = np.sqrt(np.sum(np.square(S))) # fixed l2-norm
L1 = L2 * (np.sqrt(d) * (1 - s) + s) # desired l1-norm
# quit if at_least_sparse=True and original exceeds target sparseness
if at_least_as_sparse:
if L1 >= np.sum(np.abs(S)):
return S
# initialize components with negative values
Z = np.zeros(S.shape, dtype=bool)
negatives = True
while negatives:
# Fix components with negative values at 0
Z = Z | (S < 0)
S[Z] = 0
# Project to the sum-constraint hyperplane
S += (L1 - np.sum(S)) / (d - np.sum(Z))
S[Z] = 0
# Get midpoints of hyperplane, M
M = np.tile(L1 / (d - np.sum(Z)), d)
M[Z] = 0
P = S - M
# Solve for Alph, L2 = l2[M + Alph*(S-M)] = l2[P*Alph + M],
# where L2 is defined above, and l2 is the l2-norm operator.
# For convenience, we square both sides and find the roots,
# 0 = (l2[P*Alph + M])^2 - (L2)^2
# 0 = sum((P*Alph)^2) + sum(2*P*M*Alph) + sum(M^2) - L2^2
A = np.sum(P * P)
B = 2 * np.sum(P * M)
C = np.sum(M * M) - L2**2
Alph = (-B + np.real(np.sqrt(B**2 - 4 * A * C))) / (2 * A)
# Project within the sum-constraint hyperplane to match L2
S = M + Alph * P
# Check for negative values in solution
negatives = np.any(S < 0)
return S
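# --- Illustrative usage sketch (not part of the original module) ---
# Small synthetic check of the Hoyer projection: with s <= 0 the input is only
# clipped to be nonnegative, while values of s near 1 drive most entries to zero
# at a fixed l2-norm. The input vector below is made up.
def _example_sparseness_projection():
    import numpy as np
    v = np.array([1.0, 0.8, 0.6, 0.4, 0.2])
    projected = sparseness_projection(v, 0.9)
    # the l2-norm is preserved, but most mass concentrates in the largest entries
    return projected, np.linalg.norm(v), np.linalg.norm(projected)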
``` |
{
"source": "jingxu10/medical-decathlon",
"score": 2
} |
#### File: 2D/openvino/inference_openvino.py
```python
from openvino.inference_engine import IENetwork, IECore
from time import time
import logging as log
import numpy as np
from argparse import ArgumentParser
import os
import sys
"""
OpenVINO Python Inference Script
This will load the OpenVINO version of the model (IR)
and perform inference on a few validation samples
from the Decathlon dataset.
You'll need the extension library to handle the Resize_Bilinear operations.
python inference_openvino.py -l ${INTEL_OPENVINO_DIR}/inference_engine/lib/intel64/libcpu_extension_avx2.so
"""
def dice_score(y_true, y_pred, smooth=1.):
"""
Sorensen Dice coefficient
"""
y_true = np.round(y_true)
y_pred = np.round(y_pred)
numerator = 2.0 * np.sum(y_true * y_pred) + smooth
denominator = np.sum(y_true) + np.sum(y_pred) + smooth
coef = numerator / denominator
return coef
def plot_predictions(predictions, input_data, label_data, img_indicies, args):
"""
Plot the predictions with matplotlib and save to png files
"""
png_directory = "inference_examples_openvino"
if not os.path.exists(png_directory):
os.makedirs(png_directory)
import matplotlib.pyplot as plt
# Processing output blob
log.info("Plotting the predictions and saving to png files. Please wait...")
number_imgs = predictions.shape[0]
num_rows_per_image = args.rows_per_image
row = 0
for idx in range(number_imgs):
if row == 0:
plt.figure(figsize=(15, 15))
plt.subplot(num_rows_per_image, 3, 1+row*3)
plt.imshow(input_data[idx, 0, :, :], cmap="bone", origin="lower")
plt.axis("off")
if row == 0:
plt.title("MRI")
plt.subplot(num_rows_per_image, 3, 2+row*3)
plt.imshow(label_data[idx, 0, :, :], origin="lower")
plt.axis("off")
if row == 0:
plt.title("Ground truth")
plt.subplot(num_rows_per_image, 3, 3+row*3)
plt.imshow(predictions[idx, 0, :, :], origin="lower")
plt.axis("off")
if row == 0:
plt.title("Prediction")
plt.tight_layout()
if (row == (num_rows_per_image-1)) or (idx == (number_imgs-1)):
if num_rows_per_image == 1:
fileidx = "pred{}.png".format(img_indicies[idx])
else:
fileidx = "pred_group{}".format(idx // num_rows_per_image)
filename = os.path.join(png_directory, fileidx)
plt.savefig(filename,
bbox_inches="tight", pad_inches=0)
print("Saved file: {}".format(filename))
row = 0
else:
row += 1
def load_data():
"""
Modify this to load your data and labels
"""
# Load data
# You can create this Numpy datafile by running the create_validation_sample.py script
data_file = np.load("data/validation_data.npz")
imgs_validation = data_file["imgs_validation"]
msks_validation = data_file["msks_validation"]
img_indicies = data_file["indicies_validation"]
"""
OpenVINO uses channels first tensors (NCHW).
TensorFlow usually does channels last (NHWC).
So we need to transpose the axes.
"""
input_data = imgs_validation.transpose((0, 3, 1, 2))
msks_data = msks_validation.transpose((0, 3, 1, 2))
return input_data, msks_data, img_indicies
def load_model(model, fp16=False):
"""
Load the OpenVINO model.
"""
log.info("Loading U-Net model to the plugin")
model_xml = model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
return model_xml, model_bin
def print_stats(exec_net, input_data, n_channels, batch_size, input_blob, out_blob, args):
"""
Prints layer by layer inference times.
Good for profiling which ops are most costly in your model.
"""
# Start sync inference
log.info("Starting inference ({} iterations)".format(args.number_iter))
infer_time = []
for i in range(args.number_iter):
t0 = time()
res = exec_net.infer(
inputs={input_blob: input_data[0:batch_size, :n_channels]})
infer_time.append((time() - t0) * 1000)
average_inference = np.average(np.asarray(infer_time))
log.info("Average running time of one batch: {:.5f} ms".format(
average_inference))
log.info("Images per second = {:.3f}".format(
batch_size * 1000.0 / average_inference))
perf_counts = exec_net.requests[0].get_perf_counts()
log.info("Performance counters:")
log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format("name",
"layer_type",
"exec_type",
"status",
"real_time, us"))
for layer, stats in perf_counts.items():
log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer,
stats["layer_type"],
stats["exec_type"],
stats["status"],
stats["real_time"]))
def build_argparser():
parser = ArgumentParser()
parser.add_argument("-number_iter", "--number_iter",
help="Number of iterations", default=5, type=int)
parser.add_argument("-l", "--cpu_extension",
help="MKLDNN (CPU)-targeted custom layers. "
"Absolute path to a shared library with "
"the kernels impl.", type=str)
parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder",
type=str, default=None)
parser.add_argument("-d", "--device",
help="Specify the target device to infer on; "
"CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device "
"specified (CPU by default)", default="CPU",
type=str)
parser.add_argument("-plot", "--plot", help="Plot results",
default=False, action="store_true")
parser.add_argument("-rows_per_image", "--rows_per_image",
help="Number of rows per plot (when -plot = True)",
default=4, type=int)
parser.add_argument("-stats", "--stats", help="Plot the runtime statistics",
default=False, action="store_true")
parser.add_argument("-m", "--model", help="OpenVINO model filename",
default="../openvino_models/FP32/saved_model.xml")
return parser
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s",
level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
ie = IECore()
if args.cpu_extension and "CPU" in args.device:
ie.add_extension(args.cpu_extension, "CPU")
# Read IR
model_xml, model_bin = load_model(args.model, args.device=="MYRIAD")
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork(model=model_xml, weights=model_bin)
if "CPU" in args.device:
supported_layers = ie.query_network(net, "CPU")
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
format(args.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
"""
Ask OpenVINO for input and output tensor names and sizes
"""
input_blob = next(iter(net.inputs)) # Name of the input layer
out_blob = next(iter(net.outputs)) # Name of the output layer
# Load data
input_data, label_data, img_indicies = load_data()
batch_size = 1
n_channels = input_data.shape[1]
height = input_data.shape[2]
width = input_data.shape[3]
# Reshape the OpenVINO network to accept the different image input shape
# NOTE: This only works for some models (e.g. fully convolutional)
net.reshape({input_blob:(batch_size,n_channels,height,width)})
batch_size, n_channels, height, width = net.inputs[input_blob].shape
batch_size, n_out_channels, height_out, width_out = net.outputs[out_blob].shape
# Loading model to the plugin
log.info("Loading model to the plugin")
exec_net = ie.load_network(network=net, device_name=args.device)
del net
if args.stats:
# Print the latency and throughput for inference
print_stats(exec_net, input_data, n_channels,
batch_size, input_blob, out_blob, args)
"""
OpenVINO inference code
input_blob is the name (string) of the input tensor in the graph
out_blob is the name (string) of the output tensor in the graph
Essentially, this looks exactly like a feed_dict for TensorFlow inference
"""
# Go through the sample validation dataset to plot predictions
predictions = np.zeros((img_indicies.shape[0], n_out_channels,
height_out, width_out))
for idx in range(0, img_indicies.shape[0], batch_size):
res = exec_net.infer(inputs={input_blob:
input_data[idx:(idx+batch_size),
:n_channels]})
# Save the predictions to array
predictions[idx:(idx+batch_size), ] = res[out_blob]
        if (idx + batch_size) > img_indicies.shape[0]:  # Partial batch left in data
            log.info("Partial batch left over in dataset.")
"""
Evaluate model with Dice metric
"""
for idx in range(img_indicies.shape[0]):
dice = dice_score(predictions[idx, 0, :, :], label_data[idx, 0, :, :])
log.info("Image #{}: Dice score = {:.4f}".format(
img_indicies[idx], dice))
if args.plot:
plot_predictions(predictions, input_data,
label_data, img_indicies, args)
del exec_net
if __name__ == '__main__':
sys.exit(main() or 0)
```
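Two data-handling details in the script above are easy to get wrong: OpenVINO consumes NCHW batches while the TensorFlow-trained data is stored NHWC, and the Dice score is smoothed so that empty masks do not divide by zero. A standalone NumPy illustration (not part of the repository):
```python
import numpy as np

imgs_nhwc = np.random.rand(2, 128, 128, 1)       # TensorFlow-style batch (NHWC)
imgs_nchw = imgs_nhwc.transpose((0, 3, 1, 2))    # OpenVINO-style batch (NCHW)
print(imgs_nchw.shape)                           # (2, 1, 128, 128)

def dice(y_true, y_pred, smooth=1.0):
    y_true, y_pred = np.round(y_true), np.round(y_pred)
    return (2.0 * np.sum(y_true * y_pred) + smooth) / \
           (np.sum(y_true) + np.sum(y_pred) + smooth)

truth = np.zeros((128, 128)); truth[32:96, 32:96] = 1
pred = np.zeros((128, 128)); pred[40:96, 32:96] = 1
print(round(dice(truth, pred), 3))               # two shifted squares, about 0.93
```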
#### File: medical-decathlon/3D/inference_openvino.py
```python
import sys
import os
import csv
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IECore
import tensorflow as tf
import keras as K
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import nibabel as nib
from tqdm import tqdm
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # Get rid of the AVX, SSE warnings
"""
OpenVINO Python Inference Script
This will load the OpenVINO version of the model (IR)
and perform inference on a few validation samples
from the Decathlon dataset.
"""
def dice_score(pred, truth):
"""
Sorensen Dice score
Measure of the overlap between the prediction and ground truth masks
"""
numerator = np.sum(np.round(pred) * truth) * 2.0
denominator = np.sum(np.round(pred)) + np.sum(truth)
return numerator / denominator
def crop_img(img, msk, crop_dim, n_channels, n_out_channels):
"""
Crop the image and mask
"""
number_of_dimensions = len(crop_dim)
slices = []
for idx in range(number_of_dimensions): # Go through each dimension
cropLen = crop_dim[idx]
imgLen = img.shape[idx]
start = (imgLen-cropLen)//2
slices.append(slice(start, start+cropLen))
# No slicing along channels
slices_img = slices.copy()
slices_msk = slices.copy()
slices_img.append(slice(0, n_channels))
slices_msk.append(slice(0, n_out_channels))
return img[tuple(slices_img)], msk[tuple(slices_msk)]
def z_normalize_img(img):
"""
Normalize the image so that the mean value for each image
is 0 and the standard deviation is 1.
"""
for channel in range(img.shape[-1]):
img_temp = img[..., channel]
img_temp = (img_temp - np.mean(img_temp)) / np.std(img_temp)
img[..., channel] = img_temp
return img
def load_data(imgFile, mskFile, crop_dim, n_channels, n_out_channels, openVINO_order=True):
"""
Modify this to load your data and labels
"""
imgs = np.empty((len(imgFile),*crop_dim,n_channels))
msks = np.empty((len(mskFile),*crop_dim,n_out_channels))
fileIDs = []
for idx in range(len(imgFile)):
img_temp = np.array(nib.load(imgFile[idx]).dataobj)
msk = np.array(nib.load(mskFile[idx]).dataobj)
if n_channels == 1:
img = img_temp[:, :, :, [0]] # FLAIR channel
else:
img = img_temp
# Add channels to mask
msk[msk > 0] = 1.0
msk = np.expand_dims(msk, -1)
# Crop the image to the input size
img, msk = crop_img(img, msk, crop_dim, n_channels, n_out_channels)
# z-normalize the pixel values
img = z_normalize_img(img)
fileIDs.append(os.path.basename(imgFile[idx]))
imgs[idx] = img
msks[idx] = msk
if openVINO_order:
imgs = imgs.transpose((0, 4, 1, 2, 3))
msks = msks.transpose((0, 4, 1, 2, 3))
return imgs, msks, fileIDs
def load_model(model_xml, fp16=False):
"""
Load the OpenVINO model.
"""
log.info("Loading U-Net model to the plugin")
model_bin = os.path.splitext(model_xml)[0] + ".bin"
return model_xml, model_bin
def print_stats(exec_net, input_data, n_channels, batch_size, input_blob, out_blob, args):
"""
Prints layer by layer inference times.
Good for profiling which ops are most costly in your model.
"""
# Start sync inference
log.info("Starting inference ({} iterations)".format(args.number_iter))
log.info("Number of input channels = {}".format(n_channels))
log.info("Input data shape = {}".format(input_data.shape))
infer_time = []
for i in range(args.number_iter):
t0 = time()
res = exec_net.infer(
inputs={input_blob: input_data[0:batch_size, :n_channels]})
infer_time.append((time() - t0) * 1000)
average_inference = np.average(np.asarray(infer_time))
log.info("Average running time of one batch: {:.5f} ms".format(
average_inference))
log.info("Images per second = {:.3f}".format(
batch_size * 1000.0 / average_inference))
perf_counts = exec_net.requests[0].get_perf_counts()
log.info("Performance counters:")
log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format("name",
"layer_type",
"exec_type",
"status",
"real_time, us"))
for layer, stats in perf_counts.items():
log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer,
stats["layer_type"],
stats["exec_type"],
stats["status"],
stats["real_time"]))
def build_argparser():
parser = ArgumentParser(description="Performs inference using OpenVINO. Compares to Keras model.",
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-number_iter", "--number_iter",
help="Number of iterations", default=5, type=int)
parser.add_argument("-l", "--cpu_extension",
help="MKLDNN (CPU)-targeted custom layers. "
"Absolute path to a shared library with "
"the kernels impl.", type=str)
parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder",
type=str, default=None)
parser.add_argument("-d", "--device",
help="Specify the target device to infer on; "
"CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device "
"specified (CPU by default)", default="CPU",
type=str)
parser.add_argument("-stats", "--stats", help="Plot the runtime statistics",
default=False, action="store_true")
parser.add_argument("-plot", "--plot", help="Plot the predictions",
default=False, action="store_true")
parser.add_argument("--csv_file",
default="test.csv",
help="CSV list of files to test")
parser.add_argument("--openvino_model", type=str, help="The saved OpenVINO XML file",
default="./openvino_models/FP32/3d_unet_decathlon.xml")
parser.add_argument("--keras_model", type=str, help="Keras model filename",
default="./saved_model/3d_unet_decathlon.hdf5")
return parser
def read_csv_file(filename):
"""
Read the CSV file with the image and mask filenames
"""
imgFiles = []
mskFiles = []
with open(filename, "rt") as f:
data = csv.reader(f)
for row in data:
if len(row) > 0:
imgFiles.append(row[0])
mskFiles.append(row[1])
return imgFiles, mskFiles, len(imgFiles)
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s",
level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
log.info(args)
log.info("Loading test data from file: {}".format(args.csv_file))
ie = IECore()
if args.cpu_extension and "CPU" in args.device:
ie.add_extension(args.cpu_extension, "CPU")
# Read IR
model_xml, model_bin = load_model(args.openvino_model, args.device=="MYRIAD")
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork(model=model_xml, weights=model_bin)
if "CPU" in args.device:
supported_layers = ie.query_network(net, "CPU")
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
format(args.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
"""
Ask OpenVINO for input and output tensor names and sizes
"""
input_blob = next(iter(net.inputs)) # Name of the input layer
out_blob = next(iter(net.outputs)) # Name of the output layer
# Load data
batch_size, n_channels, height, width, depth = net.inputs[input_blob].shape
batch_size, n_out_channels, height_out, width_out, depth_out = net.outputs[out_blob].shape
crop_dim = [height, width, depth]
"""
Read the CSV file with the filenames of the images and masks
"""
imgFiles, mskFiles, num_imgs = read_csv_file(args.csv_file)
"""
Load the data for OpenVINO
"""
input_data, label_data_ov, img_indicies = load_data(imgFiles, mskFiles,
crop_dim, n_channels, n_out_channels, openVINO_order=True)
# Reshape the OpenVINO network to accept the different image input shape
# NOTE: This only works for some models (e.g. fully convolutional)
batch_size = 1
n_channels = input_data.shape[1]
height = input_data.shape[2]
width = input_data.shape[3]
depth = input_data.shape[4]
net.reshape({input_blob:(batch_size,n_channels,height,width,depth)})
batch_size, n_channels, height, width, depth = net.inputs[input_blob].shape
batch_size, n_out_channels, height_out, width_out, depth_out = net.outputs[out_blob].shape
log.info("The network inputs are:")
for idx, input_layer in enumerate(net.inputs.keys()):
log.info("{}: {}, shape = {} [N,C,H,W,D]".format(idx,input_layer,net.inputs[input_layer].shape))
log.info("The network outputs are:")
for idx, output_layer in enumerate(net.outputs.keys()):
log.info("{}: {}, shape = {} [N,C,H,W,D]".format(idx,output_layer,net.outputs[output_layer].shape))
# Loading model to the plugin
log.info("Loading model to the plugin")
exec_net = ie.load_network(network=net, device_name=args.device)
del net
if args.stats:
# Print the latency and throughput for inference
print_stats(exec_net, input_data, n_channels,
batch_size, input_blob, out_blob, args)
"""
OpenVINO inference code
input_blob is the name (string) of the input tensor in the graph
out_blob is the name (string) of the output tensor in the graph
Essentially, this looks exactly like a feed_dict for TensorFlow inference
"""
# Go through the sample validation dataset to plot predictions
predictions_ov = np.zeros((num_imgs, n_out_channels,
depth_out, height_out, width_out))
log.info("Starting OpenVINO inference")
ov_times = []
for idx in tqdm(range(0, num_imgs)):
start_time = time()
res = exec_net.infer(inputs={input_blob: input_data[[idx],:n_channels]})
ov_times.append(time() - start_time)
predictions_ov[idx, ] = res[out_blob]
#print("{}, {}".format(imgFiles[idx], dice_score(res[out_blob],label_data_ov[idx])))
log.info("Finished OpenVINO inference")
del exec_net
"""
Load the data for Keras
"""
input_data, label_data_keras, img_indicies = load_data(imgFiles, mskFiles,
crop_dim, n_channels, n_out_channels,
openVINO_order=False)
    # Load the Keras model for inference
model = K.models.load_model(args.keras_model, compile=False)
# Inference only Keras
K.backend._LEARNING_PHASE = tf.constant(0)
K.backend.set_learning_phase(False)
K.backend.set_learning_phase(0)
K.backend.set_image_data_format("channels_last")
predictions_keras = np.zeros((num_imgs,
height_out, width_out, depth_out, n_out_channels))
log.info("Starting Keras inference")
keras_times = []
for idx in tqdm(range(num_imgs)):
start_time = time()
res = model.predict(input_data[[idx],...,:n_channels])
keras_times.append(time() - start_time)
#print("{}, {}".format(imgFiles[idx], dice_score(res,label_data_keras[idx])))
predictions_keras[idx] = res
log.info("Finished Keras inference")
save_directory = "predictions_openvino"
    os.makedirs(save_directory, exist_ok=True)
"""
Evaluate model with Dice metric
"""
out_channel = 0
for idx in tqdm(range(num_imgs)):
filename = os.path.splitext(os.path.splitext(img_indicies[idx])[0])[0]
img = input_data[idx,...,:n_channels]
ground_truth = label_data_keras[idx, :, :, :, out_channel]
# Transpose the OpenVINO prediction back to NCHWD (to be consistent with Keras)
pred_ov = np.transpose(predictions_ov, [0,2,3,4,1])[idx, :, :, :, out_channel]
pred_keras = predictions_keras[idx, :, :, :, out_channel]
dice_ov = dice_score(pred_ov, ground_truth)
dice_keras = dice_score(pred_keras, ground_truth)
img_nib = nib.Nifti1Image(img, np.eye(4))
img_nib.to_filename(os.path.join(save_directory,
"{}_img.nii.gz".format(filename)))
msk_nib = nib.Nifti1Image(ground_truth, np.eye(4))
msk_nib.to_filename(os.path.join(save_directory,
"{}_msk.nii.gz".format(filename)))
pred_ov_nib = nib.Nifti1Image(pred_ov, np.eye(4))
pred_ov_nib.to_filename(os.path.join(save_directory,
"{}_pred_ov.nii.gz".format(filename)))
log.info("Image file {}: OpenVINO Dice score = {:f}, "
"Keras/TF Dice score = {:f}, Maximum absolute pixel difference OV versus Keras/TF = {:.2e}".format(
img_indicies[idx], dice_ov, dice_keras, np.mean(np.abs(pred_ov - pred_keras))))
log.info("Average inference time: \n"
"OpenVINO = {} seconds (s.d. {})\n "
"Keras/TF = {} seconds (s.d. {})\n".format(np.mean(ov_times),
np.std(ov_times),
np.mean(keras_times),
np.std(keras_times)))
log.info("Raw OpenVINO inference times = {} seconds".format(ov_times))
log.info("Raw Keras inference times = {} seconds".format(keras_times))
if __name__ == '__main__':
sys.exit(main() or 0)
```
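The preprocessing in `load_data` above amounts to a center crop down to the network input size followed by z-normalization. A minimal single-channel sketch (the 155x240x240 volume size is only an illustrative choice):
```python
import numpy as np

def center_crop(img, crop_dim):
    slices = []
    for crop_len, img_len in zip(crop_dim, img.shape):
        start = (img_len - crop_len) // 2
        slices.append(slice(start, start + crop_len))
    return img[tuple(slices)]

def z_normalize(img):
    return (img - img.mean()) / img.std()

vol = np.random.rand(155, 240, 240)
vol = z_normalize(center_crop(vol, (144, 144, 144)))
print(vol.shape, vol.mean(), vol.std())   # (144, 144, 144), mean ~ 0, std ~ 1
```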
#### File: medical-decathlon/testing/testing.py
```python
import numpy as np
import os
import argparse
import psutil
import time
import datetime
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model.signature_def_utils import predict_signature_def
from tensorflow.python.saved_model import tag_constants
parser = argparse.ArgumentParser(
description="Sanity testing for 3D and 2D Convolution Models",add_help=True)
parser.add_argument("--dim_length",
type = int,
default=16,
help="Tensor cube length of side")
parser.add_argument("--num_channels",
type = int,
default=1,
help="Number of channels")
parser.add_argument("--num_outputs",
type = int,
default=1,
help="Number of outputs")
parser.add_argument("--bz",
type = int,
default=1,
help="Batch size")
parser.add_argument("--lr",
type = float,
default=0.001,
help="Learning rate")
parser.add_argument("--num_datapoints",
type = int,
default=1024,
help="Number of datapoints")
parser.add_argument("--epochs",
type = int,
default=3,
help="Number of epochs")
parser.add_argument("--intraop_threads",
type = int,
default=psutil.cpu_count(logical=False),
help="Number of intraop threads")
parser.add_argument("--interop_threads",
type = int,
default=2,
help="Number of interop threads")
parser.add_argument("--blocktime",
type = int,
default=0,
help="Block time for CPU threads")
parser.add_argument("--print_model",
action="store_true",
default=False,
help="Print the summary of the model layers")
parser.add_argument("--use_upsampling",
action="store_true",
default=False,
help="Use upsampling instead of transposed convolution")
parser.add_argument("--D2",
action="store_true",
default=False,
help="Use 2D model and images instead of 3D.")
parser.add_argument("--single_class_output",
action="store_true",
default=False,
help="Use binary classifier instead of U-Net")
parser.add_argument("--mkl_verbose",
action="store_true",
default=False,
help="Print MKL debug statements.")
parser.add_argument("--inference",
action="store_true",
default=False,
help="Test inference speed. Default=Test training speed")
parser.add_argument("--ngraph",
action="store_true",
default=False,
help="Use ngraph")
parser.add_argument("--keras_api",
action="store_true",
default=False,
help="Use Keras API. False=Use tf.keras")
parser.add_argument("--channels_first",
action="store_true",
default=False,
help="Channels first. NCHW")
args = parser.parse_args()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # Get rid of the AVX, SSE warnings
if args.mkl_verbose:
os.environ["MKL_VERBOSE"] = "1" # Print out messages from MKL operations
os.environ["MKLDNN_VERBOSE"] = "1" # Print out messages from MKL-DNN operations
os.environ["OMP_NUM_THREADS"] = str(args.intraop_threads)
os.environ["KMP_BLOCKTIME"] = str(args.blocktime)
os.environ["KMP_AFFINITY"] = "granularity=thread,compact,1,0"
print("Started script on {}".format(datetime.datetime.now()))
print("args = {}".format(args))
os.system("uname -a")
print("TensorFlow version: {}".format(tf.__version__))
if args.keras_api:
import keras as K
print("Using Keras API")
else:
from tensorflow import keras as K
print("Using tf.keras")
if args.ngraph:
print("Using nGraph")
import ngraph_bridge
if args.channels_first:
os.environ["NGRAPH_PASS_ENABLES"]="CPUReshapeSinking:1;ReshapeElimination:1"
print("Keras API version: {}".format(K.__version__))
if args.D2: # Define shape of the tensors (2D)
dims = (1,2)
if args.channels_first:
tensor_shape = (args.num_channels,
args.dim_length,
args.dim_length)
out_shape = (args.num_outputs,
args.dim_length,
args.dim_length)
else:
tensor_shape = (args.dim_length,
args.dim_length,
args.num_channels)
out_shape = (args.dim_length,
args.dim_length,
args.num_outputs)
else: # Define shape of the tensors (3D)
dims=(1,2,3)
if args.channels_first:
tensor_shape = (args.num_channels,
args.dim_length,
args.dim_length,
args.dim_length)
        out_shape = (args.num_outputs,
                     args.dim_length,
                     args.dim_length,
                     args.dim_length)
else:
tensor_shape = (args.dim_length,
args.dim_length,
args.dim_length,
args.num_channels)
        out_shape = (args.dim_length,
                     args.dim_length,
                     args.dim_length,
                     args.num_outputs)
# Optimize CPU threads for TensorFlow
config = tf.ConfigProto(
inter_op_parallelism_threads=args.interop_threads,
intra_op_parallelism_threads=args.intraop_threads)
# Configure only as much GPU memory as needed during runtime
# Default is to use the entire GPU memory
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.backend.set_session(sess)
def dice_coef(y_true, y_pred, axis=(1,2,3), smooth=1.0):
intersection = tf.reduce_sum(y_true * K.backend.round(y_pred), axis=axis)
union = tf.reduce_sum(y_true + K.backend.round(y_pred), axis=axis)
numerator = tf.constant(2.) * intersection + smooth
denominator = union + smooth
coef = numerator / denominator
return tf.reduce_mean(coef)
def dice_coef_loss(target, prediction, axis=(1,2,3), smooth=1.0):
"""
    Sorensen Dice loss
Using -log(Dice) as the loss since it is better behaved.
Also, the log allows avoidance of the division which
can help prevent underflow when the numbers are very small.
"""
intersection = tf.reduce_sum(prediction * target, axis=axis)
p = tf.reduce_sum(prediction, axis=axis)
t = tf.reduce_sum(target, axis=axis)
numerator = tf.reduce_mean(2. * intersection + smooth)
denominator = tf.reduce_mean(t + p + smooth)
dice_loss = -tf.log(numerator) + tf.log(denominator)
return dice_loss
if args.channels_first:
    concat_axis = 1
data_format = "channels_first"
else:
concat_axis = -1
data_format = "channels_last"
def unet3D(input_img, use_upsampling=False, n_out=1, dropout=0.2,
print_summary = False, return_model=False):
"""
3D U-Net model
"""
print("3D U-Net Segmentation")
inputs = K.layers.Input(shape=input_img, name="Input_Image")
params = dict(kernel_size=(3, 3, 3), activation=None,
padding="same", data_format=data_format,
kernel_initializer="he_uniform")
conv1 = K.layers.Conv3D(name="conv1a", filters=32, **params)(inputs)
conv1 = K.layers.BatchNormalization()(conv1)
conv1 = K.layers.Activation("relu")(conv1)
conv1 = K.layers.Conv3D(name="conv1b", filters=64, **params)(conv1)
conv1 = K.layers.BatchNormalization()(conv1)
conv1 = K.layers.Activation("relu")(conv1)
pool1 = K.layers.MaxPooling3D(name="pool1", pool_size=(2, 2, 2))(conv1)
conv2 = K.layers.Conv3D(name="conv2a", filters=64, **params)(pool1)
conv2 = K.layers.BatchNormalization()(conv2)
conv2 = K.layers.Activation("relu")(conv2)
conv2 = K.layers.Conv3D(name="conv2b", filters=128, **params)(conv2)
conv2 = K.layers.BatchNormalization()(conv2)
conv2 = K.layers.Activation("relu")(conv2)
pool2 = K.layers.MaxPooling3D(name="pool2", pool_size=(2, 2, 2))(conv2)
conv3 = K.layers.Conv3D(name="conv3a", filters=128, **params)(pool2)
conv3 = K.layers.BatchNormalization()(conv3)
conv3 = K.layers.Activation("relu")(conv3)
conv3 = K.layers.Dropout(dropout)(conv3) ### Trying dropout layers earlier on, as indicated in the paper
conv3 = K.layers.Conv3D(name="conv3b", filters=256, **params)(conv3)
conv3 = K.layers.BatchNormalization()(conv3)
conv3 = K.layers.Activation("relu")(conv3)
pool3 = K.layers.MaxPooling3D(name="pool3", pool_size=(2, 2, 2))(conv3)
conv4 = K.layers.Conv3D(name="conv4a", filters=256, **params)(pool3)
conv4 = K.layers.BatchNormalization()(conv4)
conv4 = K.layers.Activation("relu")(conv4)
conv4 = K.layers.Dropout(dropout)(conv4) ### Trying dropout layers earlier on, as indicated in the paper
conv4 = K.layers.Conv3D(name="conv4b", filters=512, **params)(conv4)
conv4 = K.layers.BatchNormalization()(conv4)
conv4 = K.layers.Activation("relu")(conv4)
if use_upsampling:
up = K.layers.UpSampling3D(name="up4", size=(2, 2, 2))(conv4)
else:
up = K.layers.Conv3DTranspose(name="transConv4", filters=512, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv4)
up4 = K.layers.concatenate([up, conv3], axis=concat_axis)
conv5 = K.layers.Conv3D(name="conv5a", filters=256, **params)(up4)
conv5 = K.layers.BatchNormalization()(conv5)
conv5 = K.layers.Activation("relu")(conv5)
conv5 = K.layers.Conv3D(name="conv5b", filters=256, **params)(conv5)
conv5 = K.layers.BatchNormalization()(conv5)
conv5 = K.layers.Activation("relu")(conv5)
if use_upsampling:
up = K.layers.UpSampling3D(name="up5", size=(2, 2, 2))(conv5)
else:
up = K.layers.Conv3DTranspose(name="transConv5", filters=256, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv5)
up5 = K.layers.concatenate([up, conv2], axis=concat_axis)
conv6 = K.layers.Conv3D(name="conv6a", filters=128, **params)(up5)
conv6 = K.layers.BatchNormalization()(conv6)
conv6 = K.layers.Activation("relu")(conv6)
conv6 = K.layers.Conv3D(name="conv6b", filters=128, **params)(conv6)
conv6 = K.layers.BatchNormalization()(conv6)
conv6 = K.layers.Activation("relu")(conv6)
if use_upsampling:
up = K.layers.UpSampling3D(name="up6", size=(2, 2, 2))(conv6)
else:
up = K.layers.Conv3DTranspose(name="transConv6", filters=128, data_format=data_format,
kernel_size=(2, 2, 2), strides=(2, 2, 2), padding="same")(conv6)
up6 = K.layers.concatenate([up, conv1], axis=concat_axis)
conv7 = K.layers.Conv3D(name="conv7a", filters=64, **params)(up6)
conv7 = K.layers.BatchNormalization()(conv7)
conv7 = K.layers.Activation("relu")(conv7)
conv7 = K.layers.Conv3D(name="conv7b", filters=64, **params)(conv7)
conv7 = K.layers.BatchNormalization()(conv7)
conv7 = K.layers.Activation("relu")(conv7)
pred = K.layers.Conv3D(name="Prediction", filters=n_out, kernel_size=(1, 1, 1),
data_format=data_format, activation="sigmoid")(conv7)
if return_model:
model = K.models.Model(inputs=[inputs], outputs=[pred])
if print_summary:
print(model.summary())
return pred, model
else:
return pred
def unet2D(input_tensor, use_upsampling=False,
n_out=1, dropout=0.2, print_summary = False, return_model=False):
"""
2D U-Net
"""
print("2D U-Net Segmentation")
inputs = K.layers.Input(shape=input_tensor, name="Images")
# Convolution parameters
params = dict(kernel_size=(3, 3), activation="relu",
padding="same", data_format=data_format,
kernel_initializer="he_uniform")
# Transposed convolution parameters
params_trans = dict(data_format=data_format,
kernel_size=(2, 2), strides=(2, 2),
padding="same")
fms = 64
conv1 = K.layers.Conv2D(name="conv1a", filters=fms, **params)(inputs)
conv1 = K.layers.Conv2D(name="conv1b", filters=fms, **params)(conv1)
pool1 = K.layers.MaxPooling2D(name="pool1", pool_size=(2, 2))(conv1)
conv2 = K.layers.Conv2D(name="conv2a", filters=fms*2, **params)(pool1)
conv2 = K.layers.Conv2D(name="conv2b", filters=fms*2, **params)(conv2)
pool2 = K.layers.MaxPooling2D(name="pool2", pool_size=(2, 2))(conv2)
conv3 = K.layers.Conv2D(name="conv3a", filters=fms*4, **params)(pool2)
#conv3 = K.layers.Dropout(dropout)(conv3)
conv3 = K.layers.Conv2D(name="conv3b", filters=fms*4, **params)(conv3)
pool3 = K.layers.MaxPooling2D(name="pool3", pool_size=(2, 2))(conv3)
conv4 = K.layers.Conv2D(name="conv4a", filters=fms*8, **params)(pool3)
#conv4 = K.layers.Dropout(dropout)(conv4)
conv4 = K.layers.Conv2D(name="conv4b", filters=fms*8, **params)(conv4)
pool4 = K.layers.MaxPooling2D(name="pool4", pool_size=(2, 2))(conv4)
conv5 = K.layers.Conv2D(name="conv5a", filters=fms*16, **params)(pool4)
conv5 = K.layers.Conv2D(name="conv5b", filters=fms*16, **params)(conv5)
    if use_upsampling:
up = K.layers.UpSampling2D(name="up6", size=(2, 2))(conv5)
else:
up = K.layers.Conv2DTranspose(name="transConv6", filters=fms*8,
**params_trans)(conv5)
up6 = K.layers.concatenate([up, conv4], axis=concat_axis)
conv6 = K.layers.Conv2D(name="conv6a", filters=fms*8, **params)(up6)
conv6 = K.layers.Conv2D(name="conv6b", filters=fms*8, **params)(conv6)
    if use_upsampling:
up = K.layers.UpSampling2D(name="up7", size=(2, 2))(conv6)
else:
up = K.layers.Conv2DTranspose(name="transConv7", filters=fms*4,
**params_trans)(conv6)
up7 = K.layers.concatenate([up, conv3], axis=concat_axis)
conv7 = K.layers.Conv2D(name="conv7a", filters=fms*4, **params)(up7)
conv7 = K.layers.Conv2D(name="conv7b", filters=fms*4, **params)(conv7)
    if use_upsampling:
up = K.layers.UpSampling2D(name="up8", size=(2, 2))(conv7)
else:
up = K.layers.Conv2DTranspose(name="transConv8", filters=fms*2,
**params_trans)(conv7)
up8 = K.layers.concatenate([up, conv2], axis=concat_axis)
conv8 = K.layers.Conv2D(name="conv8a", filters=fms*2, **params)(up8)
conv8 = K.layers.Conv2D(name="conv8b", filters=fms*2, **params)(conv8)
    if use_upsampling:
up = K.layers.UpSampling2D(name="up9", size=(2, 2))(conv8)
else:
up = K.layers.Conv2DTranspose(name="transConv9", filters=fms,
**params_trans)(conv8)
up9 = K.layers.concatenate([up, conv1], axis=concat_axis)
conv9 = K.layers.Conv2D(name="conv9a", filters=fms, **params)(up9)
conv9 = K.layers.Conv2D(name="conv9b", filters=fms, **params)(conv9)
pred = K.layers.Conv2D(name="PredictionMask",
filters=n_out, kernel_size=(1, 1),
data_format=data_format,
activation="sigmoid")(conv9)
if return_model:
model = K.models.Model(inputs=[inputs], outputs=[pred])
if print_summary:
print(model.summary())
return pred, model
else:
return pred
def conv3D(input_img, print_summary = False, dropout=0.2, n_out=1,
return_model=False):
"""
Simple 3D convolution model based on VGG-16
"""
print("3D Convolutional Binary Classifier based on VGG-16")
inputs = K.layers.Input(shape=input_img, name="Images")
params = dict(kernel_size=(3, 3, 3), activation="relu",
padding="same", data_format=data_format,
kernel_initializer="he_uniform") #RandomUniform(minval=-0.01, maxval=0.01, seed=816))
conv1 = K.layers.Conv3D(name="conv1", filters=64, **params)(inputs)
conv2 = K.layers.Conv3D(name="conv2", filters=64, **params)(conv1)
pool1 = K.layers.MaxPooling3D(name="pool1", pool_size=(2, 2, 2))(conv2)
conv3 = K.layers.Conv3D(name="conv3", filters=128, **params)(pool1)
conv4 = K.layers.Conv3D(name="conv4", filters=128, **params)(conv3)
pool2 = K.layers.MaxPooling3D(name="pool2", pool_size=(2, 2, 2))(conv4)
conv5 = K.layers.Conv3D(name="conv5", filters=256, **params)(pool2)
conv6 = K.layers.Conv3D(name="conv6", filters=256, **params)(conv5)
conv7 = K.layers.Conv3D(name="conv7", filters=256, **params)(conv6)
pool3 = K.layers.MaxPooling3D(name="pool3", pool_size=(2, 2, 2))(conv7)
conv8 = K.layers.Conv3D(name="conv8", filters=512, **params)(pool3)
conv9 = K.layers.Conv3D(name="conv9", filters=512, **params)(conv8)
conv10 = K.layers.Conv3D(name="conv10", filters=512, **params)(conv9)
pool4 = K.layers.MaxPooling3D(name="pool4", pool_size=(2, 2, 2))(conv10)
conv11 = K.layers.Conv3D(name="conv11", filters=512, **params)(pool4)
conv12 = K.layers.Conv3D(name="conv12", filters=512, **params)(conv11)
conv13 = K.layers.Conv3D(name="conv13", filters=512, **params)(conv12)
pool5 = K.layers.MaxPooling3D(name="pool5", pool_size=(2, 2, 2))(conv13)
flat = K.layers.Flatten()(pool5)
dense1 = K.layers.Dense(4096, activation="relu")(flat)
drop1 = K.layers.Dropout(dropout)(dense1)
dense2 = K.layers.Dense(4096, activation="relu")(drop1)
pred = K.layers.Dense(n_out, name="Prediction", activation="sigmoid")(dense2)
if return_model:
model = K.models.Model(inputs=[inputs], outputs=[pred])
if print_summary:
print(model.summary())
return pred, model
else:
return pred
def conv2D(input_tensor, print_summary = False, dropout=0.2, n_out=1, return_model=False):
"""
Simple 2D convolution model based on VGG-16
"""
print("2D Convolutional Binary Classifier based on VGG-16")
inputs = K.layers.Input(shape=input_tensor, name="Images")
params = dict(kernel_size=(3, 3), activation="relu",
padding="same", data_format=data_format,
kernel_initializer="he_uniform") #RandomUniform(minval=-0.01, maxval=0.01, seed=816))
conv1 = K.layers.Conv2D(name="conv1", filters=64, **params)(inputs)
conv2 = K.layers.Conv2D(name="conv2", filters=64, **params)(conv1)
pool1 = K.layers.MaxPooling2D(name="pool1", pool_size=(2, 2))(conv2)
conv3 = K.layers.Conv2D(name="conv3", filters=128, **params)(pool1)
conv4 = K.layers.Conv2D(name="conv4", filters=128, **params)(conv3)
pool2 = K.layers.MaxPooling2D(name="pool2", pool_size=(2, 2))(conv4)
conv5 = K.layers.Conv2D(name="conv5", filters=256, **params)(pool2)
conv6 = K.layers.Conv2D(name="conv6", filters=256, **params)(conv5)
conv7 = K.layers.Conv2D(name="conv7", filters=256, **params)(conv6)
pool3 = K.layers.MaxPooling2D(name="pool3", pool_size=(2, 2))(conv7)
conv8 = K.layers.Conv2D(name="conv8", filters=512, **params)(pool3)
conv9 = K.layers.Conv2D(name="conv9", filters=512, **params)(conv8)
conv10 = K.layers.Conv2D(name="conv10", filters=512, **params)(conv9)
pool4 = K.layers.MaxPooling2D(name="pool4", pool_size=(2, 2))(conv10)
conv11 = K.layers.Conv2D(name="conv11", filters=512, **params)(pool4)
conv12 = K.layers.Conv2D(name="conv12", filters=512, **params)(conv11)
conv13 = K.layers.Conv2D(name="conv13", filters=512, **params)(conv12)
pool5 = K.layers.MaxPooling2D(name="pool5", pool_size=(2, 2))(conv13)
flat = K.layers.Flatten()(pool5)
dense1 = K.layers.Dense(4096, activation="relu")(flat)
drop1 = K.layers.Dropout(dropout)(dense1)
dense2 = K.layers.Dense(4096, activation="relu")(drop1)
pred = K.layers.Dense(n_out, name="Prediction", activation="sigmoid")(dense2)
if return_model:
model = K.models.Model(inputs=[inputs], outputs=[pred])
if print_summary:
print(model.summary())
return pred, model
else:
return pred
if args.single_class_output:
if args.D2: # 2D convnet model
pred, model = conv2D(tensor_shape,
print_summary=args.print_model, n_out=args.num_outputs,
return_model=True)
else: # 3D convet model
pred, model = conv3D(tensor_shape,
print_summary=args.print_model, n_out=args.num_outputs,
return_model=True)
else:
if args.D2: # 2D U-Net model
pred, model = unet2D(tensor_shape,
use_upsampling=args.use_upsampling,
print_summary=args.print_model, n_out=args.num_outputs,
return_model=True)
else: # 3D U-Net model
pred, model = unet3D(tensor_shape,
use_upsampling=args.use_upsampling,
print_summary=args.print_model, n_out=args.num_outputs,
return_model=True)
# Freeze layers
if args.inference:
for layer in model.layers:
layer.trainable = False
# Performance metrics for model
if args.single_class_output:
model.compile(loss="binary_crossentropy",
optimizer="adam",
metrics=["accuracy"])
else:
model.compile(loss=dice_coef_loss,
optimizer="adam",
metrics=[dice_coef, "accuracy"])
def get_imgs():
# Just feed completely random data in for the benchmark testing
sh = [args.bz] + list(tensor_shape)
imgs = np.random.rand(*sh)
while True:
yield imgs
def get_batch():
# Just feed completely random data in for the benchmark testing
sh = [args.bz] + list(tensor_shape)
imgs = np.random.rand(*sh)
if args.single_class_output:
truths = np.random.rand(args.bz, args.num_outputs)
else:
        truths = np.random.rand(*([args.bz] + list(out_shape)))
while True:
yield imgs, truths
# Same number of samples to process regardless of batch size
# So if we have a larger batch size we can take fewer steps.
total_steps = args.num_datapoints//args.bz
print("Using random data.")
if args.inference:
print("Testing inference speed.")
else:
print("Testing training speed.")
start_time = time.time()
if args.inference:
for _ in range(args.epochs):
model.predict_generator(get_imgs(), steps=total_steps, verbose=1)
else:
model.fit_generator(get_batch(), steps_per_epoch=total_steps,
epochs=args.epochs, verbose=1)
if args.inference:
import shutil
dirName = "./tensorflow_serving_model"
if args.single_class_output:
dirName += "_VGG16"
else:
dirName += "_UNET"
if args.D2:
dirName += "_2D"
else:
dirName += "_3D"
shutil.rmtree(dirName, ignore_errors=True)
# Save TensorFlow serving model
builder = saved_model_builder.SavedModelBuilder(dirName)
# Create prediction signature to be used by TensorFlow Serving Predict API
signature = predict_signature_def(inputs={"images": model.input},
outputs={"scores": model.output})
# Save the meta graph and the variables
builder.add_meta_graph_and_variables(sess=K.backend.get_session(), tags=[tag_constants.SERVING],
signature_def_map={"predict": signature})
builder.save()
print("Saved TensorFlow Serving model to: {}".format(dirName))
stop_time = time.time()
print("\n\nTotal time = {:,.3f} seconds".format(stop_time - start_time))
print("Total images = {:,}".format(args.epochs*args.num_datapoints))
print("Speed = {:,.3f} images per second".format( \
(args.epochs*args.num_datapoints)/(stop_time - start_time)))
``` |
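The `dice_coef_loss` above leans on the identity -log(numerator) + log(denominator) = -log(numerator / denominator), i.e. it is -log(Dice) computed without the explicit division. A standalone NumPy check of that identity (shapes and the smoothing term mirror the defaults above):
```python
import numpy as np

smooth = 1.0
target = np.random.randint(0, 2, size=(4, 16, 16, 16, 1)).astype(float)
pred = np.random.rand(4, 16, 16, 16, 1)

axis = (1, 2, 3)
intersection = np.sum(pred * target, axis=axis)
p, t = np.sum(pred, axis=axis), np.sum(target, axis=axis)
numerator = np.mean(2.0 * intersection + smooth)
denominator = np.mean(t + p + smooth)

loss = -np.log(numerator) + np.log(denominator)
print(np.isclose(loss, -np.log(numerator / denominator)))   # True
```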
{
"source": "jingxu85/theta-etl",
"score": 2
} |
#### File: blockchainetl/streaming/streamer_adapter_stub.py
```python
class StreamerAdapterStub:
def open(self):
pass
def get_current_block_height(self):
return 0
def export_all(self, start_block, end_block):
pass
def close(self):
pass
```
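The stub above spells out the four hooks a streamer drives: `open`, `get_current_block_height`, `export_all`, and `close`. A hypothetical concrete adapter (the class name and the `provider` interface below are illustrative, not part of theta-etl) would fill them in along these lines:
```python
class PrintingStreamerAdapter:
    def __init__(self, provider):
        self.provider = provider      # assumed to expose get_latest_height()

    def open(self):
        pass                          # e.g. open connections or item exporters

    def get_current_block_height(self):
        return self.provider.get_latest_height()

    def export_all(self, start_block, end_block):
        for block_number in range(start_block, end_block + 1):
            print("exporting block", block_number)

    def close(self):
        pass                          # e.g. close connections or item exporters
```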
#### File: thetaetl/mappers/transaction_mapper.py
```python
from thetaetl.domain.transaction import ThetaTransaction
from thetaetl.mappers.raw_transaction_mapper import ThetaRawTransactionMapper
from thetaetl.utils import hex_to_dec, to_normalized_address
class ThetaTransactionMapper(object):
def __init__(self, raw_transaction_mapper=None):
if raw_transaction_mapper is None:
self.raw_transaction_mapper = ThetaRawTransactionMapper()
else:
self.raw_transaction_mapper = raw_transaction_mapper
def json_dict_to_transaction(self, json_dict):
transaction = ThetaTransaction()
transaction.hash = json_dict.get('hash')
transaction.type = json_dict.get('tx_type')
transaction.raw = self.raw_transaction_mapper.json_dict_to_raw_transaction(json_dict.get('raw'), transaction.type)
return transaction
def transaction_to_dict(self, transaction):
raw_transactions = self.raw_transaction_mapper.raw_transaction_to_dict(transaction.raw, transaction.type)
return {
'type': 'transaction',
'raw': raw_transactions,
'tx_type': transaction.type,
'hash': transaction.hash,
}
```
#### File: thetaetl/mappers/vote_mapper.py
```python
from thetaetl.domain.vote import ThetaVote
class ThetaVoteMapper(object):
def json_dict_to_vote(self, json_dict):
vote = ThetaVote()
vote.Block = json_dict.get('Block')
vote.Epoch = json_dict.get('Epoch')
vote.Height = json_dict.get('Height')
vote.ID = json_dict.get('ID')
vote.Signature = json_dict.get('Signature')
return vote
def vote_to_dict(self, vote):
return {
'type': 'vote',
'Block': vote.Block,
'Epoch': vote.Epoch,
'Height': vote.Height,
'ID': vote.ID,
'Signature': vote.Signature
}
``` |
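An illustrative round trip through `ThetaVoteMapper` (the module path is inferred from the file layout above and the field values are made up):
```python
from thetaetl.mappers.vote_mapper import ThetaVoteMapper

raw_vote = {                          # made-up values, shapes only
    "Block": "0xabc123",
    "Epoch": "1200",
    "Height": "1199",
    "ID": "0x2e83f6",
    "Signature": "0x5a1c9d",
}
mapper = ThetaVoteMapper()
vote = mapper.json_dict_to_vote(raw_vote)
print(mapper.vote_to_dict(vote)["type"])   # 'vote'
```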
{
"source": "jing-xu/analytics-zoo",
"score": 2
} |
#### File: learn/tf/estimator.py
```python
from pyspark.sql import DataFrame
from bigdl.optim.optimizer import MaxEpoch
from zoo.tfpark.tf_dataset import TFNdarrayDataset
from zoo.tfpark.model import _standarize_feature_label_dataset
from zoo.common.utils import load_from_file
from zoo.orca.data.tf.data import Dataset, TFDataDataset2
from zoo.orca.data import SparkXShards
from zoo.orca.learn.tf.utils import *
from zoo.orca.learn.trigger import Trigger
from zoo.orca.learn.utils import find_latest_checkpoint, convert_predict_to_xshard
from zoo.tfpark import KerasModel
from zoo.tfpark import TFOptimizer, TFNet, ZooOptimizer
from zoo.tfpark.tf_optimizer import StatelessMetric
from zoo.tfpark.utils import evaluate_metrics
from zoo.util import nest
from zoo.util.tf import save_tf_checkpoint
class Estimator(object):
def fit(self, data, epochs, **kwargs):
pass
def predict(self, data, **kwargs):
pass
def evaluate(self, data, **kwargs):
pass
def load_orca_checkpoint(self, path, version):
"""
Load specified Orca checkpoint.
:param path: checkpoint directory which contains model.* and
optimMethod-TFParkTraining.* files.
:param version: checkpoint version, which is the suffix of model.* file,
        i.e., for the model.4 file, the version is 4.
"""
self.load_checkpoint = True
self.checkpoint_path = path
self.checkpoint_version = version
def load_latest_orca_checkpoint(self, path):
"""
Load latest Orca checkpoint under specified directory.
:param path: directory containing Orca checkpoint files.
"""
ckpt_path, _, version = find_latest_checkpoint(path, model_type="tf")
if ckpt_path is None:
raise Exception("Cannot find checkpoint")
self.load_orca_checkpoint(ckpt_path, version)
def set_tensorboard(self, log_dir, app_name):
"""
Set summary information during the training process for visualization purposes.
Saved summary can be viewed via TensorBoard.
In order to take effect, it needs to be called before fit.
Training summary will be saved to 'log_dir/app_name/train'
and validation summary (if any) will be saved to 'log_dir/app_name/validation'.
# Arguments
:param log_dir: The base directory path to store training and validation logs.
:param app_name: The name of the application.
"""
self.log_dir = log_dir
self.app_name = app_name
def get_train_summary(self, tag=None):
"""
Get the scalar from model train summary
Return list of summary data of [iteration_number, scalar_value, timestamp]
# Arguments
tag: The string variable represents the scalar wanted
"""
if self.tf_optimizer:
return self.tf_optimizer.estimator.get_train_summary(tag)
return None
def get_validation_summary(self, tag=None):
"""
Get the scalar from model validation summary
Return list of summary data of [iteration_number, scalar_value, timestamp]
Note: The metric and tag may not be consistent
Please look up following form to pass tag parameter
Left side is your metric during compile
Right side is the tag you should pass
'Accuracy' | 'Top1Accuracy'
'BinaryAccuracy' | 'Top1Accuracy'
'CategoricalAccuracy' | 'Top1Accuracy'
'SparseCategoricalAccuracy' | 'Top1Accuracy'
'AUC' | 'AucScore'
'HitRatio' | 'HitRate@k' (k is Top-k)
'Loss' | 'Loss'
'MAE' | 'MAE'
'NDCG' | 'NDCG'
'TFValidationMethod' | '${name + " " + valMethod.toString()}'
'Top5Accuracy' | 'Top5Accuracy'
'TreeNNAccuracy' | 'TreeNNAccuracy()'
'MeanAveragePrecision' | 'MAP@k' (k is Top-k) (BigDL)
'MeanAveragePrecision' | 'PascalMeanAveragePrecision' (Zoo)
'StatelessMetric' | '${name}'
# Arguments
tag: The string variable represents the scalar wanted
"""
if self.tf_optimizer:
for val_method in self.tf_optimizer.tf_model.val_methods:
if isinstance(val_method, StatelessMetric):
if tag == val_method.name:
return self.tf_optimizer.estimator.get_validation_summary(tag)
else:
if tag == str(val_method.val_method):
return self.tf_optimizer.estimator.\
get_validation_summary("{} {}".format(val_method.name, tag))
continue
return None
@staticmethod
def from_graph(*, inputs, outputs=None,
labels=None, loss=None, optimizer=None,
clip_norm=None, clip_value=None,
metrics=None, updates=None,
sess=None, model_dir=None, backend="bigdl"):
"""
        Create an Estimator for a tensorflow graph.
:param inputs: input tensorflow tensors.
:param outputs: output tensorflow tensors.
:param labels: label tensorflow tensors.
:param loss: The loss tensor of the TensorFlow model, should be a scalar
:param optimizer: tensorflow optimization method.
:param clip_norm: float >= 0. Gradients will be clipped when their L2 norm exceeds
this value.
:param clip_value: a float >= 0 or a tuple of two floats.
If clip_value is a float, gradients will be clipped when their absolute value
exceeds this value.
        If clip_value is a tuple of two floats, gradients will be clipped when their value is
        less than clip_value[0] or larger than clip_value[1].
:param metrics: metric tensor.
:param sess: the current TensorFlow Session, if you want to used a pre-trained model,
you should use the Session to load the pre-trained variables and pass it to estimator
:param model_dir: location to save model checkpoint and summaries.
:param backend: backend for estimator. Now it only can be "bigdl".
:return: an Estimator object.
"""
assert backend == "bigdl", "only bigdl backend is supported for now"
return TFOptimizerWrapper(inputs=inputs,
outputs=outputs,
labels=labels,
loss=loss,
optimizer=optimizer,
clip_norm=clip_norm,
clip_value=clip_value,
metrics=metrics, updates=updates,
sess=sess,
model_dir=model_dir
)
@staticmethod
def from_keras(keras_model, metrics=None, model_dir=None, optimizer=None, backend="bigdl"):
"""
Create an Estimator from a tensorflow.keras model. The model must be compiled.
:param keras_model: the tensorflow.keras model, which must be compiled.
:param metrics: user specified metric.
:param model_dir: location to save model checkpoint and summaries.
:param optimizer: an optional bigdl optimMethod that will override the optimizer in
keras_model.compile
:param backend: backend for estimator. Now it only can be "bigdl".
:return: an Estimator object.
"""
assert backend == "bigdl", "only bigdl backend is supported for now"
return TFKerasWrapper(keras_model, metrics, model_dir, optimizer)
def save_tf_checkpoint(self, path):
"""
Save tensorflow checkpoint in this estimator.
:param path: tensorflow checkpoint path.
"""
raise NotImplementedError()
def save_keras_model(self, path, overwrite=True):
"""
Save tensorflow keras model in this estimator.
:param path: keras model save path.
:param overwrite: Whether to silently overwrite any existing file at the target location.
"""
raise NotImplementedError()
@staticmethod
def load_keras_model(path):
"""
Create Estimator by loading an existing keras model (with weights) from HDF5 file.
:param path: String. The path to the pre-defined model.
:return: Orca TF Estimator.
"""
from tensorflow.python.keras import models
def load_func(file_path):
return models.load_model(file_path)
model = load_from_file(load_func, path)
return Estimator.from_keras(keras_model=model)
def save_keras_weights(self, filepath, overwrite=True, save_format=None):
"""
Save tensorflow keras model weights in this estimator.
        :param filepath: keras model weights save path.
:param overwrite: Whether to silently overwrite any existing file at the target location.
:param save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
'.keras' will default to HDF5 if `save_format` is `None`. Otherwise
`None` defaults to 'tf'.
"""
raise NotImplementedError()
def load_keras_weights(self, filepath, by_name=False):
"""
        Load tensorflow keras model weights in this estimator.
        :param filepath: keras model weights file path.
:param by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
"""
raise NotImplementedError()
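# Illustrative usage of the factory methods above, shown as comments only; the
# tensors, optimizer, and column names are placeholders rather than a prescribed API:
#
#   loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels, logits))
#   est = Estimator.from_graph(inputs=features, outputs=logits, labels=labels,
#                              loss=loss, optimizer=tf.train.AdamOptimizer(1e-3),
#                              metrics={"loss": loss})
#   est.fit(data=train_df, epochs=2, batch_size=64,
#           feature_cols=["features"], labels_cols=["label"])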
def is_tf_data_dataset(data):
is_dataset = isinstance(data, tf.data.Dataset)
is_dataset_v2 = isinstance(data, tf.python.data.ops.dataset_ops.DatasetV2)
return is_dataset or is_dataset_v2
def to_dataset(data, batch_size, batch_per_thread, validation_data,
feature_cols, labels_cols, hard_code_batch_size,
sequential_order, shuffle, auto_shard_files):
# todo wrap argument into kwargs
if validation_data:
if isinstance(data, SparkXShards):
assert isinstance(validation_data, SparkXShards), \
"train data and validation data should be both SparkXShards"
if isinstance(data, Dataset):
assert isinstance(validation_data, Dataset), \
"train data and validation data should be both orca.data.tf.Dataset"
if isinstance(data, DataFrame):
assert isinstance(validation_data, DataFrame), \
"train data and validation data should be both Spark DataFrame"
if isinstance(data, tf.data.Dataset):
assert isinstance(validation_data, tf.data.Dataset), \
"train data and validation data should be both tf.data.Dataset"
if isinstance(data, SparkXShards):
dataset = xshards_to_tf_dataset(data,
batch_size,
batch_per_thread,
validation_data,
hard_code_batch_size=hard_code_batch_size,
sequential_order=sequential_order,
shuffle=shuffle)
elif isinstance(data, Dataset):
dataset = TFDataDataset2(data, batch_size=batch_size,
batch_per_thread=batch_per_thread,
validation_dataset=validation_data)
elif isinstance(data, DataFrame):
dataset = TFDataset.from_dataframe(data, feature_cols, labels_cols,
batch_size,
batch_per_thread,
hard_code_batch_size,
validation_data,
sequential_order,
shuffle
)
elif is_tf_data_dataset(data):
dataset = TFDataset.from_tf_data_dataset(data,
batch_size,
batch_per_thread,
hard_code_batch_size,
validation_data,
sequential_order,
shuffle, auto_shard_files=auto_shard_files)
else:
raise ValueError("data must be SparkXShards or orca.data.tf.Dataset or "
"Spark DataFrame or tf.data.Dataset")
return dataset
class TFOptimizerWrapper(Estimator):
def __init__(self, *, inputs, outputs, labels, loss,
optimizer, clip_norm, clip_value,
metrics,
updates, sess,
model_dir
):
self.inputs = inputs
self.outputs = outputs
self.labels = labels
self.loss = loss
self.use_bigdl_optim = False
self.clip_norm = clip_norm
self.clip_value = clip_value
if optimizer is not None:
from zoo.orca.learn.optimizers import Optimizer
if isinstance(optimizer, Optimizer):
self.train_op = None
self.optimizer = optimizer.get_optimizer()
self.use_bigdl_optim = True
else:
assert isinstance(optimizer, tf.train.Optimizer), \
"optimizer is of type {}, ".format(type(optimizer)) + \
"it should be an instance of tf.train.Optimizer"
self.optimizer = ZooOptimizer(optimizer)
if clip_norm or clip_value:
gvs = self.optimizer.compute_gradients(self.loss)
if clip_norm:
gvs = [(tf.clip_by_norm(g_v[0], clip_norm), g_v[1]) for g_v in gvs]
if clip_value:
if isinstance(clip_value, tuple):
assert len(clip_value) == 2 and clip_value[0] < clip_value[1], \
"clip value should be (clip_min, clip_max)"
gvs = [(tf.clip_by_value(g_v[0], clip_value[0], clip_value[1]), g_v[1])
for g_v in gvs]
if isinstance(clip_value, (int, float)):
assert clip_value > 0, "clip value should be larger than 0"
gvs = [(tf.clip_by_value(g_v[0], -clip_value, clip_value), g_v[1])
for g_v in gvs]
else:
raise Exception("clip_value should be a tuple or one number")
self.train_op = self.optimizer.apply_gradients(gvs)
else:
self.train_op = self.optimizer.minimize(self.loss)
else:
self.optimizer = None
self.train_op = None
self.metrics = metrics
self.updates = updates
if sess is None:
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
else:
self.sess = sess
self.model_dir = model_dir
self.load_checkpoint = False
self.tf_optimizer = None
self.log_dir = None
self.app_name = None
def fit(self, data,
epochs=1,
batch_size=32,
feature_cols=None,
labels_cols=None,
validation_data=None,
hard_code_batch_size=False,
auto_shard_files=True,
session_config=None,
feed_dict=None,
checkpoint_trigger=None
):
"""
Train this graph model with train data.
:param data: train data. It can be XShards, Spark DataFrame, tf.data.Dataset.
If data is XShards, each element needs to be {'x': a feature numpy array
or a tuple of feature numpy arrays, 'y': a label numpy array or a tuple of
label numpy arrays}
If data is tf.data.Dataset, each element is a tuple of input tensors.
:param epochs: number of epochs to train.
:param batch_size: total batch size for each iteration.
:param feature_cols: feature column names if train data is Spark DataFrame.
:param labels_cols: label column names if train data is Spark DataFrame.
:param validation_data: validation data. Validation data type should be the same
as train data.
:param hard_code_batch_size: whether hard code batch size for training. Default is False.
:param auto_shard_files: whether to automatically detect if the dataset is file-based and
        apply sharding on files, otherwise sharding on records. Default is True.
:param session_config: tensorflow session configuration for training.
Should be object of tf.ConfigProto
:param feed_dict: a dictionary. The key is TensorFlow tensor, usually a
placeholder, the value of the dictionary is a tuple of two elements. The first one of
the tuple is the value to feed to the tensor in training phase and the second one
is the value to feed to the tensor in validation phase.
:param checkpoint_trigger: when to trigger checkpoint during training.
        Should be a zoo.orca.learn.trigger, like EveryEpoch(), SeveralIteration(num_iterations), etc.
"""
assert self.labels is not None, \
"labels is None; it should not be None in training"
assert self.loss is not None, \
"loss is None; it should not be None in training"
assert self.optimizer is not None, \
"optimizer is None; it should not be None in training"
if isinstance(data, DataFrame):
assert feature_cols is not None, \
"feature columns is None; it should not be None in training"
assert labels_cols is not None, \
"label columns is None; it should not be None in training"
if checkpoint_trigger is not None:
checkpoint_trigger = Trigger.convert_trigger(checkpoint_trigger)
dataset = to_dataset(data, batch_size=batch_size, batch_per_thread=-1,
validation_data=validation_data,
feature_cols=feature_cols, labels_cols=labels_cols,
hard_code_batch_size=hard_code_batch_size,
sequential_order=False, shuffle=True,
auto_shard_files=auto_shard_files
)
if feed_dict is not None:
tensor_with_value = {key: (value[0], value[1]) for key, value in feed_dict.items()}
else:
tensor_with_value = None
if self.use_bigdl_optim:
self.tf_optimizer = TFOptimizer.from_loss(
self.loss, self.optimizer,
session=self.sess, inputs=(self.inputs, self.labels), dataset=dataset,
clip_norm=self.clip_norm, clip_value=self.clip_value, metrics=self.metrics,
tensor_with_value=tensor_with_value, session_config=session_config,
model_dir=self.model_dir, updates=self.updates)
else:
self.tf_optimizer = TFOptimizer.from_train_op(
train_op=self.train_op,
loss=self.loss,
inputs=self.inputs,
labels=self.labels,
dataset=dataset,
metrics=self.metrics,
updates=self.updates, sess=self.sess,
tensor_with_value=tensor_with_value,
session_config=session_config,
model_dir=self.model_dir)
if self.load_checkpoint:
self.tf_optimizer.load_checkpoint(self.checkpoint_path, self.checkpoint_version)
if self.log_dir and self.app_name:
self.tf_optimizer.estimator.set_tensorboad(self.log_dir, self.app_name)
self.tf_optimizer.optimize(end_trigger=MaxEpoch(epochs),
checkpoint_trigger=checkpoint_trigger)
return self
def predict(self, data, batch_size=4,
feature_cols=None,
hard_code_batch_size=False,
auto_shard_files=True,
):
"""
Predict input data
:param data: data to be predicted. It can be XShards, Spark DataFrame.
If data is XShards, each element needs to be {'x': a feature numpy array
or a tuple of feature numpy arrays}.
:param batch_size: batch size per thread
:param feature_cols: list of feature column names if input data is Spark DataFrame.
:param hard_code_batch_size: whether to hard code batch size for prediction.
The default value is False.
:return: predicted result.
If input data is XShards or tf.data.Dataset, the prediction result is an XShards,
and the schema for each result is: {'prediction': predicted numpy array or
list of predicted numpy arrays}.
If input data is Spark DataFrame, the predict result is a DataFrame which includes original
columns plus 'prediction' column. The 'prediction' column can be FloatType, VectorUDT
or Array of VectorUDT depending on model outputs shape.
"""
assert self.outputs is not None, \
"output is None, it should not be None in prediction"
if isinstance(data, DataFrame):
assert feature_cols is not None, \
"feature columns is None; it should not be None in prediction"
assert not is_tf_data_dataset(data), "tf.data.Dataset currently cannot be used for " \
"estimator prediction"
dataset = to_dataset(data, batch_size=-1, batch_per_thread=batch_size,
validation_data=None,
feature_cols=feature_cols, labels_cols=None,
hard_code_batch_size=hard_code_batch_size,
sequential_order=True,
shuffle=False,
auto_shard_files=auto_shard_files,
)
flat_inputs = nest.flatten(self.inputs)
flat_outputs = nest.flatten(self.outputs)
tfnet = TFNet.from_session(sess=self.sess, inputs=flat_inputs, outputs=flat_outputs)
predicted_rdd = tfnet.predict(dataset)
if isinstance(data, DataFrame):
return convert_predict_to_dataframe(data, predicted_rdd)
elif isinstance(data, SparkXShards) or isinstance(data, tf.data.Dataset):
return convert_predict_to_xshard(predicted_rdd)
else:
return predicted_rdd
def evaluate(self, data, batch_size=32,
feature_cols=None,
labels_cols=None,
hard_code_batch_size=False,
auto_shard_files=True,
):
"""
Evaluate model.
:param data: evaluation data. It can be XShards, Spark DataFrame, tf.data.Dataset.
If data is XShards, each element needs to be {'x': a feature numpy array
or a tuple of feature numpy arrays, 'y': a label numpy array or a tuple of
label numpy arrays}
If data is tf.data.Dataset, each element is a tuple of input tensors.
:param batch_size: batch size per thread.
:param feature_cols: feature column names if data is Spark DataFrame.
:param labels_cols: label column names if data is Spark DataFrame.
:param hard_code_batch_size: whether to hard code batch size for evaluation.
:return: evaluation result as a dictionary of {'metric name': metric value}
"""
assert self.metrics is not None, \
"metrics is None, it should not be None in evaluate"
if isinstance(data, DataFrame):
assert feature_cols is not None, \
"feature columns is None; it should not be None in evaluation"
assert labels_cols is not None, \
"label columns is None; it should not be None in evaluation"
dataset = to_dataset(data, batch_size=-1, batch_per_thread=batch_size,
validation_data=None,
feature_cols=feature_cols, labels_cols=labels_cols,
hard_code_batch_size=hard_code_batch_size,
sequential_order=True,
shuffle=False,
auto_shard_files=auto_shard_files,
)
flat_inputs = nest.flatten(self.inputs)
flat_labels = nest.flatten(self.labels)
return evaluate_metrics(flat_inputs + flat_labels,
sess=self.sess,
dataset=dataset, metrics=self.metrics)
def save_tf_checkpoint(self, path):
save_tf_checkpoint(self.sess, path)
class TFKerasWrapper(Estimator):
def __init__(self, keras_model, metrics, model_dir, optimizer):
self.model = KerasModel(keras_model, model_dir)
self.load_checkpoint = False
self.metrics = metrics
self.tf_optimizer = None
self.optimizer = optimizer
from zoo.orca.learn.optimizers import Optimizer
if self.optimizer is not None and isinstance(self.optimizer, Optimizer):
self.optimizer = self.optimizer.get_optimizer()
self.log_dir = None
self.app_name = None
def fit(self, data,
epochs=1,
batch_size=32,
feature_cols=None,
labels_cols=None,
validation_data=None,
hard_code_batch_size=False,
session_config=None,
checkpoint_trigger=None,
auto_shard_files=True,
):
"""
Train this keras model with train data.
:param data: train data. It can be XShards, Spark DataFrame, tf.data.Dataset.
If data is XShards, each element needs to be {'x': a feature numpy array
or a tuple of feature numpy arrays, 'y': a label numpy array or a tuple of
label numpy arrays}
If data is tf.data.Dataset, each element is [feature tensor tuple, label tensor tuple]
:param epochs: number of epochs to train.
:param batch_size: total batch size for each iteration.
:param feature_cols: feature column names if train data is Spark DataFrame.
:param labels_cols: label column names if train data is Spark DataFrame.
:param validation_data: validation data. Validation data type should be the same
as train data.
:param hard_code_batch_size: whether hard code batch size for training. Default is False.
:param session_config: tensorflow session configuration for training.
Should be object of tf.ConfigProto
:param checkpoint_trigger: when to trigger checkpoint during training.
Should be a zoo.orca.learn.trigger, like EveryEpoch(), SeveralIteration(num_iterations), etc.
"""
if isinstance(data, DataFrame):
assert feature_cols is not None, \
"feature columns is None; it should not be None in training"
assert labels_cols is not None, \
"label columns is None; it should not be None in training"
if isinstance(data, tf.data.Dataset):
assert isinstance(data.element_spec, tuple), \
"If data is tf.data.Dataset, each element should be " \
"(feature tensors, label tensor), where each feature/label tensor can be " \
"either a single tensor or a tuple of tensors"
if validation_data is not None:
assert isinstance(validation_data, tf.data.Dataset), \
"train data and validation data should be both tf.data.Dataset"
assert isinstance(validation_data.element_spec, tuple), \
"If validation_data is tf.data.Dataset, each element should be " \
"(feature tensors, label tensor), where each feature/label tensor can be " \
"either a single tensor or a tuple of tensors"
if checkpoint_trigger is not None:
checkpoint_trigger = Trigger.convert_trigger(checkpoint_trigger)
dataset = to_dataset(data, batch_size=batch_size, batch_per_thread=-1,
validation_data=validation_data,
feature_cols=feature_cols, labels_cols=labels_cols,
hard_code_batch_size=hard_code_batch_size,
sequential_order=False, shuffle=True,
auto_shard_files=auto_shard_files)
if isinstance(dataset, TFNdarrayDataset):
dataset = _standarize_feature_label_dataset(dataset, self.model.model)
self.tf_optimizer = TFOptimizer.from_keras(self.model.model, dataset,
model_dir=self.model.model_dir,
session_config=session_config,
metrics=self.metrics,
optimizer=self.optimizer)
if self.load_checkpoint:
self.tf_optimizer.load_checkpoint(self.checkpoint_path, self.checkpoint_version)
if self.log_dir and self.app_name:
self.tf_optimizer.estimator.set_tensorboad(self.log_dir, self.app_name)
self.tf_optimizer.optimize(MaxEpoch(epochs), checkpoint_trigger=checkpoint_trigger)
return self
def predict(self, data, batch_size=4,
feature_cols=None,
hard_code_batch_size=False,
auto_shard_files=True,
):
"""
Predict input data
:param data: data to be predicted.
It can be XShards, Spark DataFrame, or tf.data.Dataset.
If data is XShards, each element needs to be {'x': a feature numpy array
or a tuple of feature numpy arrays}.
If data is tf.data.Dataset, each element is feature tensor tuple
:param batch_size: batch size per thread
:param feature_cols: list of feature column names if input data is Spark DataFrame.
:param hard_code_batch_size: whether to hard code batch size for prediction.
The default value is False.
:return: predicted result.
If input data is XShards or tf.data.Dataset, the prediction result is also an XShards,
and the schema for each result is: {'prediction': predicted numpy array or
list of predicted numpy arrays}.
If input data is Spark DataFrame, the predict result is a DataFrame which includes
original columns plus 'prediction' column. The 'prediction' column can be FloatType,
VectorUDT or Array of VectorUDT depending on model outputs shape.
"""
if isinstance(data, DataFrame):
assert feature_cols is not None, \
"feature columns is None; it should not be None in prediction"
dataset = to_dataset(data, batch_size=-1, batch_per_thread=batch_size,
validation_data=None,
feature_cols=feature_cols, labels_cols=None,
hard_code_batch_size=hard_code_batch_size,
sequential_order=True, shuffle=False,
auto_shard_files=auto_shard_files,
)
predicted_rdd = self.model.predict(dataset, batch_size)
if isinstance(data, DataFrame):
return convert_predict_to_dataframe(data, predicted_rdd)
elif isinstance(data, SparkXShards) or isinstance(data, tf.data.Dataset):
return convert_predict_to_xshard(predicted_rdd)
else:
return predicted_rdd
def evaluate(self, data, batch_size=4,
feature_cols=None,
labels_cols=None,
hard_code_batch_size=False,
auto_shard_files=True
):
"""
Evaluate model.
:param data: evaluation data. It can be XShards, Spark DataFrame, tf.data.Dataset.
If data is XShards, each element needs to be {'x': a feature numpy array
or a tuple of feature numpy arrays, 'y': a label numpy array or a tuple of
label numpy arrays}
If data is tf.data.Dataset, each element is [feature tensor tuple, label tensor tuple]
:param batch_size: batch size per thread.
:param feature_cols: feature column names if data is Spark DataFrame.
:param labels_cols: label column names if data is Spark DataFrame.
:param hard_code_batch_size: whether to hard code batch size for evaluation.
:return: evaluation result as a dictionary of {'metric name': metric value}
"""
if isinstance(data, DataFrame):
assert feature_cols is not None, \
"feature columns is None; it should not be None in evaluation"
assert labels_cols is not None, \
"label columns is None; it should not be None in evaluation"
dataset = to_dataset(data, batch_size=-1, batch_per_thread=batch_size,
validation_data=None,
feature_cols=feature_cols, labels_cols=labels_cols,
hard_code_batch_size=hard_code_batch_size,
sequential_order=True, shuffle=False,
auto_shard_files=auto_shard_files
)
return self.model.evaluate(dataset, batch_per_thread=batch_size)
def save_keras_model(self, path, overwrite=True):
self.model.save_model(path, overwrite=overwrite)
def save_keras_weights(self, filepath, overwrite=True, save_format=None):
self.model.save_weights(filepath, overwrite, save_format)
def load_keras_weights(self, filepath, by_name=False):
self.model.load_weights(filepath, by_name)
``` |
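A minimal usage sketch of the `TFKerasWrapper` defined above, under the assumption that an Analytics Zoo / BigDL environment with an initialized Spark context is available. `train_shards` and `test_shards` are hypothetical XShards of `{'x': ..., 'y': ...}` numpy dicts as described in the fit/predict docstrings, and the tiny Keras model is only illustrative:

```python
import tensorflow as tf

# Toy TF 1.x style Keras model; any compiled Keras model would do here.
inp = tf.keras.layers.Input(shape=(4,))
out = tf.keras.layers.Dense(2, activation="softmax")(inp)
keras_model = tf.keras.models.Model(inp, out)
keras_model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

est = TFKerasWrapper(keras_model, metrics=None, model_dir="/tmp/orca_keras", optimizer=None)
# train_shards / test_shards are placeholders for XShards built elsewhere.
est.fit(train_shards, epochs=2, batch_size=64)
predictions = est.predict(test_shards, batch_size=4)  # XShards of {'prediction': ...}
```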
{
"source": "jingxuanyang/Shopee-Product-Matching",
"score": 2
} |
#### File: input/shopee-competition-utils/get_embeddings.py
```python
import gc
import torch
import numpy as np
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
from augmentations import get_test_transforms, get_valid_transforms
from dataset import ShopeeImageDataset
from config import CFG
def get_image_embeddings(df, model):
image_dataset = ShopeeImageDataset(df,transform=get_test_transforms())
image_loader = torch.utils.data.DataLoader(
image_dataset,
batch_size=CFG.BATCH_SIZE,
pin_memory=True,
num_workers = CFG.NUM_WORKERS,
drop_last=False
)
embeds = []
with torch.no_grad():
for img,label in tqdm(image_loader):
img = img.to(CFG.DEVICE)
label = label.to(CFG.DEVICE)
feat,_ = model(img,label)
image_embeddings = feat.detach().cpu().numpy()
embeds.append(image_embeddings)
del model
image_embeddings = np.concatenate(embeds)
print(f'Our image embeddings shape is {image_embeddings.shape}')
del embeds
gc.collect()
return image_embeddings
def get_valid_embeddings(df, model):
model.eval()
image_dataset = ShopeeImageDataset(df,transform=get_valid_transforms())
image_loader = torch.utils.data.DataLoader(
image_dataset,
batch_size=CFG.BATCH_SIZE,
pin_memory=True,
num_workers = CFG.NUM_WORKERS,
drop_last=False
)
embeds = []
with torch.no_grad():
for img,label in tqdm(image_loader):
img = img.to(CFG.DEVICE)
label = label.to(CFG.DEVICE)
feat,_ = model(img,label)
image_embeddings = feat.detach().cpu().numpy()
embeds.append(image_embeddings)
del model
image_embeddings = np.concatenate(embeds)
print(f'Our image embeddings shape is {image_embeddings.shape}')
del embeds
gc.collect()
return image_embeddings
def get_bert_embeddings(df, column, model, chunk=32):
model.eval()
bert_embeddings = torch.zeros((df.shape[0], 768)).to(CFG.DEVICE)
for i in tqdm(list(range(0, df.shape[0], chunk)) + [df.shape[0]-chunk], desc="get_bert_embeddings", ncols=80):
titles = []
for title in df[column][i : i + chunk].values:
try:
title = title.encode('utf-8').decode("unicode_escape")
title = title.encode('ascii', 'ignore').decode("unicode_escape")
except:
pass
#title = text_punctuation(title)
title = title.lower()
titles.append(title)
with torch.no_grad():
if CFG.USE_AMP:
with torch.cuda.amp.autocast():
model_output = model(titles)
else:
model_output = model(titles)
bert_embeddings[i : i + chunk] = model_output
bert_embeddings = bert_embeddings.detach().cpu().numpy()
del model, titles, model_output
gc.collect()
torch.cuda.empty_cache()
return bert_embeddings
def get_tfidf_embeddings(df, max_features = 15000):
model = TfidfVectorizer(stop_words = 'english', binary = True, max_features = max_features)
text_embeddings = model.fit_transform(df['title']).toarray()
print(f'Our title text embedding shape is {text_embeddings.shape}')
del model
gc.collect()
return text_embeddings
```
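The TF-IDF path above needs only pandas and scikit-learn, so its core can be checked standalone. The snippet below mirrors the body of `get_tfidf_embeddings` on a few made-up titles (the lowered `max_features` is just for the toy example):

```python
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer

toy_df = pd.DataFrame({'title': [
    "wireless bluetooth earphone",
    "bluetooth earphone wireless headset",
    "stainless steel water bottle 1l",
]})
model = TfidfVectorizer(stop_words='english', binary=True, max_features=50)
text_embeddings = model.fit_transform(toy_df['title']).toarray()
print(text_embeddings.shape)  # (3, vocabulary size), one binary TF-IDF row per title
```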
#### File: input/shopee-competition-utils/get_neighbors.py
```python
import gc
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
from config import CFG
from criterion import precision_score, recall_score, f1_score
def get_image_neighbors(df, embeddings, threshold = 0.2, min2 = False):
nbrs = NearestNeighbors(n_neighbors = 50, metric = 'cosine')
nbrs.fit(embeddings)
distances, indices = nbrs.kneighbors(embeddings)
predictions = []
for k in range(embeddings.shape[0]):
if min2:
idx = np.where(distances[k,] < CFG.BEST_THRESHOLD)[0]
ids = indices[k,idx]
if len(ids) <= 1 and distances[k,1] < threshold:
ids = np.append(ids,indices[k,1])
else:
idx = np.where(distances[k,] < threshold)[0]
ids = indices[k,idx]
posting_ids = ' '.join(df['posting_id'].iloc[ids].values)
predictions.append(posting_ids)
df['pred_matches'] = predictions
df['f1'] = f1_score(df['matches'], df['pred_matches'])
df['recall'] = recall_score(df['matches'], df['pred_matches'])
df['precision'] = precision_score(df['matches'], df['pred_matches'])
del nbrs, distances, indices
gc.collect()
return df
def get_valid_neighbors(df, embeddings, KNN = 50, threshold = 0.36):
nbrs = NearestNeighbors(n_neighbors = KNN, metric = 'cosine')
nbrs.fit(embeddings)
distances, indices = nbrs.kneighbors(embeddings)
predictions = []
for k in range(embeddings.shape[0]):
idx = np.where(distances[k,] < threshold)[0]
ids = indices[k,idx]
posting_ids = ' '.join(df['posting_id'].iloc[ids].values)
predictions.append(posting_ids)
df['pred_matches'] = predictions
df['f1'] = f1_score(df['matches'], df['pred_matches'])
df['recall'] = recall_score(df['matches'], df['pred_matches'])
df['precision'] = precision_score(df['matches'], df['pred_matches'])
del nbrs, distances, indices
gc.collect()
return df, predictions
def get_voting_neighbors(df, distances, indices, threshold = 0.2, min2 = False):
predictions = []
for k in range(distances.shape[0]):
if min2:
idx = np.where(distances[k,] < CFG.BEST_THRESHOLD)[0]
ids = indices[k,idx]
if len(ids) <= 1 and distances[k,1] < threshold:
ids = np.append(ids,indices[k,1])
else:
idx = np.where(distances[k,] < threshold)[0]
ids = indices[k,idx]
posting_ids = ' '.join(df['posting_id'].iloc[ids].values)
predictions.append(posting_ids)
df['pred_matches'] = predictions
df['f1'] = f1_score(df['matches'], df['pred_matches'])
df['recall'] = recall_score(df['matches'], df['pred_matches'])
df['precision'] = precision_score(df['matches'], df['pred_matches'])
return df
def get_voting_nns(embeddings_dict):
embs_num = len(embeddings_dict)
similarities_sum = 0.
for i in range(embs_num):
try:
emb = normalize(embeddings_dict[f'emb_{i}'])
except KeyError:
raise KeyError('Please use keys emb_0, emb_1, etc in embeddings dict.')
similarities = emb.dot(emb.T)
similarities_sum += similarities
similarities_sum = similarities_sum / embs_num
similarities = np.sort(similarities_sum)[:,:-51:-1]
distances = 1 - similarities
indices = np.argsort(similarities_sum)[:,:-51:-1]
return distances, indices
def get_voting_result(df, distances, indices):
predictions = []
for k in range(distances.shape[0]):
idx = np.where(distances[k,] < CFG.BEST_THRESHOLD)[0]
ids = indices[k,idx]
if len(ids) <= 1 and distances[k,1] < CFG.BEST_THRESHOLD_MIN2:
ids = np.append(ids,indices[k,1])
posting_ids = ' '.join(df['posting_id'].iloc[ids].values)
predictions.append(posting_ids)
df['pred_matches'] = predictions
df['f1'] = f1_score(df['matches'], df['pred_matches'])
df['recall'] = recall_score(df['matches'], df['pred_matches'])
df['precision'] = precision_score(df['matches'], df['pred_matches'])
f1 = df.f1.mean()
recall = df.recall.mean()
precision = df.precision.mean()
print(f'f1 score after voting = {f1}, recall = {recall}, precision = {precision}')
return df
def get_union_neighbors(df, embeddings, threshold = 0.2, min2 = False):
nbrs = NearestNeighbors(n_neighbors = 50, metric = 'cosine')
nbrs.fit(embeddings)
distances, indices = nbrs.kneighbors(embeddings)
predictions = []
for k in range(embeddings.shape[0]):
if min2:
idx = np.where(distances[k,] < CFG.BEST_THRESHOLD)[0]
ids = indices[k,idx]
if len(ids) <= 1 and distances[k,1] < threshold:
ids = np.append(ids,indices[k,1])
else:
idx = np.where(distances[k,] < threshold)[0]
ids = indices[k,idx]
predictions.append(df['posting_id'].iloc[ids].values)
del nbrs, distances, indices
gc.collect()
return predictions
def get_voting_predictions(df, distances, indices, threshold = 0.2, min2 = False):
predictions = []
for k in range(distances.shape[0]):
if min2:
idx = np.where(distances[k,] < CFG.BEST_THRESHOLD)[0]
ids = indices[k,idx]
if len(ids) <= 1 and distances[k,1] < threshold:
ids = np.append(ids,indices[k,1])
else:
idx = np.where(distances[k,] < threshold)[0]
ids = indices[k,idx]
predictions.append(df['posting_id'].iloc[ids].values)
return predictions
```
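The neighbor functions above all follow the same pattern: fit a cosine kNN on the embeddings and keep, per row, the neighbors whose distance falls below a threshold. A self-contained toy illustration (embeddings and threshold are made up):

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

emb = np.array([[1.0, 0.0],
                [0.9, 0.1],
                [0.0, 1.0],
                [0.1, 0.9]])
nbrs = NearestNeighbors(n_neighbors=4, metric='cosine').fit(emb)
distances, indices = nbrs.kneighbors(emb)
for k in range(emb.shape[0]):
    idx = np.where(distances[k,] < 0.2)[0]   # same thresholding rule as get_valid_neighbors
    print(k, '->', indices[k, idx])          # rows 0/1 and 2/3 group together
```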
#### File: input/shopee-competition-utils/loss_module.py
```python
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
import math
from config import CFG
class ArcMarginProduct(nn.Module):
r"""Implementation of large margin arc distance:
Args:
in_features: size of each input sample
out_features: size of each output sample
s: norm of input feature
m: margin
cos(theta + m)
"""
def __init__(self, in_features, out_features, s=30.0, m=0.50, easy_margin=False, ls_eps=0.0):
# print('Using ArcFace')
super(ArcMarginProduct, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.ls_eps = ls_eps # label smoothing
self.weight = Parameter(torch.FloatTensor(out_features, in_features))
nn.init.xavier_uniform_(self.weight)
self.easy_margin = easy_margin
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
def forward(self, input, label):
# --------------------------- cos(theta) & phi(theta) ---------------------------
if CFG.USE_AMP:
cosine = F.linear(F.normalize(input), F.normalize(self.weight)).float() # if CFG.USE_AMP
else:
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where(cosine > self.th, phi, cosine - self.mm)
# --------------------------- convert label to one-hot ---------------------------
# one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda')
one_hot = torch.zeros(cosine.size(), device=CFG.DEVICE)
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
if self.ls_eps > 0:
one_hot = (1 - self.ls_eps) * one_hot + self.ls_eps / self.out_features
# -------------torch.where(out_i = {x_i if condition_i else y_i) -------------
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
output *= self.s
return output, nn.CrossEntropyLoss()(output,label)
'''
credit : https://github.com/HuangYG123/CurricularFace/blob/8b2f47318117995aa05490c05b455b113489917e/head/metrics.py#L70
'''
def l2_norm(input, axis = 1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class CurricularFace(nn.Module):
def __init__(self, in_features, out_features, s = 30, m = 0.50):
super(CurricularFace, self).__init__()
print('Using Curricular Face')
self.in_features = in_features
self.out_features = out_features
self.m = m
self.s = s
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.threshold = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
self.kernel = nn.Parameter(torch.Tensor(in_features, out_features))
self.register_buffer('t', torch.zeros(1))
nn.init.normal_(self.kernel, std=0.01)
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis = 1)
kernel_norm = l2_norm(self.kernel, axis = 0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
with torch.no_grad():
origin_cos = cos_theta.clone()
target_logit = cos_theta[torch.arange(0, embbedings.size(0)), label].view(-1, 1)
sin_theta = torch.sqrt(1.0 - torch.pow(target_logit, 2))
cos_theta_m = target_logit * self.cos_m - sin_theta * self.sin_m #cos(target+margin)
mask = cos_theta > cos_theta_m
final_target_logit = torch.where(target_logit > self.threshold, cos_theta_m, target_logit - self.mm)
hard_example = cos_theta[mask]
with torch.no_grad():
self.t = target_logit.mean() * 0.01 + (1 - 0.01) * self.t
cos_theta[mask] = hard_example * (self.t + hard_example)
cos_theta.scatter_(1, label.view(-1, 1).long(), final_target_logit)
output = cos_theta * self.s
return output, nn.CrossEntropyLoss()(output,label)
```
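For reference, the margin trick in `ArcMarginProduct` boils down to replacing the target-class logit cos(θ) by cos(θ + m) before scaling. Below is a standalone sketch of that computation with plain torch tensors (random features, arbitrary margin and scale, no dependency on the CFG module):

```python
import math
import torch
import torch.nn.functional as F

torch.manual_seed(0)
n, dim, n_classes, s, m = 4, 8, 3, 30.0, 0.5
features = torch.randn(n, dim)
weight = torch.randn(n_classes, dim)
labels = torch.tensor([0, 1, 2, 1])

cosine = F.linear(F.normalize(features), F.normalize(weight))   # cos(theta)
sine = torch.sqrt((1.0 - cosine.pow(2)).clamp(0, 1))
phi = cosine * math.cos(m) - sine * math.sin(m)                  # cos(theta + m)
one_hot = torch.zeros_like(cosine).scatter_(1, labels.view(-1, 1), 1)
logits = s * (one_hot * phi + (1.0 - one_hot) * cosine)          # margin only on the target class
loss = torch.nn.CrossEntropyLoss()(logits, labels)
print(logits.shape, float(loss))
```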
#### File: input/shopee-competition-utils/read_dataset.py
```python
import pandas as pd
from config import CFG
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GroupKFold
def read_dataset():
df = pd.read_csv(CFG.TRAIN_CSV)
df['matches'] = df.label_group.map(df.groupby('label_group').posting_id.agg('unique').to_dict())
df['matches'] = df['matches'].apply(lambda x: ' '.join(x))
gkf = GroupKFold(n_splits=CFG.N_SPLITS)
df['fold'] = -1
for i, (train_idx, valid_idx) in enumerate(gkf.split(X=df, groups=df['label_group'])):
df.loc[valid_idx, 'fold'] = i
labelencoder= LabelEncoder()
df['label_group'] = labelencoder.fit_transform(df['label_group'])
train_df = df[df['fold']!=CFG.TEST_FOLD].reset_index(drop=True)
train_df = train_df[train_df['fold']!=CFG.VALID_FOLD].reset_index(drop=True)
valid_df = df[df['fold']==CFG.VALID_FOLD].reset_index(drop=True)
if CFG.USE_TEST_CSV:
test_df = pd.read_csv(CFG.TEST_CSV)
test_df['matches'] = test_df.label_group.map(test_df.groupby('label_group').posting_id.agg('unique').to_dict())
test_df['matches'] = test_df['matches'].apply(lambda x: ' '.join(x))
test_df['label_group'] = labelencoder.fit_transform(test_df['label_group'])
else:
test_df = df[df['fold']==CFG.TEST_FOLD].reset_index(drop=True)
train_df['label_group'] = labelencoder.fit_transform(train_df['label_group'])
return train_df, valid_df, test_df
```
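A small self-contained illustration of the fold assignment done in `read_dataset`: GroupKFold keeps every `label_group` inside a single fold, so products that match each other never straddle the train/validation/test split (the toy frame below is made up):

```python
import pandas as pd
from sklearn.model_selection import GroupKFold

df = pd.DataFrame({
    'posting_id': [f'p{i}' for i in range(8)],
    'label_group': [10, 10, 20, 20, 30, 30, 40, 40],
})
gkf = GroupKFold(n_splits=4)
df['fold'] = -1
for i, (_, valid_idx) in enumerate(gkf.split(X=df, groups=df['label_group'])):
    df.loc[valid_idx, 'fold'] = i
print(df)  # every label_group lands in exactly one fold
```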
#### File: input/shopee-competition-utils/run_ensemble.py
```python
import torch
import numpy as np
import pandas as pd
from config import CFG
from read_dataset import read_dataset
from get_neighbors import get_voting_nns, get_voting_neighbors, get_union_neighbors, get_voting_predictions
from search_threshold import search_voting_threshold, search_inb_threshold
from seed_everything import seed_everything
from shopee_text_model import ShopeeBertModel
from get_embeddings import get_bert_embeddings, get_tfidf_embeddings
from blend_neighborhood import blend_neighborhood
from criterion import precision_score, recall_score, f1_score
def run_image_ensemble():
"""
Note that model parameters for neil, min2 and inb are the same.
"""
# resnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[0]
CFG.MARGIN = CFG.MARGINS[1]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_1 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_1 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# resnext
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[1]
CFG.MARGIN = CFG.MARGINS[3]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_2 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_2 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# densenet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[2]
CFG.MARGIN = CFG.MARGINS[4]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_3 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_3 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# efficientnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[1]
CFG.MODEL_NAME = CFG.MODEL_NAMES[3]
CFG.MARGIN = CFG.MARGINS[0]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_4 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_4 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# nfnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[4]
CFG.MARGIN = CFG.MARGINS[4]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_5 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_5 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# read dataset
_, valid_df, test_df = read_dataset()
# get voting of valid embeddings
valid_embeddings_dict = {'emb_0':valid_embeddings_1,
'emb_1':valid_embeddings_2,
'emb_2':valid_embeddings_3,
'emb_3':valid_embeddings_4,
'emb_4':valid_embeddings_5}
distances, indices = get_voting_nns(valid_embeddings_dict)
# search best thresholds
search_voting_threshold(valid_df, distances, indices)
# get voting of test embeddings
test_embeddings_dict = {'emb_0':test_embeddings_1,
'emb_1':test_embeddings_2,
'emb_2':test_embeddings_3,
'emb_3':test_embeddings_4,
'emb_4':test_embeddings_5}
distances, indices = get_voting_nns(test_embeddings_dict)
result_list = [[0 for i in range(3)] for j in range(2)]
# use obtained thresholds to get test results
test_df = get_voting_neighbors(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD, min2 = False)
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[0][0] = test_f1
result_list[0][1] = test_recall
result_list[0][2] = test_precision
print(f'f1 score after voting = {test_f1}, recall = {test_recall}, precision = {test_precision}')
# use obtained thresholds to get test results after min2
test_df = get_voting_neighbors(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD_MIN2, min2 = True)
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[1][0] = test_f1
result_list[1][1] = test_recall
result_list[1][2] = test_precision
print(f'f1 score after min2 and voting = {test_f1}, recall = {test_recall}, precision = {test_precision}')
result_df = pd.DataFrame(result_list, columns=['f1','recall','precision'])
return result_df
def run_text_ensemble():
"""
Ensemble the fine-tuned BERT text models by voting over their cosine-similarity matrices.
"""
seed_everything(CFG.SEED_BERT)
_, valid_df, test_df = read_dataset()
valid_embeddings_dict = {}
test_embeddings_dict = {}
for i in range(len(CFG.BERT_MODEL_NAMES)):
CFG.BERT_MODEL_NAME = CFG.BERT_MODEL_NAMES[i]
CFG.MARGIN = CFG.BEST_BERT_MARGINS[0][i]
model = ShopeeBertModel(
model_name = CFG.BERT_MODEL_NAME,
margin = CFG.MARGIN
)
CFG.MODEL_PATH_BERT = f"{CFG.BERT_MODEL_NAME.rsplit('/', 1)[-1]}_epoch8-bs16x1_margin_{CFG.MARGIN}.pt"
SAVE_MODEL_PATH = CFG.TEXT_MODEL_PATH_PREFIX + CFG.MODEL_PATH_BERT
print(CFG.MODEL_PATH_BERT)
model.load_state_dict(torch.load(SAVE_MODEL_PATH, map_location=CFG.DEVICE))
valid_embeddings_dict[f'emb_{i}'] = get_bert_embeddings(valid_df, 'title', model)
test_embeddings_dict[f'emb_{i}'] = get_bert_embeddings(test_df, 'title', model)
distances, indices = get_voting_nns(valid_embeddings_dict)
# search best thresholds
search_voting_threshold(valid_df, distances, indices)
distances, indices = get_voting_nns(test_embeddings_dict)
result_list = [[0 for i in range(3)] for j in range(2)]
# use obtained thresholds to get test results
test_df = get_voting_neighbors(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD, min2 = False)
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[0][0] = test_f1
result_list[0][1] = test_recall
result_list[0][2] = test_precision
print(f'f1 score after voting = {test_f1}, recall = {test_recall}, precision = {test_precision}')
# use obtained thresholds to get test results after min2
test_df = get_voting_neighbors(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD_MIN2, min2 = True)
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[1][0] = test_f1
result_list[1][1] = test_recall
result_list[1][2] = test_precision
print(f'f1 score after min2 and voting = {test_f1}, recall = {test_recall}, precision = {test_precision}')
result_df = pd.DataFrame(result_list, columns=['f1','recall','precision'])
return result_df
def run_nfnet_sbert_ensemble():
seed_everything(CFG.SEED_BERT)
_, valid_df, test_df = read_dataset()
# nfnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[4]
CFG.MARGIN = CFG.MARGINS[4]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
image_test_embeddings = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
image_valid_embeddings = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# paraphrase-xlm-r-multilingual-v1
CFG.BERT_MODEL_NAME = CFG.BERT_MODEL_NAMES[3]
CFG.MARGIN = CFG.BERT_MARGINS[3]
model = ShopeeBertModel(
model_name = CFG.BERT_MODEL_NAME,
margin = CFG.MARGIN
)
CFG.MODEL_PATH_BERT = f"{CFG.BERT_MODEL_NAME.rsplit('/', 1)[-1]}_epoch8-bs16x1_margin_{CFG.MARGIN}.pt"
SAVE_MODEL_PATH = CFG.TEXT_MODEL_PATH_PREFIX + CFG.MODEL_PATH_BERT
print(CFG.MODEL_PATH_BERT)
model.load_state_dict(torch.load(SAVE_MODEL_PATH, map_location=CFG.DEVICE))
text_valid_embeddings = get_bert_embeddings(valid_df, 'title', model)
text_test_embeddings = get_bert_embeddings(test_df, 'title', model)
# get voting of valid embeddings
valid_embeddings_dict = {'emb_0':image_valid_embeddings,
'emb_1':text_valid_embeddings}
distances, indices = get_voting_nns(valid_embeddings_dict)
# search best thresholds
search_voting_threshold(valid_df, distances, indices, upper=60)
# get voting of test embeddings
test_embeddings_dict = {'emb_0':image_test_embeddings,
'emb_1':text_test_embeddings}
distances, indices = get_voting_nns(test_embeddings_dict)
result_list = [[0 for i in range(3)] for j in range(2)]
# use obtained thresholds to get test results
test_df = get_voting_neighbors(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD, min2 = False)
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[0][0] = test_f1
result_list[0][1] = test_recall
result_list[0][2] = test_precision
print(f'f1 score after voting = {test_f1}, recall = {test_recall}, precision = {test_precision}')
# use obtained thresholds to get test results after min2
test_df = get_voting_neighbors(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD_MIN2, min2 = True)
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[1][0] = test_f1
result_list[1][1] = test_recall
result_list[1][2] = test_precision
print(f'f1 score after min2 and voting = {test_f1}, recall = {test_recall}, precision = {test_precision}')
result_df = pd.DataFrame(result_list, columns=['f1','recall','precision'])
return result_df
def run_tfidf_resnet_ensemble():
seed_everything(CFG.SEED)
_, valid_df, test_df = read_dataset()
# tf-idf
text_valid_embeddings = get_tfidf_embeddings(valid_df)
text_test_embeddings = get_tfidf_embeddings(test_df)
# resnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[0]
CFG.MARGIN = CFG.MARGINS[1]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
image_test_embeddings = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
image_valid_embeddings = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# get voting of valid embeddings
valid_embeddings_dict = {'emb_0':image_valid_embeddings,
'emb_1':text_valid_embeddings}
distances, indices = get_voting_nns(valid_embeddings_dict)
# search best thresholds
search_voting_threshold(valid_df, distances, indices, upper=60)
# get voting of test embeddings
test_embeddings_dict = {'emb_0':image_test_embeddings,
'emb_1':text_test_embeddings}
distances, indices = get_voting_nns(test_embeddings_dict)
result_list = [[0 for i in range(3)] for j in range(2)]
# use obtained thresholds to get test results
test_df = get_voting_neighbors(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD, min2 = False)
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[0][0] = test_f1
result_list[0][1] = test_recall
result_list[0][2] = test_precision
print(f'f1 score after voting = {test_f1}, recall = {test_recall}, precision = {test_precision}')
# use obtained thresholds to get test results after min2
test_df = get_voting_neighbors(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD_MIN2, min2 = True)
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[1][0] = test_f1
result_list[1][1] = test_recall
result_list[1][2] = test_precision
print(f'f1 score after min2 and voting = {test_f1}, recall = {test_recall}, precision = {test_precision}')
result_df = pd.DataFrame(result_list, columns=['f1','recall','precision'])
return result_df
def combine_predictions(row):
x = np.concatenate([row['image_predictions'], row['text_predictions']])
return ' '.join(np.unique(x))
def run_tfidf_resnet_union():
seed_everything(CFG.SEED)
_, valid_df, test_df = read_dataset()
result_list = [[0 for i in range(3)] for j in range(3)]
# tf-idf
text_valid_embeddings = get_tfidf_embeddings(valid_df)
text_test_embeddings = get_tfidf_embeddings(test_df)
# resnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[0]
CFG.MARGIN = CFG.MARGINS[1]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
image_test_embeddings = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
image_valid_embeddings = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# text predictions
search_inb_threshold(valid_df,text_valid_embeddings,lower=40,upper=70)
text_predictions = get_union_neighbors(test_df, text_test_embeddings, threshold=CFG.BEST_THRESHOLD)
# text predictions, min2
text_predictions_min2 = get_union_neighbors(test_df, text_test_embeddings, threshold=CFG.BEST_THRESHOLD_MIN2, min2=True)
# text predictions, inb
new_valid_emb = blend_neighborhood(valid_df,text_valid_embeddings)
search_inb_threshold(valid_df,new_valid_emb)
new_test_emb = blend_neighborhood(test_df,text_test_embeddings)
text_predictions_inb = get_union_neighbors(test_df, new_test_emb, threshold=CFG.BEST_THRESHOLD_MIN2, min2 = True)
# image predictions
search_inb_threshold(valid_df,image_valid_embeddings)
image_predictions = get_union_neighbors(test_df, image_test_embeddings, threshold=CFG.BEST_THRESHOLD)
# image predictions, min2
image_predictions_min2 = get_union_neighbors(test_df, image_test_embeddings, threshold=CFG.BEST_THRESHOLD_MIN2, min2=True)
# image predictions, inb
new_valid_emb = blend_neighborhood(valid_df,image_valid_embeddings)
search_inb_threshold(valid_df,new_valid_emb)
new_test_emb = blend_neighborhood(test_df,image_test_embeddings)
image_predictions_inb = get_union_neighbors(test_df, new_test_emb, threshold=CFG.BEST_THRESHOLD_MIN2, min2 = True)
test_df['image_predictions'] = image_predictions
test_df['text_predictions'] = text_predictions
test_df['pred_matches'] = test_df.apply(combine_predictions, axis = 1)
test_df['f1'] = f1_score(test_df['matches'], test_df['pred_matches'])
test_df['recall'] = recall_score(test_df['matches'], test_df['pred_matches'])
test_df['precision'] = precision_score(test_df['matches'], test_df['pred_matches'])
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[0][0] = test_f1
result_list[0][1] = test_recall
result_list[0][2] = test_precision
print(f'Test f1 score = {test_f1}, recall = {test_recall}, precision = {test_precision}')
# Min2
test_df['image_predictions'] = image_predictions_min2
test_df['text_predictions'] = text_predictions_min2
test_df['pred_matches'] = test_df.apply(combine_predictions, axis = 1)
test_df['f1'] = f1_score(test_df['matches'], test_df['pred_matches'])
test_df['recall'] = recall_score(test_df['matches'], test_df['pred_matches'])
test_df['precision'] = precision_score(test_df['matches'], test_df['pred_matches'])
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[1][0] = test_f1
result_list[1][1] = test_recall
result_list[1][2] = test_precision
print(f'Test f1 score after min2 = {test_f1}, recall = {test_recall}, precision = {test_precision}')
# INB
test_df['image_predictions'] = image_predictions_inb
test_df['text_predictions'] = text_predictions_inb
test_df['pred_matches'] = test_df.apply(combine_predictions, axis = 1)
test_df['f1'] = f1_score(test_df['matches'], test_df['pred_matches'])
test_df['recall'] = recall_score(test_df['matches'], test_df['pred_matches'])
test_df['precision'] = precision_score(test_df['matches'], test_df['pred_matches'])
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[2][0] = test_f1
result_list[2][1] = test_recall
result_list[2][2] = test_precision
print(f'Test f1 score after INB = {test_f1}, recall = {test_recall}, precision = {test_precision}')
result_df = pd.DataFrame(result_list,columns=['f1','recall','precision'])
return result_df
def run_nfnet_sbert_union():
seed_everything(CFG.SEED_BERT)
_, valid_df, test_df = read_dataset()
result_list = [[0 for i in range(3)] for j in range(3)]
# nfnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[4]
CFG.MARGIN = CFG.MARGINS[4]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
image_test_embeddings = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
image_valid_embeddings = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# paraphrase-xlm-r-multilingual-v1
CFG.BERT_MODEL_NAME = CFG.BERT_MODEL_NAMES[3]
CFG.MARGIN = CFG.BERT_MARGINS[3]
model = ShopeeBertModel(
model_name = CFG.BERT_MODEL_NAME,
margin = CFG.MARGIN
)
CFG.MODEL_PATH_BERT = f"{CFG.BERT_MODEL_NAME.rsplit('/', 1)[-1]}_epoch8-bs16x1_margin_{CFG.MARGIN}.pt"
SAVE_MODEL_PATH = CFG.TEXT_MODEL_PATH_PREFIX + CFG.MODEL_PATH_BERT
print(CFG.MODEL_PATH_BERT)
model.load_state_dict(torch.load(SAVE_MODEL_PATH, map_location=CFG.DEVICE))
text_valid_embeddings = get_bert_embeddings(valid_df, 'title', model)
text_test_embeddings = get_bert_embeddings(test_df, 'title', model)
# text predictions
search_inb_threshold(valid_df,text_valid_embeddings)
text_predictions = get_union_neighbors(test_df, text_test_embeddings, threshold=CFG.BEST_THRESHOLD)
# text predictions, min2
text_predictions_min2 = get_union_neighbors(test_df, text_test_embeddings, threshold=CFG.BEST_THRESHOLD_MIN2, min2=True)
# text predictions, inb
new_valid_emb = blend_neighborhood(valid_df,text_valid_embeddings)
search_inb_threshold(valid_df,new_valid_emb)
new_test_emb = blend_neighborhood(test_df,text_test_embeddings)
text_predictions_inb = get_union_neighbors(test_df, new_test_emb, threshold=CFG.BEST_THRESHOLD_MIN2, min2 = True)
# image predictions
search_inb_threshold(valid_df,image_valid_embeddings)
image_predictions = get_union_neighbors(test_df, image_test_embeddings, threshold=CFG.BEST_THRESHOLD)
# image predictions, min2
image_predictions_min2 = get_union_neighbors(test_df, image_test_embeddings, threshold=CFG.BEST_THRESHOLD_MIN2, min2=True)
# image predictions, inb
new_valid_emb = blend_neighborhood(valid_df,image_valid_embeddings)
search_inb_threshold(valid_df,new_valid_emb)
new_test_emb = blend_neighborhood(test_df,image_test_embeddings)
image_predictions_inb = get_union_neighbors(test_df, new_test_emb, threshold=CFG.BEST_THRESHOLD_MIN2, min2 = True)
test_df['image_predictions'] = image_predictions
test_df['text_predictions'] = text_predictions
test_df['pred_matches'] = test_df.apply(combine_predictions, axis = 1)
test_df['f1'] = f1_score(test_df['matches'], test_df['pred_matches'])
test_df['recall'] = recall_score(test_df['matches'], test_df['pred_matches'])
test_df['precision'] = precision_score(test_df['matches'], test_df['pred_matches'])
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[0][0] = test_f1
result_list[0][1] = test_recall
result_list[0][2] = test_precision
print(f'Test f1 score = {test_f1}, recall = {test_recall}, precision = {test_precision}')
# Min2
test_df['image_predictions'] = image_predictions_min2
test_df['text_predictions'] = text_predictions_min2
test_df['pred_matches'] = test_df.apply(combine_predictions, axis = 1)
test_df['f1'] = f1_score(test_df['matches'], test_df['pred_matches'])
test_df['recall'] = recall_score(test_df['matches'], test_df['pred_matches'])
test_df['precision'] = precision_score(test_df['matches'], test_df['pred_matches'])
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[1][0] = test_f1
result_list[1][1] = test_recall
result_list[1][2] = test_precision
print(f'Test f1 score after min2 = {test_f1}, recall = {test_recall}, precision = {test_precision}')
# INB
test_df['image_predictions'] = image_predictions_inb
test_df['text_predictions'] = text_predictions_inb
test_df['pred_matches'] = test_df.apply(combine_predictions, axis = 1)
test_df['f1'] = f1_score(test_df['matches'], test_df['pred_matches'])
test_df['recall'] = recall_score(test_df['matches'], test_df['pred_matches'])
test_df['precision'] = precision_score(test_df['matches'], test_df['pred_matches'])
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[2][0] = test_f1
result_list[2][1] = test_recall
result_list[2][2] = test_precision
print(f'Test f1 score after INB = {test_f1}, recall = {test_recall}, precision = {test_precision}')
result_df = pd.DataFrame(result_list,columns=['f1','recall','precision'])
return result_df
def run_text_image_union():
# read dataset
seed_everything(CFG.SEED_BERT)
_, valid_df, test_df = read_dataset()
##################################### image ####################################
# resnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[0]
CFG.MARGIN = CFG.MARGINS[1]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_1 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_1 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# resnext
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[1]
CFG.MARGIN = CFG.MARGINS[3]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_2 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_2 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# densenet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[2]
CFG.MARGIN = CFG.MARGINS[4]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_3 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_3 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# efficientnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[1]
CFG.MODEL_NAME = CFG.MODEL_NAMES[3]
CFG.MARGIN = CFG.MARGINS[0]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_4 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_4 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# nfnet
CFG.LOSS_MODULE = CFG.LOSS_MODULES[0]
CFG.MODEL_NAME = CFG.MODEL_NAMES[4]
CFG.MARGIN = CFG.MARGINS[4]
CFG.MODEL_PATH = f'{CFG.MODEL_NAME}_{CFG.LOSS_MODULE}_face_epoch_8_bs_8_margin_{CFG.MARGIN}.pt'
TEST_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_test_embed.csv'
print(f'Loading {TEST_EMBEDDING_PATH} ...')
test_embeddings_5 = np.loadtxt(TEST_EMBEDDING_PATH, delimiter=',')
VALID_EMBEDDING_PATH = CFG.EMB_PATH_PREFIX + CFG.MODEL_PATH[:-3] + '_valid_embed.csv'
print(f'Loading {VALID_EMBEDDING_PATH} ...')
valid_embeddings_5 = np.loadtxt(VALID_EMBEDDING_PATH, delimiter=',')
# get voting of valid embeddings
valid_embeddings_dict = {'emb_0':valid_embeddings_1,
'emb_1':valid_embeddings_2,
'emb_2':valid_embeddings_3,
'emb_3':valid_embeddings_4,
'emb_4':valid_embeddings_5}
distances, indices = get_voting_nns(valid_embeddings_dict)
# search best thresholds
search_voting_threshold(valid_df, distances, indices)
# get voting of test embeddings
test_embeddings_dict = {'emb_0':test_embeddings_1,
'emb_1':test_embeddings_2,
'emb_2':test_embeddings_3,
'emb_3':test_embeddings_4,
'emb_4':test_embeddings_5}
distances, indices = get_voting_nns(test_embeddings_dict)
# use obtained thresholds to get test results
image_predictions = get_voting_predictions(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD, min2 = False)
# use obtained thresholds to get test results after min2
image_predictions_min2 = get_voting_predictions(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD_MIN2, min2 = True)
##################################### text ####################################
valid_embeddings_dict = {}
test_embeddings_dict = {}
for i in range(len(CFG.BERT_MODEL_NAMES)):
CFG.BERT_MODEL_NAME = CFG.BERT_MODEL_NAMES[i]
CFG.MARGIN = CFG.BEST_BERT_MARGINS[0][i]
model = ShopeeBertModel(
model_name = CFG.BERT_MODEL_NAME,
margin = CFG.MARGIN
)
CFG.MODEL_PATH_BERT = f"{CFG.BERT_MODEL_NAME.rsplit('/', 1)[-1]}_epoch8-bs16x1_margin_{CFG.MARGIN}.pt"
SAVE_MODEL_PATH = CFG.TEXT_MODEL_PATH_PREFIX + CFG.MODEL_PATH_BERT
print(CFG.MODEL_PATH_BERT)
model.load_state_dict(torch.load(SAVE_MODEL_PATH, map_location=CFG.DEVICE))
valid_embeddings_dict[f'emb_{i}'] = get_bert_embeddings(valid_df, 'title', model)
test_embeddings_dict[f'emb_{i}'] = get_bert_embeddings(test_df, 'title', model)
distances, indices = get_voting_nns(valid_embeddings_dict)
# search best thresholds
search_voting_threshold(valid_df, distances, indices)
distances, indices = get_voting_nns(test_embeddings_dict)
# use obtained thresholds to get test results
text_predictions = get_voting_predictions(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD, min2 = False)
# use obtained thresholds to get test results after min2
text_predictions_min2 = get_voting_predictions(test_df, distances, indices, threshold = CFG.BEST_THRESHOLD_MIN2, min2 = True)
#################################### union predictions ###################################
result_list = [[0 for i in range(3)] for j in range(2)]
test_df['image_predictions'] = image_predictions
test_df['text_predictions'] = text_predictions
test_df['pred_matches'] = test_df.apply(combine_predictions, axis = 1)
test_df['f1'] = f1_score(test_df['matches'], test_df['pred_matches'])
test_df['recall'] = recall_score(test_df['matches'], test_df['pred_matches'])
test_df['precision'] = precision_score(test_df['matches'], test_df['pred_matches'])
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[0][0] = test_f1
result_list[0][1] = test_recall
result_list[0][2] = test_precision
print(f'Test f1 score = {test_f1}, recall = {test_recall}, precision = {test_precision}')
# Min2
test_df['image_predictions'] = image_predictions_min2
test_df['text_predictions'] = text_predictions_min2
test_df['pred_matches'] = test_df.apply(combine_predictions, axis = 1)
test_df['f1'] = f1_score(test_df['matches'], test_df['pred_matches'])
test_df['recall'] = recall_score(test_df['matches'], test_df['pred_matches'])
test_df['precision'] = precision_score(test_df['matches'], test_df['pred_matches'])
test_f1 = test_df.f1.mean()
test_recall = test_df.recall.mean()
test_precision = test_df.precision.mean()
result_list[1][0] = test_f1
result_list[1][1] = test_recall
result_list[1][2] = test_precision
print(f'Test f1 score after min2 = {test_f1}, recall = {test_recall}, precision = {test_precision}')
result_df = pd.DataFrame(result_list,columns=['f1','recall','precision'])
return result_df
```
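The ensembles above all go through `get_voting_nns`, which L2-normalizes each model's embeddings, averages the resulting cosine-similarity matrices, and ranks neighbors by the averaged similarity. A toy, self-contained version of that voting step (the two embedding matrices are random placeholders):

```python
import numpy as np
from sklearn.preprocessing import normalize

rng = np.random.default_rng(0)
emb_a = rng.normal(size=(6, 16))   # e.g. image embeddings
emb_b = rng.normal(size=(6, 32))   # e.g. text embeddings

sims = 0.0
for emb in (emb_a, emb_b):
    e = normalize(emb)
    sims = sims + e.dot(e.T)
sims /= 2.0

k = 3
indices = np.argsort(sims)[:, ::-1][:, :k]       # top-k neighbors per row
distances = 1 - np.sort(sims)[:, ::-1][:, :k]    # converted back to cosine distances
print(indices)
print(distances.round(3))
```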
#### File: input/shopee-competition-utils/shopee_image_model.py
```python
import timm
from torch import nn
from config import CFG
from loss_module import ArcMarginProduct, CurricularFace
class ShopeeModel(nn.Module):
def __init__(
self,
n_classes = CFG.CLASSES,
model_name = CFG.MODEL_NAME,
fc_dim = CFG.FC_DIM,
margin = CFG.MARGIN,
scale = CFG.SCALE,
use_fc = True,
pretrained = True,
use_arcface = CFG.USE_ARCFACE):
super(ShopeeModel,self).__init__()
print(f'Building Model Backbone for {model_name} model, margin = {margin}')
self.backbone = timm.create_model(model_name, pretrained=pretrained)
if 'efficientnet' in model_name:
final_in_features = self.backbone.classifier.in_features
self.backbone.classifier = nn.Identity()
self.backbone.global_pool = nn.Identity()
elif 'resnet' in model_name:
final_in_features = self.backbone.fc.in_features
self.backbone.fc = nn.Identity()
self.backbone.global_pool = nn.Identity()
elif 'resnext' in model_name:
final_in_features = self.backbone.fc.in_features
self.backbone.fc = nn.Identity()
self.backbone.global_pool = nn.Identity()
elif 'densenet' in model_name:
final_in_features = self.backbone.classifier.in_features
self.backbone.classifier = nn.Identity()
self.backbone.global_pool = nn.Identity()
elif 'nfnet' in model_name:
final_in_features = self.backbone.head.fc.in_features
self.backbone.head.fc = nn.Identity()
self.backbone.head.global_pool = nn.Identity()
self.pooling = nn.AdaptiveAvgPool2d(1)
self.use_fc = use_fc
if use_fc:
self.dropout = nn.Dropout(p=0.0)
self.fc = nn.Linear(final_in_features, fc_dim)
self.bn = nn.BatchNorm1d(fc_dim)
self._init_params()
final_in_features = fc_dim
if use_arcface:
self.final = ArcMarginProduct(final_in_features,
n_classes,
s=scale,
m=margin)
else:
self.final = CurricularFace(final_in_features,
n_classes,
s=scale,
m=margin)
def _init_params(self):
nn.init.xavier_normal_(self.fc.weight)
nn.init.constant_(self.fc.bias, 0)
nn.init.constant_(self.bn.weight, 1)
nn.init.constant_(self.bn.bias, 0)
def forward(self, image, label):
feature = self.extract_feat(image)
logits = self.final(feature,label)
return logits
def extract_feat(self, x):
batch_size = x.shape[0]
x = self.backbone(x)
x = self.pooling(x).view(batch_size, -1)
if self.use_fc:
x = self.dropout(x)
x = self.fc(x)
x = self.bn(x)
return x
``` |
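A short sketch of the backbone-surgery pattern `ShopeeModel` relies on: create a timm backbone, swap its pooling and classifier head for `nn.Identity`, and pool the raw feature map yourself before the margin head. It assumes timm is installed; `pretrained=False` avoids a weight download and the input size is arbitrary:

```python
import timm
import torch
from torch import nn

backbone = timm.create_model('resnet50', pretrained=False)
in_features = backbone.fc.in_features      # 2048 for resnet50
backbone.fc = nn.Identity()
backbone.global_pool = nn.Identity()
pooling = nn.AdaptiveAvgPool2d(1)

x = torch.randn(2, 3, 224, 224)
feat = pooling(backbone(x)).view(2, -1)    # (2, 2048) embedding fed to ArcFace/CurricularFace
print(in_features, feat.shape)
```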
{
"source": "Jingxue-24/QM2-team9",
"score": 4
} |
#### File: QM2-team9/LinearRegression/linear_regression.py
```python
def LinearRegression():
    import pandas as pd
    import matplotlib.pyplot as plt
    from sklearn.linear_model import LinearRegression  # shadows this function's name inside its body

    name_of_csv = "cluster_all.csv"
    df = pd.read_csv(name_of_csv)
    # Regress loudness on the date a track became popular
    X_np = df['Popular_date'].to_numpy().reshape(-1, 1)
    y_np = df['Loudness'].to_numpy().reshape(-1, 1)
    # Fit once and reuse the fitted model for the coefficients, R^2 and the fitted line
    model = LinearRegression().fit(X_np, y_np)
    plt.plot(X_np, y_np)
    plt.scatter(X_np, y_np)
    R_squared = model.score(X_np, y_np)
    print(R_squared)
    return plt.plot(X_np, model.coef_ * X_np + model.intercept_)
``` |
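A quick synthetic sanity check of the fit-once pattern above (hypothetical data, not the `cluster_all.csv` file): on noiseless y = 2x + 1 the recovered slope, intercept and R² should be 2, 1 and 1.

```python
import numpy as np
from sklearn.linear_model import LinearRegression

X = np.arange(10, dtype=float).reshape(-1, 1)
y = 2.0 * X + 1.0
model = LinearRegression().fit(X, y)
print(model.coef_, model.intercept_, model.score(X, y))  # ~[[2.]] [1.] 1.0
```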
{
"source": "JingYang1124/Wand-Based-on-ESP32",
"score": 3
} |
#### File: 1D_CNN_Trainning/Model_Compile/1D_CNN.py
```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedShuffleSplit
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
train = pd.read_csv('E:/Pro/python_proj/python_proj/WAND_Release/1D_CNN/dataset/100train.csv')  # load the training set
test = pd.read_csv('E:/Pro/python_proj/python_proj/WAND_Release/1D_CNN/dataset/100test.csv')  # load the test set
# Core of the script: encode the labels in the dataset; it can be reused as-is.
def encode(train, test):
label_encoder = LabelEncoder().fit(train.species)
labels = label_encoder.transform(train.species)# From the species to natural number: 0, 1, 2 ...
#print(labels): [0 0 1 0 1 0 0 1 1 0 0 ......
classes = list(label_encoder.classes_)# Original classes that corresponds to the labels(numbers) : ['bad','good']
train = train.drop(['species', 'id'], axis=1) #.drop() delete the columns
test = test.drop('id', axis=1)
return train, labels, test, classes
train, labels, test, classes = encode(train, test)
scaled_train=train.values #Only obtain the pure values in "train"
# print(scaled_train) :[[0.36971345 0.20764128 1.01101039 ... 0.86127609 0.66460005 0.56369004]
# [0.89455038 0.60264666 0.58669376 ... 0.61239623 0.85502414 0.70027092] ....
# StratifiedShuffleSplit holds out 20% of the data for validation
sss = StratifiedShuffleSplit(test_size=0.2, random_state=23)  # stratified sampling keeps the per-label sample proportions
for train_index, valid_index in sss.split(scaled_train, labels):
X_train, X_valid = scaled_train[train_index], scaled_train[valid_index]
y_train, y_valid = labels[train_index], labels[valid_index]
nb_features = 70
nb_class = len(classes)
num_pixels = nb_features*3
num_classes = nb_class
y_train = np_utils.to_categorical(y_train, nb_class)
y_valid = np_utils.to_categorical(y_valid, nb_class)
model = Sequential()
model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu'))
# model.add(Dense(100, activation='tanh'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=20, batch_size=50, verbose=2)
model.save('Spell_model.h5')
model.summary()
```
#### File: Python_Scripts/Spell_Recognition/Predict.py
```python
import time
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.models import load_model
nb_features = 70
c = pd.read_csv('E:/Pro/PythonPro/1D_CNN/dataset/10000.csv')  # load the test set
train = pd.read_csv('E:/Pro/PythonPro/1D_CNN/dataset/100train.csv')  # load the training set
def encode(train):
label_encoder = LabelEncoder().fit(train.species)
classes = list(label_encoder.classes_)# Original classes that corresponds to the labels(numbers) : ['bad','good']
return classes
classes = encode(train)
a=np.array(c)
print(a.shape)
model = load_model("Spell_model.h5")
# layerNumber = 0
# for layer in model.layers:
# weights = layer.get_weights()
# print(layerNumber)
# layerNumber += 1
# print(weights[0])
oldtime=time.process_time()
predict=model.predict_classes(a)
newtime=time.process_time()
print("Predict result is:")
for i in range(0,a.shape[0]):
print(classes[predict[i]])
print(u'Elapsed: %s s' % (newtime - oldtime))
``` |
{
"source": "jingyanw/outcome-induced-debiasing",
"score": 3
} |
#### File: jingyanw/outcome-induced-debiasing/estimator.py
```python
import sys  # used in the solver error handlers below
import numpy as np
import cvxpy as cp
# Split data into training-validation sets
def split_trainval_random_pair(d, n, ordering=None):
idxs = sample_total_from_partial(d, n, ordering=ordering)
train = np.full((d, n), False)
val = np.full((d, n), False)
for i in range(d): # for each row
idx_first = n * i
idx_last = n * (i+1) - 1
idxs_row = list(filter(lambda i: i >= idx_first and i <= idx_last, idxs)) # preserve ordering
L = int(len(idxs_row) / 2)
for t in range(L):
if np.random.uniform() < 0.5:
(idx_train, idx_val) = idxs_row[2*t : 2 * (t+1)]
else:
(idx_val, idx_train) = idxs_row[2*t : 2 * (t+1)]
train[i, idx_train % n] = True
val[i, idx_val % n] = True
if L * 2 < len(idxs_row): # odd
assert(len(idxs_row) - L*2 == 1)
# assign last element to val
val[i, idxs_row[-1] % n] = True
assert(not np.any(np.logical_and(train, val)) )
assert(np.array_equal(np.logical_or(train, val), np.full((d, n), True)))
return train, val
# Perform CV
# Returns:
# IL_CV: scalar index
# XS_CV: d-array
def perform_cv(y, lambdas, ordering=None, num_samples=100):
(d, n) = np.shape(y)
(set_train, set_val) = split_trainval_random_pair(d, n, ordering)
# train
# XS_TRAIN: (d, L)
# BS_TRAIN: (d, n, L) (only the train entries have non-zero values)
(xs_train, bs_train) = solve_opt(y, lambdas, mask=set_train, ordering=ordering)
L = len(lambdas)
# compute val error
errs_val = np.zeros(L)
for il in range(L):
xs = xs_train[:, il]
bs_val = interpolate_values(bs_train[:, :, il], set_train, set_val, \
ordering=ordering, num_samples=num_samples)
y_interpolate = bs_val + xs_train[:, il][:, np.newaxis]
y_interpolate[np.logical_not(set_val)] = 0 # just to be safe
errs_val[il] = np.mean(np.square( (y_interpolate - y) * set_val ))
# choose \lambda (CV)
xs_cv = np.zeros(d)
err_min = np.min(errs_val)
il_cv = np.where(errs_val == err_min)[0]
if len(il_cv) > 1:
print('Multiple choices of optimal lambda in CV!')
il_cv = np.random.choice(il_cv) # breaking ties
xs_cv = xs_train[:, il_cv]
return il_cv, xs_cv
# Interpolate values in SET_VAL -- take neighbor wrt ORDERING
# SET_TRAIN/VAL: dxn mask {True, False}
# NUM_SAMPLE: number of sampled total orderings
# ORDERING: take the nearest neighbor according to TOTAL_ORDER
# Return:
# BS: filled in SET_VAL
def interpolate_values(bs, set_train, set_val, ordering=None, num_samples=100):
(d, n) = bs.shape
bs[set_val] = 0 # just to be safe
idxs_train = np.where(set_train.flatten())[0]
for r in range(num_samples):
total_order = sample_total_from_partial(d, n, ordering=ordering)
positions_train = np.isin(total_order, idxs_train)
positions_train = np.where(positions_train)[0] # positions of train samples in TOTAL_ORDER
for i in range(d):
for j in range(n):
if not set_val[i, j]:
continue
idx_val = np.ravel_multi_index((i, j), (d, n))
position_val = np.where(total_order == idx_val)[0][0]
diff = np.abs(positions_train - position_val)
diff_min = np.min(diff)
positions_neighbor = positions_train[diff == diff_min]
assert(len(positions_neighbor) <= 2)
idxs_neighbor = total_order[positions_neighbor] # positions_neighbor is a set of size 1 or 2, not a single element
for idx_neighbor in idxs_neighbor:
assert(idx_neighbor in idxs_train)
(i_neighbor, j_neighbor) = np.unravel_index(idx_neighbor, (d, n))
bs[i, j] += bs[i_neighbor, j_neighbor] / len(idxs_neighbor) / num_samples
return bs
# Inputs:
# Y: d-by-n matrix -- rating data
# D: number of courses
# N: number of ratings
# LAMBDAS: an L-array of candidate lambda values
# Can handle inf (naive sample mean)
# MASK: d x n matrix
# True: use this score for estimation (train) | False: don't use this score for estimation (save for val)
# ORDERINGS (of grades): d-by-n
# e.g. [1, 0, 2] means b1 < b0 < b2
# MODE_FAST: use properties of the optimization to derive constraints
# Does not change the results
# Returns:
# XS: d x L
# BS: d x n x L
def solve_opt(Y, lambdas, mask=None, ordering=None, mode_fast=True):
if Y.ndim == 1:
Y = Y[np.newaxis, :] # 1 x n
(d, n) = Y.shape
L = len(lambdas)
if mask is None:
mask = np.full((d, n), True)
x = cp.Variable((d, 1))
b = cp.Variable((d, n))
lda = cp.Parameter(nonneg=True)
x_broadcast = cp.kron(np.ones((1,n)), x)
# for the second (L2) term, the mask shouldn't matter, since b_val is set to 0 in optimization
obj = cp.sum_squares( cp.multiply(Y - x_broadcast - b, mask) ) + lda * cp.sum_squares(cp.multiply(b, mask))
# construct constraints
inequalities = []
if ordering is not None:
if ordering.ndim == 1:
ordering = ordering[np.newaxis, :] # 1 x n
assert(is_valid_ordering(ordering))
vmax = int(np.max(ordering))
for v in range(vmax+1): # find the closest smaller in train
(xs_high, ys_high) = np.where( np.logical_and(ordering == v, mask) )
size_high = len(xs_high)
if size_high == 0: continue
if mode_fast: # b ordering same as y ordering within a single course, single group
for i in range(d):
ys_high_block = ys_high[xs_high==i]
y_block = Y[i, ys_high_block]
if len(y_block) >= 2:
idxs_order = np.argsort(y_block)
for k in range(len(y_block)-1):
inequalities.append(b[i, ys_high_block[idxs_order[k]]] <= b[i, ys_high_block[idxs_order[k+1]]])
mask_low = np.logical_and(ordering < v, mask)
if not np.any(mask_low): continue
v_low = np.max(ordering[mask_low])
(xs_low, ys_low) = np.where( np.logical_and(ordering == v_low, mask) )
size_low = len(xs_low)
if not mode_fast:
for ih in range(size_high):
for i in range(size_low):
xh = xs_high[ih]
yh = ys_high[ih]
xl = xs_low[i]
yl = ys_low[i]
inequalities.append(b[xh, yh] >= b[xl, yl])
else: # mode_fast
for ih in range(d):
for i in range(d):
ys_high_block = ys_high[xs_high == ih]
ys_low_block = ys_low[xs_low == i]
if len(ys_high_block) > 0 and len(ys_low_block) > 0:
y_high_block = Y[ih, ys_high_block]
idx_high = np.argsort(y_high_block)[0] # min
y_low_block = Y[i, ys_low_block]
idx_low = np.argsort(y_low_block)[-1] # max
inequalities.append(b[ih, ys_high_block[idx_high]] >= b[i, ys_low_block[idx_low]])
# dummy constraints for the validation data to be 0
(xs, ys) = np.where(np.logical_not(mask))
for i in range(len(xs)):
inequalities.append(b[xs[i], ys[i]] == 0)
xs_sol = np.zeros((d, L))
bs_sol = np.zeros((d, n, L))
for il in range(L):
l = lambdas[il]
if l == np.inf:
xs_sol[:, il] = np.sum(Y * mask, axis=1) / np.sum(mask, axis=1) # sample mean
bs_sol[:, :, il] = np.zeros((d, n))
else:
lda.value = l
if len(inequalities) == 0:
prob = cp.Problem( cp.Minimize(obj))
else:
prob = cp.Problem( cp.Minimize(obj), inequalities)
try:
prob.solve(solver=cp.ECOS)
except:
print('Solving error (lambda=%.3f): %s' % (lambdas[il], sys.exc_info()[0]) )
prob.solve(solver=cp.SCS)
if l == 0: # break ties -- find the correct shift among all solutions
b0 = b.value + x.value # broadcast operation
x0 = cp.Variable((d, 1))
x0_broadcast = cp.kron(np.ones((1,n)), x0)
obj0 = cp.sum_squares( cp.multiply(b0 - x0_broadcast, mask) )
inequalities0 = []
# re-construct the inequalities again
if ordering is not None:
for v in range(vmax+1):
(xs_high, ys_high) = np.where( np.logical_and(ordering == v, mask) )
size_high = len(xs_high)
# no need for constraints for a single course, single group, because it's already enforced in the first optimization
mask_low = np.logical_and(ordering < v, mask)
if size_high == 0 or not np.any(mask_low):
continue
v_low = np.max(ordering[mask_low])
(xs_low, ys_low) = np.where( np.logical_and(ordering == v_low, mask) )
size_low = len(xs_low)
if not mode_fast:
for ih in range(size_high):
for i in range(size_low):
xh = xs_high[ih]
yh = ys_high[ih]
xl = xs_low[i]
yl = ys_low[i]
if xh != xl: # address numerical infeasibility issue -- within course ordering constraints have already been resolved
inequalities0.append(b0[xh, yh] - x0[xh] >= b0[xl, yl] - x0[xl])
else: # mode_fast
for ih in range(d):
for i in range(d):
if ih == i: continue # handled by previous optimization
ys_high_block = ys_high[xs_high == ih]
ys_low_block = ys_low[xs_low == i]
if len(ys_high_block) > 0 and len(ys_low_block) > 0:
y_high_block = b0[ih, ys_high_block]
idx_high = np.argsort(y_high_block)[0] # min
y_low_block = b0[i, ys_low_block]
idx_low = np.argsort(y_low_block)[-1] # max
inequalities0.append(b0[ih, ys_high_block[idx_high]] - x0[ih] >= b0[i, ys_low_block[idx_low]] - x0[i])
prob0 = cp.Problem( cp.Minimize(obj0), inequalities0)
try:
prob0.solve(solver=cp.ECOS)
except:
print('Solving error (lambda=0).')
prob0.solve(solver=cp.SCS)
xs_sol[:, il] = x0.value.flatten()
bs_sol[:, :, il] = b0 - x0.value
else: # l > 0
xs_sol[:, il] = x.value.flatten()
bs_sol[:, :, il] = b.value
return (xs_sol, bs_sol)
# Sample total ordering from partial ordering
# Returns:
# 1x(nd) order (from small to large entries)
def sample_total_from_partial(d, n, ordering=None):
if ordering is None:
return np.random.permutation(d*n)
else: # ordering not None
assert(is_valid_ordering(ordering))
order = ordering.flatten()
order = order + 0.1 * np.random.uniform(size=d*n)
idxs = np.argsort(order)
return idxs
# A valid ordering consists of adjacent-valued integer ranks starting from 0:
def is_valid_ordering(ordering):
vals = np.unique(ordering)
if np.min(ordering) == 0:
if np.all(np.diff(vals) == 1):
return True
return False
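# --- Minimal usage sketch (illustrative; synthetic data, no ordering information). Assumes a cvxpy build
# where cp.kron accepts a Variable operand, as solve_opt above already requires, with ECOS/SCS available. ---
if __name__ == '__main__':
    np.random.seed(0)
    d, n = 2, 6                                            # 2 courses with 6 ratings each
    y = np.random.randn(d, n) + np.array([[1.0], [2.0]])   # noisy scores around per-course means
    lambdas = [0.1, 1.0, np.inf]                           # np.inf corresponds to the naive sample mean
    il_cv, xs_cv = perform_cv(y, lambdas)
    print('selected lambda index:', il_cv)
    print('estimated course means:', xs_cv.ravel())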
``` |
{
"source": "jingyanxu/Patient-specific_hyperparameter_learning_CT",
"score": 2
} |
#### File: jingyanxu/Patient-specific_hyperparameter_learning_CT/run_test.py
```python
from sys import argv, exit
import random
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
import time
from optparse import OptionParser
from utils_smth_layer_fast import *
from utils_cnn_model import *
from utils_tfds import load_data_fbp
from utils_ramp_filter_tf import *
os.environ['PYTHONINSPECT'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
#plt.rcParams['figure.figsize'] = (8.0, 6.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
if (len (argv) < 2) :
print ("usage: {} epoch #".format (os.path.basename (argv[0])))
print ('''
-f [ # of filters, default 128 ]
-b [ # of blocks, default 2 ]
''')
exit (1)
parser = OptionParser()
parser.add_option("-c", "--chans", type = "int", dest="nchans", \
help="num of chans", default=672)
parser.add_option("-v", "--views", type = "int", dest="nviews", \
help="num of views", default=1160)
parser.add_option("-t", "--stepsize", type = "float", dest="lr", \
help="learning rate", default=1.0e-4)
parser.add_option("-f", "--nfilters", type = "int", dest="nfilters", \
help="num of filters", default=128)
parser.add_option("-b", "--nblocks", type = "int", dest="nblocks", \
help="num of blocks", default=2)
(options, args) = parser.parse_args()
if tf.test.gpu_device_name():
print('GPU found')
else:
print("No GPU found")
header = 8369
nchans = 672
nviews = 1160
# max train is around 15,000
num_train = 1
num_val = 1
num_test = 1
nfilters = options.nfilters
nblocks = options.nblocks
train_fname = "./test_names.txt" # a place holder for running test
test_fname = "./test_names.txt"
input_len = nchans
initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=0.1)
inputs = tf.keras.layers.Input (shape = (input_len, 1 ))
is_training = False
beta_o = Conv1Dthin_d (inputs, nfilters, nblocks, initializer, trainable=is_training )
beta_cnn = tf.keras.Model (inputs = inputs, outputs=beta_o )
sinosmth_layer = Sinosmth_grad_k ( input_len)
print (beta_cnn.summary())
num_epochs = 1
batch_size = 1
steps_per_epoch = num_train// batch_size
train_steps = num_epochs*steps_per_epoch
init_learning_rate = options.lr
print_every = 15
save_every = 1
learning_rate_fn = tf.optimizers.schedules.PolynomialDecay(init_learning_rate, train_steps, 0.5*init_learning_rate, 2)
def optimizer_init_fn():
# return tf.keras.optimizers.SGD(learning_rate=learning_rate)
return tf.keras.optimizers.Adam(learning_rate=learning_rate_fn, epsilon=1e-8)
optimizer = optimizer_init_fn()
train_ds, val_ds, test_ds = load_data_fbp (train_fname, test_fname, num_train, num_val, num_test, batch_size = batch_size)
is_reload = True
alpha = 0
apod = 2 # smooth 0, 1, 2
xdim = 512
fov_xdim = 200
fov_ydim = 200
lower = 0
upper = xdim - fov_xdim
xoff = 255.5
yoff = 255.5
fov = 2*245
x_size = 0.975
xvec = (np.arange(xdim) - xoff ) * x_size
yvec = (np.arange(xdim) - yoff ) * x_size
xmat, ymat = np.meshgrid (xvec, yvec)
# here we have a recon mask
mask = np.where (xmat * xmat + ymat*ymat <= (fov/2)*(fov/2) , 1, 0 )
mask = tf.convert_to_tensor (mask, dtype = tf.float32)
checkpoint_directory = './checkpoint_dir/'
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=optimizer, model=beta_cnn)
manager = tf.train.CheckpointManager(ckpt, directory=checkpoint_directory, max_to_keep=50)
e0 = int(args[0] )
test_loss = tf.keras.metrics.Mean(name='test_loss')
tt_loss = ()
et_loss = ()
start_time = time.time ()
checkpoints_list = manager.checkpoints
t = 0
t_ = 0
for epoch in range (e0, 1) :
status = ckpt.restore(checkpoints_list [epoch] )
print("Restored from epoch {} {}".format(epoch, checkpoints_list [epoch] ) )
test_loss.reset_states()
for x_scale, y_np, x_np, label_fname in test_ds :
loss = 0
i = 0
x0 = random.randint (lower, upper)
y0 = random.randint (lower, upper)
# for debugging maybe good to fix the roi location
x0 = 255 - 100
y0 = 255 - 100
beta_o = beta_cnn (x_scale[i])
logits = sinosmth_layer ([x_np[i], beta_o])
recon0_n = rcn_fbp (logits, x0, y0, apod ) # fixed fov dim inside
# conversion to attenuation cm-1
label = y_np[i, y0:y0+fov_ydim, x0:x0+fov_xdim]/ 1000.0 * 0.0183
mask_roi = mask [y0:y0+fov_ydim, x0:x0+fov_xdim]
loss1 = tf.reduce_sum (((label - recon0_n)*mask_roi)**2)
loss2 = alpha* tf.reduce_sum(beta_o*beta_o)
loss += (loss1 + loss2 )
loss /= batch_size
test_loss.update_state (loss)
end_time = time.time ()
t += 1
et_loss += (loss.numpy() , )
if (t % print_every ==0 ) :
template = 'Iter {:6d}, Epoch {:4d}, Loss: {}, running avg_loss: {}, time {:6f}'
print (template.format(t, epoch+1,
loss.numpy(), test_loss.result().numpy(), end_time -start_time ))
start_time = end_time
tt_loss += (test_loss.result().numpy(), )
print (tt_loss[-1] )
h2o = 0.0183
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(14,6))
hdl1 = ax[0].imshow (recon0_n.numpy() )
hdl2 = ax[1].imshow (label.numpy() )
hdl3 = ax[2].imshow (mask_roi.numpy() )
ax[0].set_title ('recon')
ax[1].set_title ('label')
ax[2].set_title ('mask [0/1]')
cw = 1024/1000*h2o
ww = 400/1000*h2o
hdl1.set_clim ( [cw - ww/2, cw+ww/2])
hdl2.set_clim ( [cw - ww/2, cw+ww/2])
plt.show(block = False)
```
#### File: jingyanxu/Patient-specific_hyperparameter_learning_CT/utils_ramp_filter_tf.py
```python
import tensorflow as tf
import numpy as np
eps = 1e-4
roi_xdim = 200
roi_ydim = 200
#x0 = 152
#y0 = 109
nviews = 1160
nchans = 672
fov = 2*245
u_size = 1.4083
D = 1040.0
duD = u_size/D
params = {}
params["start_angle"] = 0
params["nreadings"] = nviews
params ["xdim"] = roi_xdim
params ["ydim"] = roi_ydim
params ["fov"] = fov
params ["x_size"] = 0.975
params ["y_size"] = 0.975
params ["s2d"] = D
params ["s2o"] = 570.0
params ["u_off"] = 336.625
params ["u_size"] = u_size
params ["num_u"] = nchans
params ["print_every"] = 4
params ["angles_per_set"] = nviews
params ["duD"] = duD
params ["xoff"] = 255.5
params ["yoff"] = 255.5
params ['apod'] = 0
def vfunc (x ) :
y2 = tf.ones_like (x) * 0.5
mask = tf.cast (tf.where(tf.abs(x) > eps, 1, 0), x.dtype)
y1 = tf.math.divide_no_nan (tf.sin (x) , x) + tf.math.divide_no_nan( (tf.cos (x) - 1), (x*x) )
y = y1 * mask + (1 - mask) * y2
return y
def wfunc (x ) :
y2 = tf.zeros_like (x)
mask = tf.cast (tf.where ( tf.abs (x) > eps, 1, 0), x.dtype )
y1 = tf.math.divide_no_nan(tf.cos (x) -1 , x )
y = mask * y1 + (1 - mask) * y2
return y
def apply_ramp_filter (proj, apod ) :
du = params['duD']
nviews = params['nreadings']
nchans = params['num_u']
uoff = params['u_off']
# apod = params['apod']
# apod = 0
nchans_pad = int(np.exp2 (np.ceil(np.log2 (nchans) ) ) )
print (nchans, nchans_pad)
# ramp_filter = tf.zeros ((2*nchans_pad,))
ramp_imag = tf.zeros ((2*nchans_pad,), dtype = tf.float64)
rf_p2 = tf.zeros ( 2*nchans_pad -nchans+1-nchans, dtype = tf.float64)
du = tf.cast (du, tf.float64)
uoff = tf.constant( uoff, dtype=tf.float64)
# gamma=(-uoff + tf.cast (range(nchans), tf.float64))*du
gamma=(-uoff + tf.range(nchans, dtype=tf.float64))*du
cosweight= tf.cos(gamma)
proj = tf.cast (proj, dtype = tf.float64) * cosweight
# proj = proj
    # The original assigned hamming twice (an apod==0 check immediately overwritten by an apod==1 check);
    # the net behavior reduces to the single conditional below.
    if (apod == 1):
        hamming = tf.constant(1.0, dtype=tf.float64)
    else:
        hamming = tf.constant(0.5, dtype=tf.float64)
pi = tf.constant (np.pi, dtype = tf.float64)
arg1 = pi*tf.range(nchans, dtype=tf.float64)
arg2 = pi*tf.range (-nchans+1, 0, dtype=tf.float64)
a1 = arg1 * du/pi
a2 = arg2 * du/pi
w12 = tf.ones_like (a1[nchans :])
w11 = (a1[1:nchans] / tf.sin ( a1[1:nchans]) )**2
w1 = tf.concat ( [tf.constant ([1.], dtype=tf.float64), w11, w12], axis = -1)
w2 = (a2/tf.sin(a2))**2
if (apod == 0 ) or (apod == 1) :
h1 = hamming * vfunc (arg1) + 0.5 * (1 - hamming)* (vfunc (pi + arg1) + vfunc (pi- arg1))
h2 = hamming * vfunc (arg2) + 0.5 * (1 - hamming)* (vfunc (pi + arg2) + vfunc (pi- arg2))
rf_p1 = h1 * w1
rf_p3 = h2 * w2
ramp_filter = tf.concat ( [rf_p1, rf_p2, rf_p3], axis=-1) / (2 * du)
else :
h1 = wfunc (arg1 + pi/2) - wfunc (arg1 - pi/2)
h2 = wfunc (arg2 + pi/2) - wfunc (arg2 - pi/2)
rf_p1 = h1*w1
rf_p3 = h2 * w2
ramp_filter = tf.concat ( [rf_p1, rf_p2, rf_p3], axis=-1) / ( - 2 * du* pi)
ramp_filter_fft = tf.signal.fft (tf.complex (ramp_filter, ramp_imag))
proj_pad = tf.pad (proj , [[0, 0], [0, 2*nchans_pad -nchans] ], constant_values =0 )
proj_pad_fft = tf.signal.fft( tf.complex (proj_pad, tf.zeros_like (proj_pad)) )
proj_filter = tf.math.real (tf.signal.ifft(ramp_filter_fft * proj_pad_fft ) ) [:, 0:nchans]
proj_filter *= cosweight*cosweight
#proj_filter = tf.cast (proj_filter, dtype = tf.float32)
return proj_filter , cosweight
# this is float32 version
def backprojection_nv (proj, x0, y0, xdim=64, ydim=64 ) :
start_angle = params['start_angle']
nreadings = params ['nreadings']
# xdim = params['xdim']
# ydim = params['ydim']
fov = params ['fov']
x_size = params['x_size']
y_size = params['y_size']
s2d = params['s2d']
s2o = params['s2o']
u_off = params['u_off']
u_size = params['u_size']
nchans = params['num_u']
print_every = params['print_every']
angles_per_set = params ["angles_per_set"]
du = params ["duD"]
#xoff = params ["xoff"]
#yoff = params ["yoff"]
xoff = params ["xoff"] - x0
yoff = params ["yoff"] - y0
dtype = tf.float32
proj = tf.cast (proj, dtype)
xvec = (tf.range (xdim, dtype=dtype) - xoff )*tf.constant (x_size, dtype=dtype )
yvec = (tf.range (ydim, dtype=dtype) - yoff )*tf.constant (y_size, dtype=dtype)
(xmat, ymat) = tf.meshgrid (xvec, yvec)
#ymat = ymat[::-1, :]
# angles_per_set = 32
nsubsets = nreadings//angles_per_set
xmat1 = tf.expand_dims (xmat, axis=-1)
ymat1 = tf.expand_dims (ymat, axis=-1)
#idx = tf.reshape (tf.tile (tf.reshape(tf.range (angles_per_set), [-1,1]), [1, xdim*ydim] ), [-1] )
idx = tf.reshape (tf.tile (tf.range (angles_per_set), [xdim*ydim]), [-1] )
pi = tf.constant (np.pi, dtype = dtype)
viewangles = tf.range(nreadings, dtype = dtype)*2.0*pi/nreadings - start_angle/180*pi
dangle = tf.abs (viewangles[1] - viewangles[0])
bim = tf.zeros ( [ydim, xdim], dtype=dtype)
for isubset in range(nsubsets ) :
if (isubset % print_every == 0) :
print (isubset, end = " " )
iv0 = isubset * angles_per_set
iv1 = (isubset + 1) * angles_per_set
angles = viewangles [iv0:iv1]
e11, e12 = -tf.cos(angles) , -tf.sin(angles)
e21, e22 = e12, -e11
num = xmat1 *e21 + ymat1*e22
den = s2o + xmat1*e11 + ymat1*e12
coord = tf.reshape (tf.math.atan2 (num, den)*s2d/u_size + u_off, [ -1 ] )
lower = tf.cast (tf.floor (coord), tf.int32)
upper = lower + 1
lower = tf.clip_by_value (lower, 0, nchans-1)
weights = coord - tf.cast (lower, dtype)
lower = tf.stack ([idx, tf.clip_by_value (lower, 0, nchans-1)], axis =1)
upper = tf.stack([idx, tf.clip_by_value (upper, 0, nchans-1) ], axis =1)
lim = tf.gather_nd (proj [iv0:iv1, :], lower )
uim = tf.gather_nd (proj [iv0:iv1, :], upper )
im1 = tf.reshape (lim * (1 - weights) + uim * weights , [ydim, xdim, angles_per_set] )
bim += tf.reduce_sum (im1/den/den , axis = -1)
bim *= s2o*pi/nreadings
bim = tf.cast (bim, dtype = tf.float32)
return bim
def backprojection_nv_d (proj, x0, y0, xdim=64, ydim=64) :
start_angle = params['start_angle']
nreadings = params ['nreadings']
# xdim = params['xdim']
# ydim = params['ydim']
fov = params ['fov']
x_size = params['x_size']
y_size = params['y_size']
s2d = params['s2d']
s2o = params['s2o']
u_off = params['u_off']
u_size = params['u_size']
nchans = params['num_u']
print_every = params['print_every']
angles_per_set = params ["angles_per_set"]
du = params ["duD"]
#xoff = params ["xoff"]
#yoff = params ["yoff"]
dtype = tf.float64
xoff = params ["xoff"] - tf.cast( x0, dtype = dtype)
yoff = params ["yoff"] - tf.cast( y0, dtype = dtype)
xvec = (tf.range (xdim, dtype=dtype) - xoff )*tf.constant (x_size, dtype=dtype )
yvec = (tf.range (ydim, dtype=dtype) - yoff )*tf.constant (y_size, dtype=dtype)
(xmat, ymat) = tf.meshgrid (xvec, yvec)
#ymat = ymat[::-1, :]
# angles_per_set = 32
nsubsets = nreadings//angles_per_set
xmat1 = tf.expand_dims (xmat, axis=-1)
ymat1 = tf.expand_dims (ymat, axis=-1)
#idx = tf.reshape (tf.tile (tf.reshape(tf.range (angles_per_set), [-1,1]), [1, xdim*ydim] ), [-1] )
idx = tf.reshape (tf.tile (tf.range (angles_per_set), [xdim*ydim]), [-1] )
pi = tf.constant (np.pi, dtype = dtype)
viewangles = tf.range(nreadings, dtype = dtype)*2.0*pi/nreadings - start_angle/180*pi
dangle = tf.abs (viewangles[1] - viewangles[0])
bim = tf.zeros ( [ydim, xdim], dtype=dtype)
for isubset in range(nsubsets ) :
if (isubset % print_every == 0) :
print (isubset, end = " " )
iv0 = isubset * angles_per_set
iv1 = (isubset + 1) * angles_per_set
angles = viewangles [iv0:iv1]
e11, e12 = -tf.cos(angles) , -tf.sin(angles)
e21, e22 = e12, -e11
num = xmat1 *e21 + ymat1*e22
den = s2o + xmat1*e11 + ymat1*e12
coord = tf.reshape (tf.math.atan2 (num, den)*s2d/u_size + u_off, [ -1 ] )
lower = tf.cast (tf.floor (coord), tf.int32)
upper = lower + 1
lower = tf.clip_by_value (lower, 0, nchans-1)
weights = coord - tf.cast (lower, dtype)
lower = tf.stack ([idx, tf.clip_by_value (lower, 0, nchans-1)], axis =1)
upper = tf.stack([idx, tf.clip_by_value (upper, 0, nchans-1) ], axis =1)
lim = tf.gather_nd (proj [iv0:iv1, :], lower )
uim = tf.gather_nd (proj [iv0:iv1, :], upper )
im1 = tf.reshape (lim * (1 - weights) + uim * weights , [ydim, xdim, angles_per_set] )
bim += tf.reduce_sum (im1/den/den , axis = -1)
bim *= s2o*pi/nreadings
bim = tf.cast (bim, dtype = tf.float32)
return bim
#tf.TensorSpec(shape=None, dtype=tf.int32)
@tf.function(
input_signature=[ tf.TensorSpec(shape=None, dtype=tf.float32), \
tf.TensorSpec(shape=None, dtype=tf.float32), \
tf.TensorSpec(shape=None, dtype=tf.float32) , \
tf.TensorSpec(shape=None, dtype=tf.int32) ])
def rcn_fbp (proj, x0, y0, apod ) :
proj_f, cosweight = apply_ramp_filter (proj, apod )
rcn = backprojection_nv (proj_f, x0, y0, 200, 200 )
return rcn
@tf.function(
input_signature=[ tf.TensorSpec(shape=None, dtype=tf.float32), \
tf.TensorSpec(shape=None, dtype=tf.float32), \
tf.TensorSpec(shape=None, dtype=tf.float32) , \
tf.TensorSpec(shape=None, dtype=tf.int32), \
tf.TensorSpec(shape=None, dtype=tf.int32), \
tf.TensorSpec(shape=None, dtype=tf.int32) ])
def rcn_fbp_d (proj, x0, y0, apod, xdim, ydim ) :
proj_f, cosweight = apply_ramp_filter (proj, apod )
rcn = backprojection_nv_d (proj_f, x0, y0, xdim, ydim)
return rcn
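# Usage sketch (illustrative): `proj` is a (nviews=1160, nchans=672) fan-beam sinogram and (x0, y0) the ROI corner.
# rcn_fbp(proj, x0, y0, apod) ramp-filters the projections (apod selects the apodization window) and backprojects
# a fixed 200x200 ROI; rcn_fbp_d additionally takes xdim/ydim and runs the backprojection geometry in float64.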
```
#### File: jingyanxu/Patient-specific_hyperparameter_learning_CT/utils_tfds.py
```python
import tensorflow as tf
import random
def parse_function (fname_in, fname_label, nchans = 672, nviews = 1160, header = 8369) :
imagestring = tf.strings.substr(tf.io.read_file (fname_in), pos=header, len=nchans*nviews*4)
image = tf.reshape(tf.io.decode_raw (imagestring, tf.float32 ), [nviews, nchans, 1])
labelstring = tf.strings.substr (tf.io.read_file (fname_label), pos = header, len=nchans*nviews*4)
label = tf.reshape(tf.io.decode_raw (labelstring, tf.float32 ), [nviews, nchans ])
return image, label, fname_label
def parse_function_fbp (fname_in, fname_label, nchans = 672, nviews = 1160, xdim = 512, header = 8369) :
imagestring = tf.strings.substr(tf.io.read_file (fname_in), pos=header, len=nchans*nviews*4)
image = tf.reshape(tf.io.decode_raw (imagestring, tf.float32 ), [nviews, nchans, 1])
labelstring = tf.strings.substr (tf.io.read_file (fname_label), pos = header, len=xdim*xdim*4)
label = tf.reshape(tf.io.decode_raw (labelstring, tf.float32 ), [xdim, xdim ])
return image, label, fname_label
def train_preprocess2 (image, label, label_fname) :
maxval = tf.math.reduce_max (image)
minval = tf.math.reduce_min (image)
image_s = (image - minval)/ (maxval - minval)
label = label
return image_s, label, tf.squeeze (image ) , label_fname
def parse_fnames (fnames_train, fnames_test, num_train, num_val, num_test) :
f = open (fnames_train, 'r')
lines = f.read().splitlines()
f.close ()
sublines = random.sample (lines, num_train+num_val)
fnames_ny = [x.split ()[1] for x in sublines]
fnames_label = [x.split ()[0] for x in sublines]
total = len (lines)
print ("total_train: %g, num_train: %g, num_val: %g" % (total, num_train, num_val ))
train_fnames_ny, train_fnames_label = fnames_ny[0:num_train] , fnames_label[0:num_train]
val_fnames_ny, val_fnames_label = fnames_ny[num_train:num_train+num_val] , \
fnames_label[num_train:num_train+num_val]
del lines, sublines, fnames_ny, fnames_label
f = open (fnames_test, 'r')
lines = f.read().splitlines()
f.close ()
# sublines = random.sample (lines, num_test)
sublines = lines [0: num_test]
fnames_ny = [x.split ()[1] for x in sublines]
fnames_label = [x.split ()[0] for x in sublines]
total = len (lines)
print ("total_test: %g, num_test: %g" % (total, num_test ))
test_fnames_ny, test_fnames_label = fnames_ny , fnames_label
return train_fnames_ny, train_fnames_label, val_fnames_ny, val_fnames_label, test_fnames_ny, test_fnames_label
# do not change batch_size for now ,
def load_data_fbp (train_name, test_name, num_train, num_val, num_test, batch_size = 1) :
train_fnames_ny, train_fnames_label, val_fnames_ny, val_fnames_label, test_fnames_ny, test_fnames_label = \
parse_fnames (train_name, test_name, num_train, num_val, num_test)
with tf.device ('/cpu:0') :
train_dataset = tf.data.Dataset.from_tensor_slices((train_fnames_ny, train_fnames_label))
train_dataset = train_dataset.shuffle(num_train).repeat(1)
train_dataset = train_dataset.map(parse_function_fbp, num_parallel_calls=4)
train_dataset = train_dataset.map(train_preprocess2, num_parallel_calls=4)
train_dataset = train_dataset.batch(batch_size, drop_remainder=True )
# train_dataset = train_dataset.unbatch ()
train_dataset = train_dataset.prefetch(batch_size)
val_dataset = tf.data.Dataset.from_tensor_slices((val_fnames_ny, val_fnames_label))
val_dataset = val_dataset.map(parse_function_fbp, num_parallel_calls=4)
val_dataset = val_dataset.map(train_preprocess2, num_parallel_calls=4)
val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
# val_dataset = val_dataset.unbatch ()
val_dataset = val_dataset.prefetch(1 )
test_dataset = tf.data.Dataset.from_tensor_slices((test_fnames_ny, test_fnames_label))
test_dataset = test_dataset.map(parse_function_fbp, num_parallel_calls=4)
test_dataset = test_dataset.map(train_preprocess2, num_parallel_calls=4)
test_dataset = test_dataset.batch(batch_size, drop_remainder=True)
# test_dataset = test_dataset.unbatch ()
test_dataset = test_dataset.prefetch(1 )
return train_dataset, val_dataset, test_dataset
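# Note on the name-list format (inferred from parse_fnames above): each line holds two whitespace-separated
# paths, "<label_path> <noisy_input_path>"; column 0 is parsed as the label and column 1 as the network input.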
``` |
{
"source": "JingYeoh/bazel-common",
"score": 2
} |
#### File: tools/jarjar/jarjar.bzl
```python
def _jarjar_library(ctx):
ctx.actions.write(
output = ctx.outputs._rules_file,
content = "\n".join(ctx.attr.rules),
)
jar_files = depset(transitive = [jar.files for jar in ctx.attr.jars]).to_list()
# TODO(dpb): Extract this command to a separate shell script file
command = """
JAVA_HOME="$(cd "{java_home}" && pwd)" # this is used outside of the root
TMPDIR=$(mktemp -d)
for jar in {jars}; do
unzip -qq -B $jar -d $TMPDIR
done
pushd $TMPDIR &>/dev/null
# Concatenate similar files in META-INF that allow it.
mergeMetaInfFiles=(services/.* {merge_meta_inf_files})
findCmd=(find)
if [[ "$(uname -s)" == "Darwin" ]]; then
# Mac uses BSD find, which requires extra args for regex matching.
findCmd+=(-E)
findGroup='(~[0-9]*)?'
else
# Default to GNU find, which must escape parentheses.
findGroup='\\(~[0-9]*\\)?'
fi
for metaInfPattern in ${{mergeMetaInfFiles[@]}}; do
regexPattern="META-INF/${{metaInfPattern}}${{findGroup}}"
for file in $("${{findCmd[@]}}" META-INF -regex "$regexPattern"); do
original=$(echo $file | sed s/"~[0-9]*$"//)
if [[ "$file" != "$original" ]]; then
cat $file >> $original
rm $file
fi
done
done
rm META-INF/MANIFEST.MF*
rm -rf META-INF/maven/
duplicate_files=$(find * -type f -regex ".*~[0-9]*$")
if [[ -n "$duplicate_files" ]]; then
echo "Error: duplicate files in merged jar: $duplicate_files"
exit 1
fi
$JAVA_HOME/bin/jar cf combined.jar *
popd &>/dev/null
{jarjar} process {rules_file} $TMPDIR/combined.jar {outfile}
rm -rf $TMPDIR
""".format(
jars = " ".join([jar.path for jar in jar_files]),
java_home = str(ctx.attr._jdk[java_common.JavaRuntimeInfo].java_home),
jarjar = ctx.executable._jarjar.path,
rules_file = ctx.outputs._rules_file.path,
outfile = ctx.outputs.jar.path,
merge_meta_inf_files = " ".join(ctx.attr.merge_meta_inf_files),
)
ctx.actions.run_shell(
command = command,
inputs = [ctx.outputs._rules_file] + jar_files + ctx.files._jdk,
outputs = [ctx.outputs.jar],
tools = [ctx.executable._jarjar],
)
_jarjar_library_attrs = {
"rules": attr.string_list(),
"jars": attr.label_list(
allow_files = [".jar"],
),
"merge_meta_inf_files": attr.string_list(
allow_empty = True,
default = [],
mandatory = False,
doc = """A list of regular expressions that match files relative to the
META-INF directory that will be merged into the output jar, in addition
to files in META-INF/services. To add all files in META-INF/foo, for
example, use "foo/.*".""",
),
}
# Additional attributes only used in opensource builds
_jarjar_library_attrs.update({
"_jarjar": attr.label(
default = Label("//tools/jarjar"),
executable = True,
cfg = "host",
),
"_jdk": attr.label(
default = Label("@bazel_tools//tools/jdk:current_java_runtime"),
providers = [java_common.JavaRuntimeInfo],
),
})
jarjar_library = rule(
attrs = _jarjar_library_attrs,
outputs = {
"jar": "%{name}.jar",
"_rules_file": "%{name}.jarjar_rules",
},
implementation = _jarjar_library,
)
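# Example BUILD usage (illustrative; the load path and labels below are assumptions, not part of this repo's docs):
#
#   load("//tools/jarjar:jarjar.bzl", "jarjar_library")
#
#   jarjar_library(
#       name = "shaded_deps",
#       jars = [":foo_deploy.jar", "@maven//:com_google_guava_guava"],
#       rules = ["rule com.google.common.** repackaged.com.google.common.@1"],
#       merge_meta_inf_files = ["foo/.*"],
#   )
#
# This would produce shaded_deps.jar with the input jars merged and classes repackaged per the jarjar rules.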
``` |
{
"source": "JinGyeSetBirdsFree/FudanOCR",
"score": 3
} |
#### File: component/convnet/resnet.py
```python
import torch
import torch.nn as nn
import numpy as np
import torch.nn.init as init
import torch.nn.functional as F
class BasicBlock(nn.Module):
    '''
    Residual block used to build the ResNet.
    '''
def __init__(self, inplanes, planes,downsample):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample != None:
residual = self.downsample(residual)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, num_in, block, layers):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(num_in, 32, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(32)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2d(64)
self.relu2 = nn.ReLU(inplace=True)
self.layer1_pool = nn.MaxPool2d((2, 2), (2, 2))
self.layer1 = self._make_layer(block, 64,128, layers[0])
self.layer1_conv = nn.Conv2d(128, 128, 3, 1, 1)
self.layer1_bn = nn.BatchNorm2d(128)
self.layer1_relu = nn.ReLU(inplace=True)
self.layer2_pool = nn.MaxPool2d((2, 2), (2, 2))
self.layer2 = self._make_layer(block, 128,256, layers[1])
self.layer2_conv = nn.Conv2d(256, 256, 3, 1, 1)
self.layer2_bn = nn.BatchNorm2d(256)
self.layer2_relu = nn.ReLU(inplace=True)
self.layer3_pool = nn.MaxPool2d((2, 2), (2, 1), (0, 1))
self.layer3 = self._make_layer(block, 256,512, layers[2])
self.layer3_conv = nn.Conv2d(512, 512, 3, 1, 1)
self.layer3_bn = nn.BatchNorm2d(512)
self.layer3_relu = nn.ReLU(inplace=True)
self.layer4 = self._make_layer(block, 512,512, layers[3])
self.layer4_conv1 = nn.Conv2d(512, 512, (2,2),(2,1),(0,1))
self.layer4_conv1_bn = nn.BatchNorm2d(512)
self.layer4_conv1_relu = nn.ReLU(inplace=True)
self.layer4_conv2 = nn.Conv2d(512, 512, 2,1,0)
self.layer4_conv2_bn = nn.BatchNorm2d(512)
self.layer4_conv2_relu = nn.ReLU(inplace=True)
# Official init from torch repo
print("Initializing ResNet18 weights...")
for m in self.modules():
if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight.data)  # in-place variant; init.kaiming_normal is deprecated
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, inplanes ,planes, blocks):
if inplanes != planes:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes , 3,1,1),
nn.BatchNorm2d(planes), )
else:
downsample = None
layers = []
layers.append(block(inplanes, planes,downsample))
# self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(planes, planes,downsample=None))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.layer1_pool(x)
x = self.layer1(x)
x = self.layer1_conv(x)
x = self.layer1_bn(x)
x = self.layer1_relu(x)
x = self.layer2_pool(x)
x = self.layer2(x)
x = self.layer2_conv(x)
x = self.layer2_bn(x)
x = self.layer2_relu(x)
x = self.layer3_pool(x)
x = self.layer3(x)
x = self.layer3_conv(x)
x = self.layer3_bn(x)
x = self.layer3_relu(x)
x = self.layer4(x)
x = self.layer4_conv1(x)
x = self.layer4_conv1_bn(x)
x = self.layer4_conv1_relu(x)
x = self.layer4_conv2(x)
x = self.layer4_conv2_bn(x)
x = self.layer4_conv2_relu(x)
return x
def getResNet18():
model = ResNet(num_in=1, block=BasicBlock, layers=[1, 2, 5, 3])
return model
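# Shape sketch (illustrative): for a grayscale text crop of height 32, e.g. a (N, 1, 32, 100) tensor, the two
# (2, 2) poolings and the two (2, 1)-strided stages collapse the height 32 -> 16 -> 8 -> 4 -> 2 -> 1 while only
# roughly quartering the width, so the output is a (N, 512, 1, W') feature map suitable for a sequence decoder.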
```
#### File: component/convnet/vgg16.py
```python
import torch
import torch.nn as nn
def getVGG16(opt):
'''cnn'''
nc = opt.IMAGE.IMG_CHANNEL
'''
    nm: channel number
ks: kernel size
ps: padding size
ss: stride size
'''
nm = [64, 128, 256, 256, 512, 512, 512]
ks = [3, 3, 3, 3, 3, 3, 2]
ps = [1, 1, 1, 1, 1, 1, 0]
ss = [1, 1, 1, 1, 1, 1, 1]
cnn = nn.Sequential()
def convRelu(i, batchNormalization=False, leakyRelu=False):
nIn = nc if i == 0 else nm[i - 1]
nOut = nm[i]
cnn.add_module('conv{0}'.format(i),
nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
if batchNormalization:
cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
if leakyRelu:
cnn.add_module('relu{0}'.format(i), nn.LeakyReLU(0.2, inplace=True))
else:
cnn.add_module('relu{0}'.format(i), nn.ReLU(True))
# 32 * 100
convRelu(0, False)
# 32 * 100
cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))
# 16 * 50
convRelu(1, False)
# 16 * 50
cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))
# 8 * 25
convRelu(2, True)
convRelu(3, False)
# 8 * 25
cnn.add_module('pooling{0}'.format(2), nn.MaxPool2d((2, 2), (2, 1), (0, 1)))
# # 4 * 27
convRelu(4, True)
convRelu(5, False)
# 4 * 27
cnn.add_module('pooling{0}'.format(3), nn.MaxPool2d((2, 2), (2, 1), (0, 1)))
# 2 * 29
convRelu(6, True)
# 1 * ?
    # That is, for an input image of height 32, the feature map height becomes 1 after the conv stack.
return cnn
```
#### File: FudanOCR/data/generator.py
```python
class Generator(object):
"""An abstract class for text generator.
一个抽象类,用于数据集的生成,等待被实现
"""
def __getitem__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class CnumberGenerator(Generator):
def __init__(self):
self.cnum = cnumber()
def __len__(self):
return 128000
def __getitem__(self, index):
num = random.randint(100, 9999999)
if random.randint(0, 1):
num = num / 100.0
return self.cnum.cwchange(num)
class TextGenerator(Generator):
"""Invoice message txt generator
args:
texts: File path which contains
"""
def __init__(self, texts, len_thr):
super(TextGenerator, self).__init__()
self.len_thr = len_thr
with open(texts) as f:
self.texts = f.readlines()
def __getitem__(self, index):
text_len = len(self.texts[index])
if text_len > self.len_thr:
text_len = self.len_thr
return self.texts[index].strip()[0:text_len]
def __len__(self):
return len(self.texts)
def __len_thr__(self):
return self.len_thr
class PasswordGenerator(Generator):
def __init__(self):
self.fake = Faker()
self.fake.random.seed(4323)
def __getitem__(self, index):
return self.fake.password(length=10, special_chars=True, digits=True, upper_case=True, lower_case=True)
def __len__(self):
return 320000
class HyperTextGenerator(Generator):
def __init__(self, texts):
self.invoice_gen = TextGenerator(texts)
# self.passwd_gen = PasswordGenerator()
self.cnum_gen = CnumberGenerator()
def __getitem__(self, index):
rnd = random.randint(0, 1)
if rnd:
cur = index % self.invoice_gen.__len__()
return self.invoice_gen.__getitem__(cur)
else:
return self.cnum_gen.__getitem__(index)
def __len__(self):
return self.invoice_gen.__len__() + self.cnum_gen.__len__()
```
#### File: demo/client_server/e2e_demo.py
```python
import sys
sys.path.append('/home/cjy/FudanOCR/recognition_model')
sys.path.append('/home/cjy/FudanOCR/maskrcnn_benmark_architecture')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import os
import requests
from io import BytesIO
from PIL import Image
import numpy as np
import cv2
# GRCNN components used to build the recognition model
from GRCNN.models import crann
from GRCNN.utils import util
from GRCNN.utils import keys
import yaml
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
# ?
# this makes our figures bigger
pylab.rcParams['figure.figsize'] = 20, 12
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
from PIL import Image
_DEBUG = True
# The two blocks below handle model initialization
############## det model init ##############
# The detection model is built through a high-level wrapper interface, which keeps this part simple;
# it is configured by passing in a yaml file.
print('Initializing detection model...')
config_file = "/home/cjy/FudanOCR/maskrcnn_benmark_architecture/configs/text_maskrcnn_res50_fpn_lsvt.yaml"
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
coco_demo = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.8,
)
print('done')
############################################
############## rec model init ##############
# Initialize the recognition model
# Load the parameters; yaml.load turns the config file into a dict
print('Initializing recognition model...')
config_yaml = '/home/cjy/FudanOCR/config/GRCNN_test.yaml'
f = open(config_yaml)
opt = yaml.load(f)
# Hyperparameters
fixed_height = 32
if opt['N_GPU'] > 1:
opt['RNN']['multi_gpu'] = True
else:
opt['RNN']['multi_gpu'] = False
alphabet = keys.alphabet
nClass = len(alphabet) + 1
converter = util.strLabelConverter(alphabet)
# Build the recognition model this way
rec_model = crann.CRANN(opt, nClass).cuda()
# The checkpoint path is already provided in the config
model_path = opt['CRANN']
# Load the existing checkpoint parameters
if os.path.isfile(opt['CRANN']):
print("=> loading checkpoint '{}'".format(model_path))
checkpoint = torch.load(model_path)
start_epoch = checkpoint['epoch']
# best_pred = checkpoint['best_pred']
rec_model.load_state_dict(checkpoint['state_dict'])
# print("=> loaded checkpoint '{}' (epoch {} accuracy {})"
# .format(model_path, checkpoint['epoch'], best_pred))
# if _DEBUG:
# print('rec_model:', rec_model)
# Switch to eval mode for testing
rec_model.eval()
print('done')
############################################
def normalize(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = Image.fromarray(img)
img = torch.from_numpy(img).float().div(255)
img.sub_(0.5).div_(0.5)
return img
def load(url):
"""
Given an url of an image, downloads the image and
returns a PIL image
"""
response = requests.get(url)
pil_image = Image.open(BytesIO(response.content)).convert("RGB")
# convert to BGR format
image = np.array(pil_image)[:, :, [2, 1, 0]]
return image
def load_img(impath):
"""
Given an url of an image, downloads the image and
returns a PIL image
"""
# response = requests.get(url)
pil_image = Image.open(impath).convert("RGB")
# convert to BGR format
image = np.array(pil_image)[:, :, [2, 1, 0]]
return image
def imshow(img):
plt.imshow(img[:, :, [2, 1, 0]])
plt.axis("off")
def rotate(image, angle, ctr):
# convert to cv2 image
image = np.array(image)
(h, w) = image.shape[:2]
scale = 1.0
# set the rotation center
center = (int(ctr[0]), int(ctr[1]))
# anti-clockwise angle in the function
M = cv2.getRotationMatrix2D(center, angle, scale)
image = cv2.warpAffine(image, M, (w, h))
return image
# image process
# Core end-to-end processing function
def improc(impath):
# image_dir = ''
# imlist = os.listdir(image_dir)
# for imname in imlist:
# impath = os.path.join(image_dir, imname)
# from http://cocodataset.org/#explore?id=345434
image = load_img(impath)
# imshow(image)
    # dict holding the returned results
result_dict = {}
# compute predictions
    # run detection to get predictions
res_im, predictions = coco_demo.run_on_opencv_image(image)
# print('predictions:', predictions.shape)
    # extract the mask field from predictions
masks = predictions.get_field('mask')
masks_np = masks.data.cpu().numpy()
print('masks_np:', masks_np.shape)
    # result lists to be filled in below
rboxes = []
rcrops = []
rpolys = []
recs = []
for i in range(masks_np.shape[0]):
mask_np = masks_np[i][0]
contours = cv2.findContours((mask_np * 1).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for pts in contours[1]:
poly = pts.reshape(-1, 2)
if poly.shape[0] >= 4:
# print('polygon:', poly.shape[0])
rect = cv2.minAreaRect(poly)
poly_q = np.array(cv2.boxPoints(rect), np.int)
# print('rect:', rect)
poly_q = poly_q.reshape(-1)
pt1 = (int(poly_q[0]), int(poly_q[1]))
pt2 = (int(poly_q[2]), int(poly_q[3]))
pt3 = (int(poly_q[4]), int(poly_q[5]))
pt4 = (int(poly_q[6]), int(poly_q[7]))
edge1 = np.sqrt((pt1[0] - pt2[0]) * (pt1[0] - pt2[0]) + (pt1[1] - pt2[1]) * (pt1[1] - pt2[1]))
edge2 = np.sqrt((pt2[0] - pt3[0]) * (pt2[0] - pt3[0]) + (pt2[1] - pt3[1]) * (pt2[1] - pt3[1]))
angle = 0
if edge1 > edge2:
width = edge1
height = edge2
if pt1[0] - pt2[0] != 0:
angle = -np.arctan(float(pt1[1] - pt2[1]) / float(pt1[0] - pt2[0])) / 3.1415926 * 180
else:
angle = 90.0
elif edge2 >= edge1:
width = edge2
height = edge1
# print pt2[0], pt3[0]
if pt2[0] - pt3[0] != 0:
angle = -np.arctan(float(pt2[1] - pt3[1]) / float(pt2[0] - pt3[0])) / 3.1415926 * 180
else:
angle = 90.0
if angle < -45.0:
angle = angle + 180
x_ctr = float(pt1[0] + pt3[0]) / 2 # pt1[0] + np.abs(float(pt1[0] - pt3[0])) / 2
y_ctr = float(pt1[1] + pt3[1]) / 2 # pt1[1] + np.abs(float(pt1[1] - pt3[1])) / 2
# if height * width * (800 / float(img.shape[0])) < 16 * 16 and mode == "train":
# continue
rboxes.append([x_ctr, y_ctr, width, height, angle])
rcrops.append(
rotate(image, -angle, (x_ctr, y_ctr))
[int(y_ctr-height/2):int(y_ctr+height/2), int(x_ctr-width/2):int(x_ctr+width/2)]
)
rpolys.append(poly.tolist())
result_dict['polys'] = rpolys
cnt = 0
    # each crop is a cropped text region image
    # run recognition on each crop,
    # fixing the image height to 32 and rescaling the width proportionally
for crop in rcrops:
# rec model infer
try:
re_img = cv2.resize(crop, (int(fixed_height / crop.shape[0] * crop.shape[1]), fixed_height))
except Exception as e:
            # resizing failed; record an empty result for this crop
print('From rec:', e)
recs.append('')
continue
        # save the crop image when _DEBUG is on
if _DEBUG:
# cv2.imwrite('demo_img/crops' + str(cnt) + '.img', re_img)
re_img_pil = Image.fromarray(cv2.cvtColor(re_img, cv2.COLOR_RGB2BGR))
re_img_pil.save('demo_img/crops' + str(cnt) + '.jpg')
# cv2.waitKey(0)
cnt += 1
# re_img_th = torch.from_numpy(np.transpose(np.expand_dims(re_img, 0), (0, 3, 1, 2))).float().cuda()
re_img_th = normalize(re_img).unsqueeze(0).permute(0, 3, 1, 2).cuda()
bsz = re_img_th.size(0)
        # run the recognition model
predict = rec_model(re_img_th)
predict_len = torch.IntTensor([predict.size(0)] * bsz)
# if _DEBUG:
# print('predict:', predict.size())
# Compute accuracy
_, acc = predict.max(2)
# if int(torch.__version__.split('.')[1]) < 2:
# acc = acc.squeeze(2)
acc = acc.transpose(1, 0).contiguous().view(-1)
prob, _ = F.softmax(predict, dim=2).max(2)
probilities = torch.mean(prob, dim=1)
sim_preds = converter.decode(acc.data, predict_len.data, raw=False)
# if _DEBUG:
# print('sim_preds:', sim_preds)
recs.append(sim_preds)
# organize results in a dict
result_dict['recs'] = recs
return result_dict
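# Usage sketch (illustrative): result = improc('/path/to/image.jpg') returns a dict with result['polys']
# (detected text polygons) and result['recs'] (the recognized string for each cropped region).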
```
#### File: model/demo/IC13_predict.py
```python
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '6'
import sys
sys.path.append('./maskrcnn_benchmark_architecture')
print(sys.path)
import cv2
import numpy as np
from maskrcnn_benchmark.config import cfg
from demo.predictor import ICDARDemo
def write_result_ICDAR(im_file, dets, result_dir):
file_spl = im_file.split('/')
file_name = file_spl[len(file_spl) - 1]
file_name_arr = file_name.split(".")
file_name_str = file_name_arr[0]
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
result = os.path.join(result_dir, "res_" + file_name_str + ".txt")
return_bboxes = []
if not os.path.isfile(result):
os.mknod(result)
result_file = open(result, "w")
result_str = ""
for idx in range(len(dets)):
l, t, r, b = dets[idx].astype(np.int32)[0:4]
rotated_pts = [
[l, t], [r, t], [r, b], [l, b]
]
#det_str = str(int(rotated_pts[0][0])) + "," + str(int(rotated_pts[0][1])) + "," + \
# str(int(rotated_pts[1][0])) + "," + str(int(rotated_pts[1][1])) + "," + \
# str(int(rotated_pts[2][0])) + "," + str(int(rotated_pts[2][1])) + "," + \
# str(int(rotated_pts[3][0])) + "," + str(int(rotated_pts[3][1])) + "\r\n"
# rotated_pts = rotated_pts[:,0:2]
# if (dets[idx][5] > threshold):
# rotated_pts = over_bound_handle(rotated_pts, height, width)
det_str = str(int(l)) + "," + str(int(t)) + "," + \
str(int(r)) + "," + str(int(b)) + "\r\n"
result_str = result_str + det_str
return_bboxes.append(dets[idx])
# print rotated_pts.shape
result_file.write(result_str)
result_file.close()
return return_bboxes
model_file = 'text_IC13'
result_dir = os.path.join('./demo/results', model_file)
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
config_file = "./maskrcnn_benchmark_architecture/configs/e2e_faster_rcnn_R_50_C4_1x_ICDAR13_test.yaml"
print('config_file:', config_file)
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
coco_demo = ICDARDemo(
cfg,
min_image_size=800,
confidence_threshold=0.7,
)
# load image and then run prediction
image_dir = './maskrcnn_benchmark_architecture/datasets/ICDAR13/e2e/Challenge2_Test_Task12_Images/'
imlist = os.listdir(image_dir)
for image in imlist:
impath = os.path.join(image_dir, image)
print('image:', impath)
img = cv2.imread(impath)
predictions, bounding_boxes = coco_demo.run_on_opencv_image(img)
# print('predictions:', predictions.shape)
bboxes_np = bounding_boxes.bbox.data.cpu().numpy()
write_result_ICDAR(image[:-4], bboxes_np, result_dir)
#cv2.imshow('win', predictions)
#cv2.waitKey(0)
```
#### File: AdvancedEAST/network/AEast.py
```python
import torch
import torch.nn as nn
from model.detection_model.AdvancedEAST.network.resnet import resnet50
class East(nn.Module):
def __init__(self, cfg):
super(East, self).__init__()
self.resnet = resnet50(pretrained=True)
self.conv1 = nn.Conv2d(3072, 128, 1)
self.bn1 = nn.BatchNorm2d(128)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(128, 128, 3, padding=1)
self.bn2 = nn.BatchNorm2d(128)
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv2d(640, 64, 1)
self.bn3 = nn.BatchNorm2d(64)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(64, 64, 3, padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(320, 64, 1)
self.bn5 = nn.BatchNorm2d(64)
self.relu5 = nn.ReLU()
self.conv6 = nn.Conv2d(64, 32, 3, padding=1)
self.bn6 = nn.BatchNorm2d(32)
self.relu6 = nn.ReLU()
self.conv7 = nn.Conv2d(32, 32, 3, padding=1)
self.bn7 = nn.BatchNorm2d(32)
self.relu7 = nn.ReLU()
self.conv8 = nn.Conv2d(32, 1, 1)
self.conv9 = nn.Conv2d(32, 2, 1)
self.conv10 = nn.Conv2d(32, 4, 1)
self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear')
self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear')
self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear')
def forward(self, images):
f = self.resnet(images)
h = f[3] # bs 2048 w/32 h/32
g = self.unpool1(h) # bs 2048 w/16 h/16
c = self.conv1(torch.cat((g, f[2]), 1))
c = self.bn1(c)
c = self.relu1(c)
h = self.conv2(c) # bs 128 w/16 h/16
h = self.bn2(h)
h = self.relu2(h)
g = self.unpool2(h) # bs 128 w/8 h/8
c = self.conv3(torch.cat((g, f[1]), 1))
c = self.bn3(c)
c = self.relu3(c)
h = self.conv4(c) # bs 64 w/8 h/8
h = self.bn4(h)
h = self.relu4(h)
g = self.unpool3(h) # bs 64 w/4 h/4
c = self.conv5(torch.cat((g, f[0]), 1))
c = self.bn5(c)
c = self.relu5(c)
h = self.conv6(c) # bs 32 w/4 h/4
h = self.bn6(h)
h = self.relu6(h)
g = self.conv7(h) # bs 32 w/4 h/4
g = self.bn7(g)
g = self.relu7(g)
inside_score = self.conv8(g) # bs 1 w/4 h/4
side_v_code = self.conv9(g)
side_v_coord = self.conv10(g)
east_detect = torch.cat((inside_score, side_v_code, side_v_coord), 1)
# transpose for loss calculation
return east_detect.transpose(1, 2).transpose(2, 3)
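# Output sketch (illustrative): predictions are made at 1/4 of the input resolution, so a (N, 3, 256, 256)
# batch yields a (N, 64, 64, 7) tensor after the final transpose: 1 inside-score channel, 2 side-vertex-code
# channels and 4 side-vertex-coordinate channels.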
```
#### File: AdvancedEAST/tools/polygon_wrapper.py
```python
import numpy as np
from skimage.draw import polygon
"""
:param det_x: [1, N] Xs of detection's vertices
:param det_y: [1, N] Ys of detection's vertices
:param gt_x: [1, N] Xs of groundtruth's vertices
:param gt_y: [1, N] Ys of groundtruth's vertices
##############
All the calculation of 'AREA' in this script is handled by:
1) First generating a binary mask with the polygon area filled up with 1's
2) Summing up all the 1's
"""
def area(x, y):
"""
This helper calculates the area given x and y vertices.
"""
ymax = np.max(y)
xmax = np.max(x)
bin_mask = np.zeros((ymax, xmax))
rr, cc = polygon(y, x)
bin_mask[rr, cc] = 1
area = np.sum(bin_mask)
return area
# return np.round(area, 2)
def approx_area_of_intersection(det_x, det_y, gt_x, gt_y):
"""
This helper determine if both polygons are intersecting with each others with an approximation method.
Area of intersection represented by the minimum bounding rectangular [xmin, ymin, xmax, ymax]
"""
det_ymax = np.max(det_y)
det_xmax = np.max(det_x)
det_ymin = np.min(det_y)
det_xmin = np.min(det_x)
gt_ymax = np.max(gt_y)
gt_xmax = np.max(gt_x)
gt_ymin = np.min(gt_y)
gt_xmin = np.min(gt_x)
all_min_ymax = np.minimum(det_ymax, gt_ymax)
all_max_ymin = np.maximum(det_ymin, gt_ymin)
intersect_heights = np.maximum(0.0, (all_min_ymax - all_max_ymin))
all_min_xmax = np.minimum(det_xmax, gt_xmax)
all_max_xmin = np.maximum(det_xmin, gt_xmin)
intersect_widths = np.maximum(0.0, (all_min_xmax - all_max_xmin))
return intersect_heights * intersect_widths
def area_of_intersection(det_x, det_y, gt_x, gt_y):
"""
This helper calculates the area of intersection.
"""
if approx_area_of_intersection(det_x, det_y, gt_x, gt_y) > 1: # only proceed if it passes the approximation test
ymax = np.maximum(np.max(det_y), np.max(gt_y)) + 1
xmax = np.maximum(np.max(det_x), np.max(gt_x)) + 1
bin_mask = np.zeros((ymax, xmax))
det_bin_mask = np.zeros_like(bin_mask)
gt_bin_mask = np.zeros_like(bin_mask)
rr, cc = polygon(det_y, det_x)
det_bin_mask[rr, cc] = 1
rr, cc = polygon(gt_y, gt_x)
gt_bin_mask[rr, cc] = 1
final_bin_mask = det_bin_mask + gt_bin_mask
inter_map = np.where(final_bin_mask == 2, 1, 0)
inter = np.sum(inter_map)
return inter
# return np.round(inter, 2)
else:
return 0
def iou(det_x, det_y, gt_x, gt_y):
"""
This helper determine the intersection over union of two polygons.
"""
# only proceed if it passes the approximation test
if approx_area_of_intersection(det_x, det_y, gt_x, gt_y) > 1:
ymax = np.maximum(np.max(det_y), np.max(gt_y)) + 1
xmax = np.maximum(np.max(det_x), np.max(gt_x)) + 1
bin_mask = np.zeros((ymax, xmax))
det_bin_mask = np.zeros_like(bin_mask)
gt_bin_mask = np.zeros_like(bin_mask)
rr, cc = polygon(det_y, det_x)
det_bin_mask[rr, cc] = 1
rr, cc = polygon(gt_y, gt_x)
gt_bin_mask[rr, cc] = 1
final_bin_mask = det_bin_mask + gt_bin_mask
# inter_map = np.zeros_like(final_bin_mask)
inter_map = np.where(final_bin_mask == 2, 1, 0)
inter = np.sum(inter_map)
# union_map = np.zeros_like(final_bin_mask)
union_map = np.where(final_bin_mask > 0, 1, 0)
union = np.sum(union_map)
return inter / float(union + 1.0)
else:
return 0
def iod(det_x, det_y, gt_x, gt_y):
"""
This helper determine the fraction of intersection area over detection area
"""
if approx_area_of_intersection(det_x, det_y, gt_x, gt_y) > 1: # only proceed if it passes the approximation test
ymax = np.maximum(np.max(det_y), np.max(gt_y)) + 1
xmax = np.maximum(np.max(det_x), np.max(gt_x)) + 1
bin_mask = np.zeros((ymax, xmax))
det_bin_mask = np.zeros_like(bin_mask)
gt_bin_mask = np.zeros_like(bin_mask)
rr, cc = polygon(det_y, det_x)
det_bin_mask[rr, cc] = 1
rr, cc = polygon(gt_y, gt_x)
gt_bin_mask[rr, cc] = 1
final_bin_mask = det_bin_mask + gt_bin_mask
inter_map = np.where(final_bin_mask == 2, 1, 0)
inter = np.round(np.sum(inter_map), 2)
det = np.round(np.sum(det_bin_mask), 2)
return inter / float(det + 1.0)
else:
return 0
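# --- Worked example (illustrative): two 10x10 squares overlapping by half ---
if __name__ == '__main__':
    det_x, det_y = [0, 10, 10, 0], [0, 0, 10, 10]    # detection square at the origin
    gt_x, gt_y = [5, 15, 15, 5], [0, 0, 10, 10]      # ground truth shifted right by 5
    print('iou ~', iou(det_x, det_y, gt_x, gt_y))    # roughly 1/3 (areas are rasterized, so not exact)
    print('iod ~', iod(det_x, det_y, gt_x, gt_y))    # roughly 1/2 of the detection area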
```
#### File: AdvancedEAST/utils/utils.py
```python
import os
import torch
import datetime
import numpy as np
from torch.nn import init  # needed by init_weights below
from tqdm import tqdm
import model.detection_model.AdvancedEAST.config as cfg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
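# Usage sketch: losses = AverageMeter(); losses.update(loss.item(), n=batch_size); losses.avg is the running mean.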
def save_log(losses, epoch, current_batch, loader_len, tock, split='Training'):
'''Save log of losses.'''
if not os.path.exists(cfg.result_dir):
os.mkdir(cfg.result_dir)
log_path = os.path.join(cfg.result_dir, tock + '-log.txt')
with open(log_path, 'a') as f:
line = 'Epoch: [{0}][{1}/{2}] {3} Loss {loss.val:.4f} ({loss.avg:.4f})\n'.format(
epoch + 1, current_batch, loader_len, split, loss=losses)
f.write(line)
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
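# NOTE: module class names are capitalized ('Conv2d', 'Linear'), so the lowercase 'conv' match below likely never fires for conv layers.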
if hasattr(m, 'weight') and (classname.find('conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError(
'init_type [%s] not implemented.' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func)
```
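A minimal usage sketch for `AverageMeter` with made-up batch losses; it assumes the class above is already in scope (the import path depends on the repo layout).
```python
# Track a running loss over mini-batches of different sizes.
meter = AverageMeter()
for batch_loss, batch_size in [(0.9, 8), (0.7, 8), (0.5, 4)]:
    meter.update(batch_loss, n=batch_size)

print(meter.val)  # last value: 0.5
print(meter.avg)  # weighted mean: (0.9*8 + 0.7*8 + 0.5*4) / 20 = 0.74
```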
#### File: EAST/calculate/Pascal_VOC.py
```python
from os import listdir
from scipy import io
import json
import numpy as np
from polygon_wrapper import iou
from polygon_wrapper import iod
input_json_path = '/workspace/mnt/group/general-reg/denglei/code/EAST/calculate/LSVT_result_gtgen.json'
gt_json_path = '/workspace/mnt/group/general-reg/denglei/code/EAST/calculate/train_full_labels.json'
iou_threshold = 0.5
def input_reading(polygons):
det = []
for polygon in polygons:
polygon['points'] = np.array(polygon['points'])
det.append(polygon)
return det
def gt_reading(gt_dict, img_key):
polygons = gt_dict[img_key]
gt = []
for polygon in polygons:
polygon['points'] = np.array(polygon['points'])
gt.append(polygon)
return gt
def detection_filtering(detections, groundtruths, threshold=0.5):
"""ignore detected illegal text region"""
before_filter_num = len(detections)
for gt_id, gt in enumerate(groundtruths):
if (gt['transcription'] == '###') and (gt['points'].shape[1] > 1):
gt_x = list(map(int, np.squeeze(gt['points'][:, 0])))
gt_y = list(map(int, np.squeeze(gt['points'][:, 1])))
for det_id, detection in enumerate(detections):
det_x = list(map(int, np.squeeze(detection['points'][:, 0])))
det_y = list(map(int, np.squeeze(detection['points'][:, 1])))
det_gt_iou = iod(det_x, det_y, gt_x, gt_y)
if det_gt_iou > threshold:
detections[det_id] = []
detections[:] = [item for item in detections if item != []]
# if before_filter_num - len(detections) > 0:
# print("Ignore {} illegal detections".format(before_filter_num - len(detections)))
return detections
def gt_filtering(groundtruths):
before_filter_num = len(groundtruths)
for gt_id, gt in enumerate(groundtruths):
if gt['transcription'] == '###' or gt['points'].shape[0] < 3:
groundtruths[gt_id] = []
groundtruths[:] = [item for item in groundtruths if item != []]
# if before_filter_num - len(groundtruths) > 0:
# print("Ignore {} illegal groundtruths".format(before_filter_num - len(groundtruths)))
return groundtruths
def output(json_target,json_gt):
# Initial config
global_tp = 0
global_fp = 0
global_fn = 0
# load json file as dict
with open(json_target, 'r') as f:
input_dict = json.load(f)
with open(json_gt, 'r') as f:
gt_dict = json.load(f)
false = open('./false.txt', 'w')
for input_img_key, input_cnts in input_dict.items():
detections = input_reading(input_cnts)
groundtruths = gt_reading(gt_dict, input_img_key.replace('res', 'gt'))
detections = detection_filtering(detections, groundtruths) # filters detections overlapping with DC area
groundtruths = gt_filtering(groundtruths)
iou_table = np.zeros((len(groundtruths), len(detections)))
det_flag = np.zeros((len(detections), 1))
gt_flag = np.zeros((len(groundtruths), 1))
tp = 0
fp = 0
fn = 0
for gt_id, gt in enumerate(groundtruths):
gt_x = list(map(int, np.squeeze(gt['points'][:, 0])))
gt_y = list(map(int, np.squeeze(gt['points'][:, 1])))
if len(detections) > 0:
for det_id, detection in enumerate(detections):
det_x = list(map(int, np.squeeze(detection['points'][:, 0])))
det_y = list(map(int, np.squeeze(detection['points'][:, 1])))
iou_table[gt_id, det_id] = iou(det_x, det_y, gt_x, gt_y)
best_matched_det_id = np.argmax(
iou_table[gt_id, :]) # identified the best matched detection candidates with current groundtruth
matched_id = np.where(iou_table[gt_id, :] >= iou_threshold)
if iou_table[gt_id, best_matched_det_id] >= iou_threshold:
if matched_id[0].shape[0] < 2:
tp = tp + 1.0
global_tp = global_tp + 1.0
det_flag[best_matched_det_id] = 1
gt_flag[gt_id] = 1
else:
tp = tp + 1.0
global_tp = global_tp + 1.0
det_flag[best_matched_det_id] = 1
gt_flag[gt_id] = 1
# if there are more than 1 matched detection, only 1 is contributed to tp, the rest are fp
fp = fp + (matched_id[0].shape[0] - 1.0)
# Update local and global tp, fp, and fn
inv_gt_flag = 1 - gt_flag
fn = np.sum(inv_gt_flag)
inv_det_flag = 1 - det_flag
fp = fp + np.sum(inv_det_flag)
global_fp = global_fp + fp
global_fn = global_fn + fn
if tp + fp == 0:
local_precision = 0
else:
local_precision = tp / (tp + fp)
if tp + fn == 0:
local_recall = 0
else:
local_recall = tp / (tp + fn)
if local_precision == 0 and local_recall == 0:
false.write('hello')
false.write(input_img_key.replace('res', 'gt') + '.jpg')
false.write('\n')
else:
local_fscore = 2 * local_precision * local_recall / (local_precision + local_recall)
if local_fscore < 0.3:
false.write(input_img_key.replace('res', 'gt') + '.jpg')
false.write('\n')
print('{0:12} Precision: {1:.4f}, Recall: {2:.4f}'.format(input_img_key.replace('res', 'gt') + '.jpg',
local_precision, local_recall))
global_precision = global_tp / (global_tp + global_fp)
global_recall = global_tp / (global_tp + global_fn)
f_score = 2 * global_precision * global_recall / (global_precision + global_recall)
print('Global Precision: {:.4f}, Recall: {:.4f}, F_score: {:.4f}'.format(global_precision, global_recall, f_score))
print('over')
if __name__ == '__main__':
# Initial config
global_tp = 0
global_fp = 0
global_fn = 0
# load json file as dict
with open(input_json_path, 'r') as f:
input_dict = json.load(f)
with open(gt_json_path, 'r') as f:
gt_dict = json.load(f)
false = open('./false.txt','w')
for input_img_key, input_cnts in input_dict.items():
detections = input_reading(input_cnts)
groundtruths = gt_reading(gt_dict, input_img_key.replace('res', 'gt'))
detections = detection_filtering(detections, groundtruths) # filters detections overlapping with DC area
groundtruths = gt_filtering(groundtruths)
iou_table = np.zeros((len(groundtruths), len(detections)))
det_flag = np.zeros((len(detections), 1))
gt_flag = np.zeros((len(groundtruths), 1))
tp = 0
fp = 0
fn = 0
for gt_id, gt in enumerate(groundtruths):
gt_x = list(map(int, np.squeeze(gt['points'][:, 0])))
gt_y = list(map(int, np.squeeze(gt['points'][:, 1])))
if len(detections) > 0:
for det_id, detection in enumerate(detections):
det_x = list(map(int, np.squeeze(detection['points'][:, 0])))
det_y = list(map(int, np.squeeze(detection['points'][:, 1])))
iou_table[gt_id, det_id] = iou(det_x, det_y, gt_x, gt_y)
best_matched_det_id = np.argmax(
iou_table[gt_id, :]) # identified the best matched detection candidates with current groundtruth
matched_id = np.where(iou_table[gt_id, :] >= iou_threshold)
if iou_table[gt_id, best_matched_det_id] >= iou_threshold:
if matched_id[0].shape[0] < 2:
tp = tp + 1.0
global_tp = global_tp + 1.0
det_flag[best_matched_det_id] = 1
gt_flag[gt_id] = 1
else:
tp = tp + 1.0
global_tp = global_tp + 1.0
det_flag[best_matched_det_id] = 1
gt_flag[gt_id] = 1
# if there are more than 1 matched detection, only 1 is contributed to tp, the rest are fp
fp = fp + (matched_id[0].shape[0] - 1.0)
# Update local and global tp, fp, and fn
inv_gt_flag = 1 - gt_flag
fn = np.sum(inv_gt_flag)
inv_det_flag = 1 - det_flag
fp = fp + np.sum(inv_det_flag)
global_fp = global_fp + fp
global_fn = global_fn + fn
if tp + fp == 0:
local_precision = 0
else:
local_precision = tp / (tp + fp)
if tp + fn == 0:
local_recall = 0
else:
local_recall = tp / (tp + fn)
if local_precision == 0 and local_recall == 0:
false.write('hello')
false.write(input_img_key.replace('res', 'gt') + '.jpg')
false.write('\n')
else:
local_fscore = 2*local_precision*local_recall/(local_precision+local_recall)
if local_fscore < 0.3:
false.write(input_img_key.replace('res', 'gt') + '.jpg')
false.write('\n')
print('{0:12} Precision: {1:.4f}, Recall: {2:.4f}'.format(input_img_key.replace('res', 'gt') + '.jpg',
local_precision, local_recall))
global_precision = global_tp / (global_tp + global_fp)
global_recall = global_tp / (global_tp + global_fn)
f_score = 2 * global_precision * global_recall / (global_precision + global_recall)
print('Global Precision: {:.4f}, Recall: {:.4f}, F_score: {:.4f}'.format(global_precision, global_recall, f_score))
print('over')
```
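The per-image matching above is driven entirely by the IoU table. The following self-contained sketch replays the same rule on an invented 2x3 table, including the way an extra detection that matches an already-matched ground truth ends up in the false-positive count.
```python
import numpy as np

iou_threshold = 0.5
# Toy IoU table: 2 ground truths (rows) x 3 detections (columns), values invented.
iou_table = np.array([
    [0.7, 0.6, 0.1],   # gt 0 overlaps det 0 and det 1 above threshold
    [0.0, 0.2, 0.8],   # gt 1 overlaps det 2 only
])
gt_flag = np.zeros(2)
det_flag = np.zeros(3)
tp = 0.0
fp = 0.0
for gt_id in range(iou_table.shape[0]):
    best = np.argmax(iou_table[gt_id, :])
    matched = np.where(iou_table[gt_id, :] >= iou_threshold)[0]
    if iou_table[gt_id, best] >= iou_threshold:
        tp += 1.0
        det_flag[best] = 1
        gt_flag[gt_id] = 1
        # extra detections matching the same gt count as false positives
        fp += max(matched.shape[0] - 1.0, 0.0)

fn = np.sum(1 - gt_flag)      # 0: both ground truths were matched
fp += np.sum(1 - det_flag)    # det 1 is also unmatched, so it is counted here as well
precision = tp / (tp + fp)    # 2 / (2 + 2) = 0.5
recall = tp / (tp + fn)       # 2 / (2 + 0) = 1.0
print(precision, recall)
```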
#### File: detection_model/EAST/multigpu_train.py
```python
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
import cv2
tf.app.flags.DEFINE_integer('input_size', 512, '')
tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '')
tf.app.flags.DEFINE_integer('num_readers', 16, '')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
tf.app.flags.DEFINE_integer('max_steps', 20000, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '3', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/home/cjy/east_icdar2015_resnet_v1_50_rbox/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 2000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path','/home/cjy/resnet_v1_50_2016_08_28/resnet_v1_50.ckpt', '')
import model
import icdar
FLAGS = tf.app.flags.FLAGS
gpus = list(range(len(FLAGS.gpu_list.split(','))))
def tower_loss(images, score_maps1, geo_maps1, training_masks1, score_maps2, geo_maps2, training_masks2, reuse_variables=None):
# Build inference graph
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
f_score, f_geometry = model.model(images, is_training=True)
model_loss1 = model.loss(score_maps1, f_score['F_score1'],
geo_maps1, f_geometry['F_geometry1'],
training_masks1)
model_loss2 = model.loss(score_maps2, f_score['F_score2'],
geo_maps2, f_geometry['F_geometry2'],
training_masks2)
model_loss = model_loss1 + model_loss2
total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
# add summary
if reuse_variables is None:
# tf.summary.image('input', images)
# tf.summary.image('score_map', score_maps)
# tf.summary.image('score_map_pred', f_score * 255)
# tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
# tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
# tf.summary.image('training_masks', training_masks)
tf.summary.scalar('model_loss1', model_loss1)
tf.summary.scalar('model_loss2', model_loss2)
tf.summary.scalar('model_loss', model_loss)
tf.summary.scalar('total_loss', total_loss)
return total_loss, model_loss
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def main(argv=None):
import os
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
if not tf.gfile.Exists(FLAGS.checkpoint_path):
tf.gfile.MkDir(FLAGS.checkpoint_path)
else:
if not FLAGS.restore:
tf.gfile.DeleteRecursively(FLAGS.checkpoint_path)
tf.gfile.MkDir(FLAGS.checkpoint_path)
input_images = tf.placeholder(tf.float32, shape=[None, 512, 512, 3], name='input_images')
input_score_maps1 = tf.placeholder(tf.float32, shape=[None, 128, 128, 1], name='input_score_maps1')
input_score_maps2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 1], name='input_score_maps2')
if FLAGS.geometry == 'RBOX':
input_geo_maps1 = tf.placeholder(tf.float32, shape=[None, 128, 128, 5], name='input_geo_maps1')
input_geo_maps2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 5], name='input_geo_maps2')
else:
input_geo_maps1 = tf.placeholder(tf.float32, shape=[None, 128, 128, 8], name='input_geo_maps1')
input_geo_maps2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 8], name='input_geo_maps2')
input_training_masks1 = tf.placeholder(tf.float32, shape=[None, 128, 128, 1], name='input_training_masks1')
input_training_masks2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 1], name='input_training_masks2')
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, decay_steps=2000, decay_rate=0.94, staircase=True)
# add summary
tf.summary.scalar('learning_rate', learning_rate)
opt = tf.train.AdamOptimizer(learning_rate)
# opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
# split
print('gpu',len(gpus))
input_images_split = tf.split(input_images, len(gpus))
input_score_maps_split1 = tf.split(input_score_maps1, len(gpus))
input_geo_maps_split1 = tf.split(input_geo_maps1, len(gpus))
input_training_masks_split1 = tf.split(input_training_masks1, len(gpus))
input_score_maps_split2 = tf.split(input_score_maps2, len(gpus))
input_geo_maps_split2 = tf.split(input_geo_maps2, len(gpus))
input_training_masks_split2 = tf.split(input_training_masks2, len(gpus))
tower_grads = []
reuse_variables = None
for i, gpu_id in enumerate(gpus):
with tf.device('/gpu:%d' % gpu_id):
with tf.name_scope('model_%d' % gpu_id) as scope:
iis = input_images_split[i]
isms1 = input_score_maps_split1[i]
igms1 = input_geo_maps_split1[i]
itms1 = input_training_masks_split1[i]
isms2 = input_score_maps_split2[i]
igms2 = input_geo_maps_split2[i]
itms2 = input_training_masks_split2[i]
total_loss, model_loss = tower_loss(iis, isms1, igms1, itms1, isms2, igms2, itms2, reuse_variables)
batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
reuse_variables = True
grads = opt.compute_gradients(total_loss)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
summary_op = tf.summary.merge_all()
# save moving average
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# batch norm updates
with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
train_op = tf.no_op(name='train_op')
saver = tf.train.Saver(tf.global_variables())
summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph())
init = tf.global_variables_initializer()
if FLAGS.pretrained_model_path is not None:
variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path, slim.get_trainable_variables(),
ignore_missing_vars=True)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
if FLAGS.restore:
print('continue training from previous checkpoint')
ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
saver.restore(sess, ckpt)
else:
sess.run(init)
if FLAGS.pretrained_model_path is not None:
variable_restore_op(sess)
data_generator = icdar.get_batch(num_workers=FLAGS.num_readers,
input_size=FLAGS.input_size,
batch_size=FLAGS.batch_size_per_gpu * len(gpus))
start = time.time()
for step in range(FLAGS.max_steps):
data = next(data_generator)
# print('hello:',data[2]['score_map1'][0].shape)
# print('hello:',data[2]['score_map2'][0].shape)
# print('hello:',data[3]['geo_map1'][0].shape)
# print('hello:',data[3]['geo_map2'][0].shape)
# debug
#import cv2
# print(type(data[0]))
#cv2.imwrite('input.jpg', data[0][0])
ml, tl, _ = sess.run([model_loss, total_loss, train_op], feed_dict={input_images: data[0],
input_score_maps1: data[2]['score_map1'],
input_geo_maps1: data[3]['geo_map1'],
input_training_masks1: data[4]['training_mask1'],
input_score_maps2: data[2]['score_map2'],
input_geo_maps2: data[3]['geo_map2'],
input_training_masks2: data[4]['training_mask2']})
if np.isnan(tl):
print('Loss diverged, stop training')
break
if step % 10 == 0:
avg_time_per_step = (time.time() - start)/10
avg_examples_per_second = (10 * FLAGS.batch_size_per_gpu * len(gpus))/(time.time() - start)
start = time.time()
print('Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.2f} seconds/step, {:.2f} examples/second'.format(
step, ml, tl, avg_time_per_step, avg_examples_per_second))
if step % FLAGS.save_checkpoint_steps == 0:
saver.save(sess, FLAGS.checkpoint_path + 'model.ckpt', global_step=global_step)
if step % FLAGS.save_summary_steps == 0:
_, tl, summary_str = sess.run([train_op, total_loss, summary_op], feed_dict={input_images: data[0],
input_score_maps1: data[2]['score_map1'],
input_geo_maps1: data[3]['geo_map1'],
input_training_masks1: data[4]['training_mask1'],
input_score_maps2: data[2]['score_map2'],
input_geo_maps2: data[3]['geo_map2'],
input_training_masks2: data[4]['training_mask2']})
summary_writer.add_summary(summary_str, global_step=step)
if __name__ == '__main__':
tf.app.run()
```
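The gradient averaging in `average_gradients` is easiest to see with concrete numbers; below is a minimal NumPy analogue with hypothetical gradients from two towers and two variables, showing how per-variable gradients are stacked and averaged.
```python
import numpy as np

# Two towers, each holding (gradient, variable) pairs for the same two variables.
tower_grads = [
    [(np.array([1.0, 2.0]), "w"), (np.array([0.5]), "b")],   # tower 0
    [(np.array([3.0, 4.0]), "w"), (np.array([1.5]), "b")],   # tower 1
]

averaged = []
for grad_and_vars in zip(*tower_grads):
    grads = np.stack([g for g, _ in grad_and_vars], axis=0)
    averaged.append((grads.mean(axis=0), grad_and_vars[0][1]))

print(averaged)  # [(array([2., 3.]), 'w'), (array([1.]), 'b')]
```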
#### File: lib/model/focal_loss.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=0.25, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])
if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim()>2:
input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
input = input.transpose(1,2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1,1)
logpt = None
loss = None
logpt = F.log_softmax(input)
logpt = logpt.gather(1,target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type()!=input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0,target.data.view(-1))
logpt = logpt * Variable(at)
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average: return loss.mean()
else: return loss.sum()
```
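A small usage sketch for the `FocalLoss` above on random logits; shapes and labels are arbitrary and the class is assumed to be in scope.
```python
import torch
from torch.autograd import Variable

criterion = FocalLoss(gamma=2, alpha=0.25)

logits = Variable(torch.randn(4, 2))                 # 4 samples, 2 classes (non-text / text)
targets = Variable(torch.LongTensor([0, 1, 1, 0]))

loss = criterion(logits, targets)
print(loss)                                          # scalar mean loss (size_average=True)
```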
#### File: lib/model/networkOptimier.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .roi_align.modules.roi_align import RoIAlign
from torch.utils.data import Dataset, DataLoader
from lib.model.logger import Logger
from torchvision import transforms
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import os
from lib.model.networkFactory import networkFactory
import torch.nn.functional as F
import logging
import time
import random
import pickle
from lib.datasets.ctw import CTWDataset
from lib.datasets.syntext import SynthtextDataset
from lib.datasets.totaltext import TotalTextDataset, ToTensor
# from lib.datasets.syntext import SynthtextDataset, ToTensor
from lib.model.focal_loss import FocalLoss
from lib.model.unet.unet_model import UNet
from config.config import config
import cv2
def toNp(x):
return x.data.cpu().numpy()
def toVar(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
class networkOptimier(object):
"""docstring for TrainModel"""
def __init__(self, trainDatasetroot, testDatasetroot, modelHome, outputModelHome, outputLogHome,net='vgg16',data='ctw',GPUID = 0,resize_type = 'normal'):
super(networkOptimier, self).__init__()
torch.cuda.set_device(GPUID)
self.strides = [8,16,32,64]
self.data = data
self.net = net
traindataTransform = transforms.Compose([ToTensor(self.strides)])
if data == 'ctw':
trainDataset = CTWDataset(trainDatasetroot, traindataTransform,self.strides,istraining=True,resize_type=resize_type)
elif data == 'synthtext':
trainDataset = SynthtextDataset(trainDatasetroot, traindataTransform,self.strides,istraining=False,data='synthtext')
elif data == 'icdar':
trainDataset = SynthtextDataset(trainDatasetroot, traindataTransform,self.strides,istraining=True,data='icdar')
elif data == 'totaltext':
trainDataset = TotalTextDataset(trainDatasetroot, traindataTransform,self.strides,istraining=True,data='totaltext')
traindataloader = DataLoader(trainDataset, batch_size=1,shuffle=True, num_workers=5)
# self.dataloader = {'train':traindataloader}
testdataTransform = transforms.Compose([ToTensor(self.strides)])
if data == 'ctw':
testDataset = CTWDataset(testDatasetroot, testdataTransform,self.strides,istraining=False,resize_type=resize_type)
elif data == 'synthtext':
testDataset = CTWDataset(testDatasetroot, testdataTransform,self.strides,istraining=False,data='synthtext')
elif data == 'icdar':
testDataset = SynthtextDataset(testDatasetroot, testdataTransform,self.strides,istraining=False,data='icdar')
elif data == 'totaltext':
testDataset = TotalTextDataset(testDatasetroot, testdataTransform,self.strides,istraining=False,data='totaltext')
testdataloader = DataLoader(testDataset, batch_size=1,shuffle=False, num_workers=5)
# self.dataloader = {'test':traindataloader}
self.dataloader = {'train': traindataloader, 'val': testdataloader}
# tensorboard log and step lo
if not os.path.exists(outputLogHome):
os.makedirs(outputLogHome)
self.logger = Logger(outputLogHome)
nf = networkFactory(modelHome)
if net == 'vgg16':
self.model = nf.vgg16()
elif net == 'resnet34':
self.model = nf.resnet34()
elif net == 'resnet50':
self.model = nf.resnet50()
elif net == 'unet':
self.model = UNet(3,1)
elif net == 'resnet50_mask':
self.model = nf.resnet50_mask()
print(self.model)
if not os.path.exists(outputModelHome):
os.makedirs(outputModelHome)
if not os.path.exists(outputLogHome):
os.makedirs(outputLogHome)
self.outputModelHome = outputModelHome
self.outputLogHome = outputLogHome
self.curEpoch = 0
self.optimizer = None
# self.focal_loss = FocalLoss()
self.circle_cls_loss_function = nn.CrossEntropyLoss(ignore_index=-1)
self.anchor_cls_loss_function = FocalLoss()
self.mask_loss = FocalLoss()
self.roi_align = RoIAlign(28, 28, 0)
self.image_mask_loss = nn.SmoothL1Loss()
def load(self, modelPath=None):
if modelPath is not None:
pretrainedDict = torch.load(modelPath)
modelDict = self.model.state_dict()
pretrainedDict = {k: v for k,
v in pretrainedDict.items() if k in modelDict}
modelDict.update(pretrainedDict)
self.model.load_state_dict(modelDict)
print('Load model:{}'.format(modelPath))
def save(self, modelPath, epoch):
modelPath = modelPath + '/{}.model'.format(str(epoch))
print('\nsave model {} in {}'.format(str(epoch), modelPath))
torch.save(self.model.state_dict(), modelPath)
def updateLR(self, baselr, epoch, steps, decayRate):
param = 1
for step in steps:
if epoch >= step:
param += 1
else:
break
for paramGroup in self.optimizer.param_groups:
paramGroup['lr'] = baselr * decayRate**param
def trainval(self, modelPath=None, epoch=0, maxEpoch=1000, baselr=0.001, steps=[1000], decayRate=0.1, valDuration=1000, snapshot=5):
if modelPath is not None:
self.load(modelPath)
self.curEpoch = epoch
if self.optimizer is None:
# params = []
# for param in self.model.parameters():
# if param.requires_grad:
# params.append(param)
# else:
# print('No')
print(self.model.named_parameters())
self.optimizer = optim.Adam(self.model.parameters(), lr=baselr)
self.model = self.model.cuda()
while self.curEpoch < maxEpoch:
self.curEpoch += 1
for phase in ['train','val']:
startEpochTime = time.time()
Epoch_circle_cls_Loss = {}
# Epoch_circle_reg_Loss = {}
# Epoch_anchor_cls_Loss = {}
# Epoch_anchor_reg_Loss = {}
Epoch_mask_loss = 0
Epoch_image_mask_loss = {}
for stride in self.strides:
Epoch_circle_cls_Loss[stride] = 0
Epoch_image_mask_loss[stride] = 0
numOfImage = 0
startImgTime = time.time()
datasample = self.dataloader[phase]
imagenum = datasample.__len__()
if phase == 'val' and self.curEpoch % valDuration != 0:
continue
for sample in datasample:
try:
if sample == "FAIL":
continue
if len(sample) == 0:
continue
if phase == 'train':
isTraining = True
self.updateLR(baselr, self.curEpoch,
steps, decayRate)
self.model.train(True)
else:
isTraining = False
self.model.eval()
numOfImage += 1
image = sample['image']
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
showimage = image.squeeze().numpy().transpose((1, 2, 0))
# pixel_means = np.array([[[102.9801, 115.9465, 122.7717]]])
showimage = showimage*std+mean
showimage = showimage.astype(np.uint8,copy=True)
circle_labels = {}
# circle_regres = {}
# anchor_labels = {}
# anchor_regres = {}
# anchor_positive_weights = {}
# anchor_negative_weights = {}
image = Variable(image.cuda(),requires_grad=False)
# mask = sample['mask']
# mask = Variable(mask.cuda(),requires_grad=False)
# resize_mask = sample['resize_mask']
# resize_mask = Variable(resize_mask.cuda(),requires_grad=False)
all_circle = {}
all_mask = {}
mask_gt = {}
for stride in self.strides:
circle_labels[str(stride)] = Variable(sample['labels_stride_'+str(stride)].squeeze().cuda(),requires_grad=False)
all_circle[str(stride)] = Variable(sample['anchors_stride_'+str(stride)].squeeze().cuda(),requires_grad=False)
all_mask[str(stride)] = Variable(sample['mask_stride_'+str(stride)].squeeze().cuda(),requires_grad=False)
if self.net == 'resnet50_mask':
mask_gt[str(stride)] = Variable(sample['mask'+str(stride)].squeeze().cuda(),requires_grad=False)
self.optimizer.zero_grad()
if self.net == 'resnet50_mask':
circle_labels_pred,pred_mask,bbox,bbox_idx,pos_idx_stride,bbox_score,mask_labels= self.model.forward(image,all_circle,circle_labels,threshold=0.4,istraining=isTraining)
else:
circle_labels_pred,pred_mask,bbox,bbox_idx,pos_idx_stride,bbox_score= self.model.forward(image,all_circle,circle_labels,threshold=0.4,istraining=isTraining) #
# backward
loss = None
losses = {}
mask_label = None
# losses['image_mask'] = self.image_mask_loss(image_mask,resize_mask)
# Epoch_image_mask_loss = Epoch_image_mask_loss+toNp(losses['image_mask'])
for stride in self.strides:
# circle cls
# print(circle_labels_pred[stride],circle_labels[stride])
pred_labels = circle_labels_pred[str(stride)]
target_labels = circle_labels[str(stride)]
label_temploss = None
# print(pred_labels,target_labels)
if str(stride) in pos_idx_stride:
stride_mask = all_mask[str(stride)][pos_idx_stride[str(stride)]]
if type(mask_label) == type(None):
mask_label = stride_mask
else:
mask_label = torch.cat((mask_label,stride_mask),0)
label_temploss = self.anchor_cls_loss_function(pred_labels,target_labels)#self.circle_cls_loss_function(pred_labels,target_labels)
# print(label_temploss)
if self.net == 'resnet50_mask':
losses['seg_'+str(stride)] = F.smooth_l1_loss(mask_labels[str(stride)],mask_gt[str(stride)])
Epoch_image_mask_loss[stride] = Epoch_circle_cls_Loss[stride]+toNp(losses['seg_'+str(stride)])
losses['cls_'+str(stride)]=label_temploss
Epoch_circle_cls_Loss[stride] = Epoch_circle_cls_Loss[stride]+toNp(losses['cls_'+str(stride)])
if not type(mask_label) == type(None):
mask_label = mask_label.squeeze()
## show mask
## ============
pred_mask = pred_mask
# print(mask_label.size(),pred_mask.size())
losses['mask'] = F.smooth_l1_loss(pred_mask,mask_label)
# losses['mask'] = F.cross_entropy(pred_mask,mask_label)
Epoch_mask_loss = Epoch_mask_loss+toNp(losses['mask'])
for key in losses:
if type(loss) == type(None):
loss = losses[key]
else:
loss+=losses[key]
# loss = losses['mask']
# print(loss)
# print(Epoch_circle_cls_Loss,Epoch_circle_reg_Loss)
if phase == 'train':
loss.backward()
self.optimizer.step()
# torch.cuda.empty_cache()
else:
self.optimizer.zero_grad()
# del loss,mask_label,circle_labels_pred,pred_mask,bbox,bbox_idx,pos_idx_stride,bbox_score
# print(self.curEpoch,losses)
print('\rnum:{}/{}'.format(str(numOfImage).zfill(3),imagenum)+" time:"+str(round(time.time()-startImgTime, 2))+" "+str(image.size(2))+"*"+str(image.size(3))+" ",end='')
if self.data == 'synthtext':
if numOfImage%10000 == 0:
self.save(self.outputModelHome, (numOfImage*self.curEpoch)//10000)
endEpochTime = time.time()
if phase == 'train':
print("\n=================Epoch {} time:{:.2f}===============\n".format(
self.curEpoch, endEpochTime-startEpochTime))
else:
print("\n==================Test {:.2f}==================\n".format(
endEpochTime-startEpochTime))
startEpochTime = endEpochTime
print("time:"+str(round(time.time()-startImgTime, 2))+ \
"\ncircle_cls_loss:{} {} {} {}".format(str(Epoch_circle_cls_Loss[8]).zfill(3),str(Epoch_circle_cls_Loss[16]).zfill(3),str(Epoch_circle_cls_Loss[32]).zfill(3),str(Epoch_circle_cls_Loss[64]).zfill(3))+\
"\nmask_loss:{}".format(str(Epoch_mask_loss).zfill(3)))
self.logger.scalar_summary(phase+'_mask_'+str(stride), Epoch_mask_loss,(numOfImage*self.curEpoch)//10000)
for stride in self.strides:
self.logger.scalar_summary(phase+'_circle_cls_'+str(stride), Epoch_circle_cls_Loss[stride], (numOfImage*self.curEpoch)//10000)
Epoch_circle_cls_Loss = {}
for stride in self.strides:
Epoch_circle_cls_Loss[stride] = 0
Epoch_mask_loss=0
except:
continue
if self.curEpoch % snapshot == 0 and phase == 'train':
self.save(self.outputModelHome, self.curEpoch)
endEpochTime = time.time()
if phase == 'train':
print("\n=================Epoch {} time:{:.2f}===============\n".format(
self.curEpoch, endEpochTime-startEpochTime))
else:
print("\n==================Test {:.2f}==================\n".format(
endEpochTime-startEpochTime))
startEpochTime = endEpochTime
if self.net == 'resnet50_mask':
print("time:"+str(round(time.time()-startImgTime, 2))+ \
"\ncircle_cls_loss:{} {} {} {}".format(str(Epoch_circle_cls_Loss[8]).zfill(3),str(Epoch_circle_cls_Loss[16]).zfill(3),str(Epoch_circle_cls_Loss[32]).zfill(3),str(Epoch_circle_cls_Loss[64]).zfill(3))+\
"\nmask_strid_loss:{} {} {} {}".format(str(Epoch_image_mask_loss[8]).zfill(3),str(Epoch_image_mask_loss[16]).zfill(3),str(Epoch_image_mask_loss[32]).zfill(3),str(Epoch_image_mask_loss[64]).zfill(3))+\
"\nmask_loss:{}".format(str(Epoch_mask_loss).zfill(3)))
else:
print("time:"+str(round(time.time()-startImgTime, 2))+ \
"\ncircle_cls_loss:{} {} {} {}".format(str(Epoch_circle_cls_Loss[8]).zfill(3),str(Epoch_circle_cls_Loss[16]).zfill(3),str(Epoch_circle_cls_Loss[32]).zfill(3),str(Epoch_circle_cls_Loss[64]).zfill(3))+\
"\nmask_loss:{}".format(str(Epoch_mask_loss).zfill(3)))
# "\nimage_mask:{}".format(str(Epoch_image_mask_loss).zfill(3)))
# "\nanchor_cls_loss:{} {} {} {} {}".format(str(Epoch_anchor_cls_Loss[8]).zfill(3),str(Epoch_anchor_cls_Loss[16]).zfill(3),str(Epoch_anchor_cls_Loss[32]).zfill(3),str(Epoch_anchor_cls_Loss[64]).zfill(3),str(Epoch_anchor_cls_Loss[128]).zfill(3))+ \
# "\nanchor_reg_loss:{} {} {} {} {}".format(str(Epoch_anchor_reg_Loss[8]).zfill(3),str(Epoch_anchor_reg_Loss[16]).zfill(3),str(Epoch_anchor_reg_Loss[32]).zfill(3),str(Epoch_anchor_reg_Loss[64]).zfill(3),str(Epoch_anchor_reg_Loss[128]).zfill(3)))
# print("\npositive Loss:{}".format(posEpochLoss))
#============ TensorBoard logging ============#
# (1) Log the scalar values
print('save Loss!')
self.logger.scalar_summary(phase+'_mask_'+str(stride), Epoch_mask_loss, self.curEpoch)
for stride in self.strides:
self.logger.scalar_summary(phase+'_circle_cls_'+str(stride), Epoch_circle_cls_Loss[stride], self.curEpoch)
torch.cuda.empty_cache()
# self.logger.scalar_summary(phase+'_circle_reg_'+str(stride), Epoch_circle_reg_Loss[stride], self.curEpoch)
# self.logger.scalar_summary(phase+'_anchor_cls_'+str(stride), Epoch_anchor_cls_Loss[stride], self.curEpoch)
# self.logger.scalar_summary(phase+'_anchor_reg_'+str(stride), Epoch_anchor_reg_Loss[stride], self.curEpoch)
# (2) Log values and gradients of the parameters (histogram)
if __name__ == '__main__':
trainDatasetroot = config.trainDatasetroot#'/home/shf/fudan_ocr_system/datasets/ICDAR15/Text_Localization/test'
testDatasetroot = config.testDatasetroot#'/home/shf/fudan_ocr_system/datasets/ICDAR15/Text_Localization/train'
modelHome = config.modelHome#'/home/shf/fudan_ocr_system/LSN/pretrainmodel'
outputModelHome = config.outputModelHome#'/home/shf/fudan_ocr_system/LSN/lib/model/data/2019AAAI/output/resnet50/outputmodel'
outputLogHome = config.outputLogHome#'/home/shf/fudan_ocr_system/LSN/lib/model/data/2019AAAI/output/resnet50/outputlog'
no = networkOptimier(trainDatasetroot, testDatasetroot, modelHome, outputModelHome, outputLogHome)
no.trainval()
```
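The step schedule applied by `updateLR` is easiest to read in isolation. The sketch below mirrors it with the `trainval` defaults (baselr=0.001, steps=[1000], decayRate=0.1); note that the exponent starts at 1, so even the first epoch already runs at baselr * decayRate.
```python
def stepped_lr(baselr, epoch, steps, decay_rate):
    """Mirror of networkOptimier.updateLR: one extra decay per step passed."""
    param = 1
    for step in steps:
        if epoch >= step:
            param += 1
        else:
            break
    return baselr * decay_rate ** param

# Epochs 1 and 500 give baselr * 0.1; epochs 1000 and beyond give baselr * 0.01.
for epoch in (1, 500, 1000, 1500):
    print(epoch, stepped_lr(0.001, epoch, [1000], 0.1))
```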
#### File: model/unet/unet_model.py
```python
import torch
import torch.nn.functional as F
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import logging
# python 3 confusing imports :(
from model.detection_model.LSN.lib.model.unet.unet_parts import *
from model.detection_model.LSN.lib.model.roi_align.modules.roi_align import RoIAlign
class UNet(nn.Module):
def __init__(self, n_channels, n_classes=1):
super(UNet, self).__init__()
self.inc = inconv(n_channels, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 512)
self.down5 = down(512, 512)
self.down6 = down(512, 512)
self.up1 = up(1024, 512)
self.up2 = up(1024, 512)
self.up3 = up(1024, 256)
self.up4 = up(512, 256)
self.up5 = up(512, 256)
self.up6 = up(512, 256)
# self.outc = outconv(64, n_classes)
self.outc = outconv(64, n_classes)
# self.outc = outconv(64, n_classes)
self.stride8_circle_cls = nn.Sequential(nn.Conv2d(256,128,kernel_size=3,padding=1),nn.Conv2d(128,8,kernel_size=1,padding=0))
self.stride16_circle_cls = nn.Sequential(nn.Conv2d(512,256,kernel_size=3,padding=1),nn.Conv2d(256,8,kernel_size=1,padding=0))
self.stride32_circle_cls = nn.Sequential(nn.Conv2d(512,256,kernel_size=3,padding=1),nn.Conv2d(256,8,kernel_size=1,padding=0))
self.stride64_circle_cls = nn.Sequential(nn.Conv2d(512,256,kernel_size=3,padding=1),nn.Conv2d(256,8,kernel_size=1,padding=0))
self.mask_generate = nn.Sequential(
nn.Conv2d(256,256,kernel_size=3,padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256,256,kernel_size=1,padding=0),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256,256,kernel_size=3,padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256,256,kernel_size=3,padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(256,256,kernel_size=3,stride=2,padding=1,output_padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256,1,kernel_size=1,padding=0),
nn.Sigmoid()
)
self.roi_align = RoIAlign(14, 14,extrapolation_value=0)
self._init_weights()
def forward(self, x,all_circle,gt_labels,istraining = True,threshold=0.5):
conv = {}
x = self.inc(x)
x = self.down1(x)
x = self.down2(x)
x4 = self.down3(x)
x5 = self.down4(x4)
x6 = self.down5(x5)
conv[64] = self.down6(x6)
conv[32] = self.up1(conv[64],x6)
conv[16] = self.up2(conv[32], x5)
conv[8] = self.up3(conv[16], x4)
circle_labels = {}
# del concate_conv
strides = [8,16,32,64]
mask_conv = None
# image_mask = self.image_mask_generate(mask_conv)
# print(mask_conv.size())
# circle_regres = {}
# anchor_labels = {}
# anchor_regres = {}
circle_cls = {8:self.stride8_circle_cls,16:self.stride16_circle_cls,32:self.stride32_circle_cls,64:self.stride64_circle_cls}
mask_feature = None
mask_conv_feature = None
roi = None
bbox_idx = None
pos_idx_stride = {}
bbox_score = None
# _,_,H,W = conv[8].size()
#roi_feature = F.upsample(conv[64],size=(H,W),mode='bilinear')+F.upsample(conv[32],size=(H,W),mode='bilinear')+F.upsample(conv[16],size=(H,W),mode='bilinear')+conv[8]
for stride in strides:
# circle classify
stride_circle_cls = None
# print(stride,conv[stride].size())
stride_circle_cls = circle_cls[stride](conv[stride])
n,c,h,w = stride_circle_cls.size()
stride_circle_cls = stride_circle_cls.view(n,2,int(c/2),h,w)
circle_labels[str(stride)] = stride_circle_cls.permute(0,3,4,2,1).contiguous().view(n*int(c/2)*h*w,2)
## roi
if istraining:
sort_score,sort_idx = torch.sort(gt_labels[str(stride)],descending=True)
postive_num = int(torch.sum(gt_labels[str(stride)])+1)
# print(postive_num)
select_postive_num = max(1,min(100,postive_num))
# select_negtive_num = max(1,int(select_postive_num/4))
postive_idx = np.random.randint(0,postive_num,size=select_postive_num)
# negtive_idx = np.random.randint(postive_num,gt_labels[str(stride)].size(0),size=select_negtive_num)
select_idx = torch.from_numpy(postive_idx).cuda()
pos_idx = sort_idx[select_idx]
bbox = all_circle[str(stride)][pos_idx,:]
pos_idx_stride[str(stride)] = pos_idx
if type(bbox_score) == type(None):
bbox_score = sort_score[select_idx]
else:
bbox_score = torch.cat((bbox_score,sort_score[select_idx]),0)
else:
pred_labels = circle_labels[str(stride)]
prod = nn.functional.softmax(pred_labels)
numpy_prod = prod.data.cpu().numpy()
length = min(len(np.where(numpy_prod[:,1]>=threshold)[0]),1000)
print(length)
score = prod[:,1]
sort_score,sort_idx = torch.sort(score,descending=True)
# num = int(torch.sum(sort_score>=threshold).data.cpu())+1
num = length+1
# print(stride,num,length,sort_score[length-1].data.cpu())
pos_idx = sort_idx[:num]
bbox = all_circle[str(stride)][pos_idx,:]
pos_idx_stride[str(stride)] = pos_idx
if type(bbox_score) == type(None):
bbox_score = sort_score[:num]
else:
bbox_score = torch.cat((bbox_score,sort_score[:num]),0)
# image = np.zeros((1024,1024,3),dtype=np.uint8)
# # print(image)
# image[:,:,:] = 255
# for box in bbox:
# cv2.rectangle(image,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),(0,0,255),3)
# cv2.imshow('image',image)
# cv2.waitKey(0)
# print(roi_bbox)
if type(roi) == type(None):
roi = bbox
else:
roi = torch.cat((roi,bbox),0)
bbox_idx = torch.IntTensor(len(roi))
bbox_idx = Variable(bbox_idx.fill_(0).cuda(),requires_grad=False)
roi_bbox = roi*1.0/8
mask_feature = self.roi_align(conv[8],roi_bbox,bbox_idx)
if type(mask_feature) == type(None):
pred_mask = None
else:
# print(mask_feature)
pred_mask = self.mask_generate(mask_feature).squeeze()#.permute(0,2,3,1).contiguous()
# print(pred_mask)
# del conv
return circle_labels,pred_mask,roi,bbox_idx,pos_idx_stride,bbox_score#,circle_regres#,anchor_labels,anchor_regres
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
weight initializer: truncated normal and random normal.
"""
# x is a parameter
for name,param in m.named_parameters():
param.requires_grad = True
if name.split('.')[-1] == "weight":
nn.init.normal(m.state_dict()[name], mean, stddev)
elif name.split('.')[-1] == "bias":
nn.init.constant(m.state_dict()[name], mean)
normal_init(self.stride8_circle_cls, 0, 0.01)
normal_init(self.stride16_circle_cls, 0, 0.01)
normal_init(self.stride32_circle_cls, 0, 0.01)
normal_init(self.stride64_circle_cls, 0, 0.01)
# normal_init(self.stride8_concate, 0, 0.01)
# normal_init(self.stride16_concate, 0, 0.01)
# normal_init(self.stride32_concate, 0, 0.01)
normal_init(self.mask_generate, 0, 0.01)
# normal_init(self.RCNN_deconv, 0, 0.01)
print('init success!')
if __name__ == '__main__':
from torch.autograd import Variable
unet = UNet(3, 2).cuda()
x = Variable(torch.FloatTensor(1, 3, 1024, 1024)).cuda()
print(x.size())
y = unet(x)
# print(y.size())
```
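The per-stride heads above emit 8 channels that are reshaped into four two-way (background/text) logits per location; here is a minimal standalone check of that reshape on a random tensor.
```python
import torch

n, c, h, w = 1, 8, 4, 4                  # 8 channels = 2 classes x 4 anchors per location
stride_circle_cls = torch.randn(n, c, h, w)

x = stride_circle_cls.view(n, 2, c // 2, h, w)
labels = x.permute(0, 3, 4, 2, 1).contiguous().view(n * (c // 2) * h * w, 2)
print(labels.shape)                      # torch.Size([64, 2]): one (bg, fg) pair per anchor
```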
#### File: model/utils/log.py
```python
import os
import logging
def printlog(self,data):
for dat in data:
logging.critical(dat)
```
#### File: maskrcnn_benchmark/engine/extra_utils.py
```python
import numpy as np
import cv2
from skimage.measure import find_contours
import pycocotools.mask as maskUtils
def to_poly(rle):
mask = maskUtils.decode(rle)
padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
area = float(maskUtils.area(rle))
if len(contours) == 0:
return [[]], area
poly = np.fliplr(contours[0]).astype(np.int32).tolist()
return poly, area
def coco_results_to_contest(coco_result):
output_result = {}
bbox_result = {}
for idx, result in enumerate(coco_result['segm']):
# segm
res_name = result['image_id'].replace('gt_', 'res_').replace('.jpg', '')
mask_conf = result['score']
res_points, res_area = to_poly(result['segmentation'])
res_size = result['segmentation']['size']
if not (len(res_points) > 0 and len(res_points[0]) > 0):
continue
# bbox
bbox = coco_result['bbox'][idx]['bbox']
bbox_conf = coco_result['bbox'][idx]['score']
res_bbox = xywha_to_xyxy(bbox).astype(np.int32).tolist()
# init
if res_name not in output_result:
output_result[res_name] = []
bbox_result[res_name] = []
output_result[res_name].append({
"points": res_points,
"confidence": mask_conf,
'area': res_area,
'size': res_size
})
bbox_result[res_name].append({
"points": res_bbox,
"confidence": bbox_conf
})
return output_result, bbox_result
def get_mask(box, shape):
"""根据box获取对应的掩膜"""
tmp_mask = np.zeros(shape, dtype="uint8")
tmp = np.array(box, dtype=np.int32).reshape(-1, 2)
cv2.fillPoly(tmp_mask, [tmp], 255)
# tmp_mask=cv2.bitwise_and(tmp_mask,mask)
return tmp_mask, cv2.countNonZero(tmp_mask)
def comput_mmi(area_a, area_b, intersect):
"""
Compute MMI (maximum mask intersection ratio), added 2018.11.23.
:param area_a: mask area of text instance a
:param area_b: mask area of text instance b
:param intersect: intersection area of text instances a and b
:return: max(intersect / area_a, intersect / area_b)
"""
eps = 1e-5
if area_a == 0 or area_b == 0:
area_a += eps
area_b += eps
print("the area of text is 0")
return max(float(intersect)/area_a, float(intersect)/area_b)
def mask_nms(polygons, shape, mmi_thres=0.5, conf_thres=0.4):
"""
Mask NMS implementation.
:param polygons: detection results, [{'points': [[], [], []], 'confidence': float}, ...]
:param shape: original size of the image being evaluated
:param mmi_thres: MMI threshold above which a candidate is suppressed
:param conf_thres: confidence threshold for keeping a detection
"""
# collect polygon points, areas and scores above the confidence threshold
bbox_infos = []
areas = []
scores = []
for poly in polygons:
if poly['confidence'] > conf_thres:
bbox_infos.append(poly['points'])
areas.append(poly['area'])
scores.append(poly['confidence'])
# print('before ',len(bbox_infos))
keep = []
# order = np.array(scores).argsort()[::-1]
order = np.array(areas).argsort()[::-1]
# print("order:{}".format(order))
nums = len(bbox_infos)
suppressed = np.zeros(nums, dtype=np.int)
# print("lens:{}".format(nums))
# iterate over candidates in descending area order
for i in range(nums):
idx = order[i]
if suppressed[idx] == 1:
continue
keep.append(idx)
mask_a, area_a = get_mask(bbox_infos[idx], shape)
for j in range(i, nums):
idx_j = order[j]
if suppressed[idx_j] == 1:
continue
mask_b, area_b = get_mask(bbox_infos[idx_j], shape)
# intersection area of the two text masks
merge_mask = cv2.bitwise_and(mask_a, mask_b)
area_intersect = cv2.countNonZero(merge_mask)
# compute MMI
mmi = comput_mmi(area_a, area_b, area_intersect)
# print("area_a:{},area_b:{},inte:{},mmi:{}".format(area_a,area_b,area_intersect,mmi))
if mmi >= mmi_thres:
suppressed[idx_j] = 1
or_mask = cv2.bitwise_or(mask_a, mask_b)
sum_area = cv2.countNonZero(or_mask)
padded_mask = np.zeros((or_mask.shape[0] + 2, or_mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = or_mask
contours = find_contours(padded_mask, 0.5)
poly = np.fliplr(contours[0]).astype(np.int32).tolist()
bbox_infos[idx] = poly
areas[idx] = sum_area
dets = []
for kk in keep:
dets.append({
'points': bbox_infos[kk],
'confidence': scores[kk]
})
return dets
def xywha_to_xyxy(rect):
cx, cy, w, h, angle = rect
lt = [cx - w / 2, cy - h / 2, 1]
rt = [cx + w / 2, cy - h / 2, 1]
lb = [cx - w / 2, cy + h / 2, 1]
rb = [cx + w / 2, cy + h / 2, 1]
pts = [lt, rt, rb, lb]
angle = -angle
cos_cita = np.cos(np.pi / 180 * angle)
sin_cita = np.sin(np.pi / 180 * angle)
M0 = np.array([[1, 0, 0], [0, 1, 0], [-cx, -cy, 1]])
M1 = np.array([[cos_cita, sin_cita, 0], [-sin_cita, cos_cita, 0], [0, 0, 1]])
M2 = np.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])
rotation_matrix = M0.dot(M1).dot(M2)
rotated_pts = np.dot(np.array(pts), rotation_matrix)[:, :2]
return rotated_pts.astype(np.int32)
def rotate_pts(pts, rotate_ct, angle):
pts = [pt+[1] for pt in pts]
cx, cy = rotate_ct
angle = -angle
cos_cita = np.cos(np.pi / 180 * angle)
sin_cita = np.sin(np.pi / 180 * angle)
M0 = np.array([[1, 0, 0], [0, 1, 0], [-cx, -cy, 1]])
M1 = np.array([[cos_cita, sin_cita, 0], [-sin_cita, cos_cita, 0], [0, 0, 1]])
M2 = np.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])
rotation_matrix = M0.dot(M1).dot(M2)
rotated_pts = np.dot(np.array(pts), rotation_matrix)[:, :2]
return rotated_pts.astype(np.int32)
```
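A quick check of `xywha_to_xyxy` on an axis-aligned box (angle 0), where the rotation reduces to the identity and the output is just the four corners; the import path matches the one used by `inference.py` later in this collection.
```python
from maskrcnn_benchmark.engine.extra_utils import xywha_to_xyxy

rect = [50, 30, 40, 20, 0]        # (cx, cy, w, h, angle): a 40x20 axis-aligned box
print(xywha_to_xyxy(rect))
# [[30 20]
#  [70 20]
#  [70 40]
#  [30 40]]   -> the four corners in (x, y), ordered lt, rt, rb, lb
```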
#### File: modeling/backbone/TCM.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class tcm(nn.Module):
"""docstring for TCM"""
def __init__(self,out_channels):
super(tcm, self).__init__()
self.conv0=nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.convvvv1= nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.bnnnn1 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.01)
self.convvvv2 = nn.Conv2d(out_channels, 2, kernel_size=1)
# self.bnnnn2 = nn.BatchNorm2d(2, eps=0.001, momentum=0.01)
self.relu = nn.ReLU(inplace=True)
self.softmax = nn.Softmax(dim=2)
# for l in [self.convvvv1, self.convvvv2]:
# torch.nn.init.normal_(l.weight, std=0.01)
# torch.nn.init.constant_(l.bias, 0)
for module in [self.conv0,self.convvvv1, self.convvvv2]:
# Caffe2 implementation uses XavierFill, which in fact
# corresponds to kaiming_uniform_ in PyTorch
nn.init.kaiming_uniform_(module.weight, a=1)
nn.init.constant_(module.bias, 0)
def forward(self,x):
att=self.conv0(x)
mid=self.relu(self.bnnnn1(self.convvvv1(att)))
map=self.relu(self.convvvv2(mid))
saliency_map=torch.exp(self.softmax(map))[:,:1,:,:]
saliency_map = saliency_map.expand_as(x)
return saliency_map
```
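A shape check for the `tcm` module on a random feature map; the channel count and spatial size are arbitrary and the class above is assumed to be in scope.
```python
import torch

attention = tcm(out_channels=256)
features = torch.randn(2, 256, 32, 32)

saliency = attention(features)
print(saliency.shape)   # torch.Size([2, 256, 32, 32]): a single-channel map expanded over all channels
```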
#### File: maskrcnn_benchmark/modeling/level_mapper.py
```python
import torch
import torch.nn.functional as F
from torch import nn
from .utils import cat
class LevelMapper(nn.Module):
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
"""
def __init__(self, scales, canonical_scale=224, canonical_level=4, eps=1e-6):
"""
Arguments:
scales (list)
canonical_scale (int)
canonical_level (int)
eps (float)
"""
super(LevelMapper, self).__init__()
self.scales = scales
self.k_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
self.k_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
self.s0 = canonical_scale
self.lvl0 = canonical_level
self.eps = eps
def convert_to_roi_format(self, boxes):
"""
:param boxes:
:return: rois list(batch_idx, x, y, w, h)
"""
concat_boxes = cat([b.bbox for b in boxes], dim=0)
device, dtype = concat_boxes.device, concat_boxes.dtype
ids = cat(
[
torch.full((len(b), 1), i, dtype=dtype, device=device)
for i, b in enumerate(boxes)
],
dim=0,
)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
def forward(self, x, boxes):
"""
Arguments:
x (list[Tensor]): feature maps for each level
boxes (list[BoxList]): boxes to be used to perform the pooling operation.
Returns:
results (list[dict])
"""
rois = self.convert_to_roi_format(boxes)
# Compute level ids
s = torch.sqrt(cat([boxlist.area() for boxlist in boxes]))
# Eqn.(1) in FPN paper
target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))
target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
levels = target_lvls.to(torch.int64) - self.k_min
# for each level, crop feature maps in the rois of this level
results = []
for level, per_level_feature in enumerate(x):
idx_in_level = torch.nonzero(levels == level).squeeze(1)
rois_per_level = rois[idx_in_level]
for batch_idx, ori_x, ori_y, ori_w, ori_h in rois_per_level:
batch_idx = int(batch_idx)
x = (int(ori_x * self.scales[level]) // 2) * 2
y = (int(ori_y * self.scales[level]) // 2) * 2
w = (int(ori_w * self.scales[level]) // 2) * 2
h = (int(ori_h * self.scales[level]) // 2) * 2
crop = per_level_feature[batch_idx:batch_idx+1, :, y:y+h, x:x+w]
# rescale to the same level 0
for i in range(level):
crop = nn.functional.interpolate(crop, scale_factor=2, mode='bilinear', align_corners=True)
x *= 2
y *= 2
# save to results
results.append({
'batch_idx': batch_idx,
'feature_map': crop,
'roi': [x, y, crop.shape[3], crop.shape[2]]
})
return results
```
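The level assignment in `forward` is Eqn. (1) of the FPN paper, `k = floor(k0 + log2(sqrt(area) / s0 + eps))`, clamped to the available levels. A small numeric sketch, assuming the usual FPN scales of 1/4 ... 1/32 so that k is clamped to [2, 5]:
```python
import torch

def fpn_level(area, k_min=2.0, k_max=5.0, s0=224, lvl0=4, eps=1e-6):
    # Same rule as LevelMapper.forward above; k_min/k_max assume scales 1/4 ... 1/32.
    s = torch.sqrt(torch.tensor(float(area)))
    k = torch.floor(lvl0 + torch.log2(s / s0 + eps))
    return torch.clamp(k, min=k_min, max=k_max)

for area in (32 * 32, 112 * 112, 224 * 224, 896 * 896):
    print(area, fpn_level(area).item())
# 1024 -> 2.0, 12544 -> 3.0, 50176 -> 4.0, 802816 -> 5.0
```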
#### File: roi_heads/mask_head/inference.py
```python
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from skimage.measure import find_contours
import cv2
from maskrcnn_benchmark.structures.bounding_box import RBoxList
from maskrcnn_benchmark.engine.extra_utils import rotate_pts
# TODO check if want to return a single BoxList or a composite
# object
class MaskPostProcessor(nn.Module):
"""
From the results of the CNN, post process the masks
by taking the mask corresponding to the class with max
probability (which are of fixed size and directly output
by the CNN) and return the masks in the mask field of the BoxList.
If a masker object is passed, it will additionally
project the masks in the image according to the locations in boxes,
"""
def __init__(self, masker=None):
super(MaskPostProcessor, self).__init__()
self.masker = masker
def forward(self, x, boxes):
"""
Arguments:
x (Tensor): the mask logits
boxes (list[BoxList]): bounding boxes that are used as
reference, one for each image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra field mask
"""
mask_prob = x.sigmoid()
# select masks corresponding to the predicted classes
num_masks = x.shape[0]
labels = [bbox.get_field("labels") for bbox in boxes]
labels = torch.cat(labels)
index = torch.arange(num_masks, device=labels.device)
mask_prob = mask_prob[index, labels][:, None]
boxes_per_image = [len(box) for box in boxes]
mask_prob = mask_prob.split(boxes_per_image, dim=0)
if self.masker:
mask_prob = self.masker(mask_prob, boxes)
results = []
for prob, box in zip(mask_prob, boxes):
bbox = RBoxList(box.bbox, box.size, mode="xywha")
for field in box.fields():
bbox.add_field(field, box.get_field(field))
bbox.add_field("mask", prob)
results.append(bbox)
return results
class MaskPostProcessorCOCOFormat(MaskPostProcessor):
"""
From the results of the CNN, post process the results
so that the masks are pasted in the image, and
additionally convert the results to COCO format.
"""
def forward(self, x, boxes):
import pycocotools.mask as mask_util
import numpy as np
results = super(MaskPostProcessorCOCOFormat, self).forward(x, boxes)
for result in results:
masks = result.get_field("mask").cpu()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
result.add_field("mask", rles)
return results
# the next two functions should be merged inside Masker
# but are kept here for the moment while we need them
# temporarily for paste_mask_in_image
def expand_boxes(boxes, scale):
# boxes: tensor[[x, y, w, h, a]]
boxes[:, 2] *= scale
boxes[:, 3] *= scale
return boxes
def expand_masks(mask, padding):
N = mask.shape[0]
M = mask.shape[-1]
pad2 = 2 * padding
scale = float(M + pad2) / M
padded_mask = mask.new_zeros((N, 1, M + pad2, M + pad2))
padded_mask[:, :, padding:-padding, padding:-padding] = mask
return padded_mask, scale
def paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1):
padded_mask, scale = expand_masks(mask[None], padding=padding)
mask = padded_mask[0, 0]
box = expand_boxes(box[None], scale)[0]
box = box.to(dtype=torch.int32)
cx = box[0].numpy()
cy = box[1].numpy()
w = max(box[2].numpy(), 1)
h = max(box[3].numpy(), 1)
angle = box[4].numpy()
# Set shape to [batchxCxHxW]
mask = mask.expand((1, 1, -1, -1))
# Resize mask
mask = mask.to(torch.float32)
mask = F.interpolate(mask, size=(h, w), mode='bilinear', align_corners=False)
mask = mask[0][0]
if thresh >= 0:
mask = mask > thresh
else:
# for visualization and debugging, we also
# allow it to return an unmodified mask
mask = (mask * 255).to(torch.uint8)
# # get mask pts
# pts = np.transpose(np.nonzero(mask.numpy()))
# if len(pts) == 0:
# pass
# pts = np.fliplr(pts).astype(np.int32)
#
# # rotate pts
# rotated_pts = pts.copy()
# rotated_pts[:, 0] += cx - int(w/2)
# rotated_pts[:, 1] += cy - int(h/2)
# rotated_pts = rotate_pts(rotated_pts.tolist(), (cx, cy), angle)
#
# im_mask = torch.zeros((im_h, im_w), dtype=torch.uint8)
#
# for (pt_x, pt_y), (rpt_x, rpt_y) in zip(pts, rotated_pts):
# if 0 <= rpt_x < im_w and 0 <= rpt_y < im_h:
# im_mask[rpt_y, rpt_x] = mask[pt_y, pt_x]
#
# return im_mask
# get mask contour
padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, thresh)
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
if len(contours) > 0:
poly = np.fliplr(contours[0]).astype(np.int32)
# rotate poly
rotated_poly = poly.copy()
rotated_poly[:, 0] += int(cx - w/2)
rotated_poly[:, 1] += int(cy - h/2)
rotated_poly = rotate_pts(rotated_poly.tolist(), (cx, cy), angle)
# get rotated mask
cv2.fillPoly(im_mask, [rotated_poly], 1)
return torch.from_numpy(im_mask)
class Masker(object):
"""
Projects a set of masks in an image on the locations
specified by the bounding boxes
"""
def __init__(self, threshold=0.5, padding=1):
self.threshold = threshold
self.padding = padding
def forward_single_image(self, masks, boxes):
im_w, im_h = boxes.size
res = [
paste_mask_in_image(mask[0], box, im_h, im_w, self.threshold, self.padding)
for mask, box in zip(masks, boxes.bbox)
]
if len(res) > 0:
res = torch.stack(res, dim=0)[:, None]
else:
res = masks.new_empty((0, 1, masks.shape[-2], masks.shape[-1]))
return res
def __call__(self, masks, boxes):
if isinstance(boxes, RBoxList):
boxes = [boxes]
# Make some sanity check
assert len(boxes) == len(masks), "Masks and boxes should have the same length."
# TODO: Is this JIT compatible?
# If not we should make it compatible.
results = []
for mask, box in zip(masks, boxes):
assert mask.shape[0] == len(box), "Number of objects should be the same."
result = self.forward_single_image(mask, box)
results.append(result)
return results
def make_roi_mask_post_processor(cfg):
if cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS:
mask_threshold = cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD
masker = Masker(threshold=mask_threshold, padding=1)
else:
masker = None
mask_post_processor = MaskPostProcessor(masker)
return mask_post_processor
```
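A tiny numeric check of `expand_masks`: padding by 1 on each side turns a 28x28 mask into a 30x30 one and returns the matching scale 30/28. The import path below is an assumption based on the file location.
```python
import torch
# Hypothetical import path; adjust to wherever this file lives in the package.
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import expand_masks

mask = torch.ones(1, 28, 28)                 # one 28x28 mask, as paste_mask_in_image passes mask[None]
padded, scale = expand_masks(mask, padding=1)
print(padded.shape, scale)                   # torch.Size([1, 1, 30, 30]) 1.0714285714285714
```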
#### File: roi_heads/textsnake_heads/roi_textsnake_predictors.py
```python
import torch
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.layers import Conv2d
from maskrcnn_benchmark.layers import ConvTranspose2d
class MaskRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(MaskRCNNC4Predictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_TEXTSNAKE_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x):
x = F.relu(self.conv5_mask(x))
return self.mask_fcn_logits(x)
class TextsnakeC4Predictor(nn.Module):
def __init__(self, cfg):
super(TextsnakeC4Predictor, self).__init__()
self.num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_TEXTSNAKE_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask_tr = Conv2d(num_inputs, dim_reduced, 3, stride=1, padding=1)
self.mask_fcn_logits_tr = Conv2d(dim_reduced, self.num_classes, 1, 1, 0)
self.conv5_mask_tcl = Conv2d(num_inputs, dim_reduced, 3, stride=1, padding=1)
self.mask_fcn_logits_tcl = Conv2d(dim_reduced, self.num_classes, 1, 1, 0)
self.conv5_mask_geo = Conv2d(num_inputs, dim_reduced, 3, stride=1, padding=1)
self.mask_fcn_logits_geo = Conv2d(dim_reduced, 1, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, crop_feature_list, features):
batch_size = features[0].shape[0]
output_channels = self.num_classes * 2 + 1
H = features[0].shape[2] # 1/4 of the input height
W = features[0].shape[3] # 1/4 of the input width
device, dtype = crop_feature_list[0]['feature_map'].device, crop_feature_list[0]['feature_map'].dtype
mask_logits = torch.zeros((batch_size, output_channels, H, W), dtype=dtype, device=device)
for crop_feature in crop_feature_list:
x = crop_feature['feature_map'] # (1, C, H ,W)
batch_idx = crop_feature['batch_idx']
roi_x, roi_y, roi_w, roi_h = crop_feature['roi']
# conv
tr = self.mask_fcn_logits_tr(F.relu(self.conv5_mask_tr(x)))
tcl = self.mask_fcn_logits_tcl(F.relu(self.conv5_mask_tcl(x)))
geo = self.mask_fcn_logits_geo(F.relu(self.conv5_mask_geo(x)))
# accumulate
mask_logits[batch_idx:batch_idx+1, 0:2, roi_y:roi_y+roi_h, roi_x:roi_x+roi_w] += tr
mask_logits[batch_idx:batch_idx+1, 2:4, roi_y:roi_y+roi_h, roi_x:roi_x+roi_w] += tcl
mask_logits[batch_idx:batch_idx+1, 4:, roi_y:roi_y+roi_h, roi_x:roi_x+roi_w] += geo
return mask_logits
_ROI_TEXTSNAKE_PREDICTOR = {
"MaskRCNNC4Predictor": MaskRCNNC4Predictor,
"TextsnakeC4Predictor": TextsnakeC4Predictor
}
def make_roi_textsnake_predictor(cfg):
func = _ROI_TEXTSNAKE_PREDICTOR[cfg.MODEL.ROI_TEXTSNAKE_HEAD.PREDICTOR]
return func(cfg)
```
#### File: PixelLink/ImgLib/util.py
```python
import cv2
import numpy as np
def find_contours(mask, method = None):
if method is None:
method = cv2.CHAIN_APPROX_SIMPLE
mask = np.asarray(mask, dtype = np.uint8)
mask = mask.copy()
# cv2.findContours returns (contours, hierarchy) in OpenCV 2/4 but
# (image, contours, hierarchy) in OpenCV 3, so handle both signatures.
try:
contours, _ = cv2.findContours(mask, mode = cv2.RETR_CCOMP,
method = method)
except ValueError:
_, contours, _ = cv2.findContours(mask, mode = cv2.RETR_CCOMP,
method = method)
return contours
```
#### File: TextSnake_pytorch/dataset/area_analysis.py
```python
import cv2
import os
import numpy as np
from model.detection_model.TextSnake_pytorch.dataset.read_json import read_json, read_dict
def recorder(record, area):
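# Increment the histogram bucket (keyed as 'low~high') whose range contains the given area.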
ranges = [key.split('~') for key in record.keys()]
for range in ranges:
if int(range[0]) <= area <= int(range[1]):
record['{}~{}'.format(range[0], range[1])] += 1
break
return record
if __name__ == '__main__':
path = '/home/shf/fudan_ocr_system/datasets/ICDAR19'
json_name = 'train_labels.json'
maxlen = 1280
train_files = os.listdir(os.path.join(path, 'train_images'))
test_files = os.listdir(os.path.join(path, 'test_images'))
data_dict = read_json(os.path.join(path, json_name))
legal_record = {
'0~99': 0,
'100~499': 0,
'500~999': 0,
'1000~1999': 0,
'2000~2999': 0,
'3000~3999': 0,
'4000~4999': 0,
'5000~5999': 0,
'6000~6999': 0,
'7000~7999': 0,
'8000~8999': 0,
'9000~9999': 0,
'10000~19999': 0,
'20000~29999': 0,
'30000~39999': 0,
'40000~49999': 0,
'50000~59999': 0,
'60000~69999': 0,
'70000~79999': 0,
'80000~89999': 0,
'90000~99999': 0,
'100000~99999999': 0
}
illegal_record = {
'0000~0099': 0,
'0100~0499': 0,
'0500~0999': 0,
'1000~1999': 0,
'2000~99999999': 0
}
max_area = -1
min_area = 999999999
with open('record.txt', 'w') as f:
for idx, file in enumerate(train_files):
polygons = read_dict(data_dict, file)
im = cv2.imread(os.path.join(path, 'train_images', file))
h, w = im.shape[:2]
scale = 1.0
if max(h, w) > maxlen:
scale = float(maxlen) / h if h > w else float(maxlen) / w
im = cv2.resize(im, (int(w*scale), int(h*scale)))
print(idx, file, len(polygons))
for polygon in polygons:
polygon.points[:, 0] = (polygon.points[:, 0] * scale).astype(np.int32)
polygon.points[:, 1] = (polygon.points[:, 1] * scale).astype(np.int32)
if not polygon.illegibility:
drawing = np.zeros(im.shape[:2], np.uint8)
poly_mask = cv2.fillPoly(drawing, np.array([polygon.points], dtype=np.int32), 255)
area = np.sum(np.greater(poly_mask, 0))
f.write(str(area) + '\n')
if area >= max_area:
max_area = area
if area <= min_area:
min_area = area
recorder(legal_record, area)
else:
drawing = np.zeros(im.shape[:2], np.uint8)
poly_mask = cv2.fillPoly(drawing, np.array([polygon.points], dtype=np.int32), 255)
area = np.sum(np.greater(poly_mask, 0))
recorder(illegal_record, area)
if idx % 10 == 0:
print('record: ', legal_record)
print('illegal: ', illegal_record)
print('max_area: {}, min_area: {}'.format(max_area, min_area))
print('record: ', legal_record)
print('illegal: ', illegal_record)
print('max_area: {}, min_area: {}'.format(max_area, min_area))
print("Test Images")
with open('record2.txt', 'w') as f:
for idx, file in enumerate(test_files):
polygons = read_dict(data_dict, file)
im = cv2.imread(os.path.join(path, 'test_images', file))
h, w = im.shape[:2]
scale = 1.0
if max(h, w) > maxlen:
scale = float(maxlen) / h if h > w else float(maxlen) / w
im = cv2.resize(im, (int(w * scale), int(h * scale)))
print(idx, file, len(polygons))
for polygon in polygons:
polygon.points[:, 0] = (polygon.points[:, 0] * scale).astype(np.int32)
polygon.points[:, 1] = (polygon.points[:, 1] * scale).astype(np.int32)
if not polygon.illegibility:
drawing = np.zeros(im.shape[:2], np.uint8)
poly_mask = cv2.fillPoly(drawing, np.array([polygon.points], dtype=np.int32), 255)
area = np.sum(np.greater(poly_mask, 0))
f.write(str(area) + '\n')
if area >= max_area:
max_area = area
if area <= min_area:
min_area = area
recorder(legal_record, area)
else:
drawing = np.zeros(im.shape[:2], np.uint8)
poly_mask = cv2.fillPoly(drawing, np.array([polygon.points], dtype=np.int32), 255)
area = np.sum(np.greater(poly_mask, 0))
recorder(illegal_record, area)
if idx % 10 == 0:
print('record: ', legal_record)
print('illegal: ', illegal_record)
print('max_area: {}, min_area: {}'.format(max_area, min_area))
print('record: ', legal_record)
print('illegal: ', illegal_record)
print('max_area: {}, min_area: {}'.format(max_area, min_area))
```
#### File: TextSnake_pytorch/dataset/ratio_analysis.py
```python
import cv2
import os
import numpy as np
from model.detection_model.TextSnake_pytorch.dataset.read_json import read_json, read_dict
def recorder(record, ratio):
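# Increment the histogram bucket whose range contains the given aspect ratio.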
ranges = [key.split('~') for key in record.keys()]
for range in ranges:
if int(range[0]) <= ratio < int(range[1]):
record['{}~{}'.format(range[0], range[1])] += 1
break
return record
if __name__ == '__main__':
path = '/home/shf/fudan_ocr_system/datasets/ICDAR19/'
json_name = 'train_labels.json'
maxlen = 1280
train_files = os.listdir(os.path.join(path, 'train_images'))
test_files = os.listdir(os.path.join(path, 'test_images'))
data_dict = read_json(os.path.join(path, json_name))
legal_record = {
'1~2': 0,
'2~3': 0,
'3~4': 0,
'4~5': 0,
'5~6': 0,
'6~7': 0,
'7~99999999': 0,
}
illegal_record = {
'1~2': 0,
'2~3': 0,
'3~4': 0,
'4~5': 0,
'5~6': 0,
'6~7': 0,
'7~99999999': 0,
}
# max_area = -1
# min_area = 999999999
with open('record.txt', 'w') as f:
for idx, file in enumerate(train_files):
polygons = read_dict(data_dict, file)
im = cv2.imread(os.path.join(path, 'train_images', file))
h, w = im.shape[:2]
scale = 1.0
if max(h, w) > maxlen:
scale = float(maxlen) / h if h > w else float(maxlen) / w
im = cv2.resize(im, (int(w*scale), int(h*scale)))
print(idx, file, len(polygons))
for polygon in polygons:
polygon.points[:, 0] = (polygon.points[:, 0] * scale).astype(np.int32)
polygon.points[:, 1] = (polygon.points[:, 1] * scale).astype(np.int32)
if not polygon.illegibility:
drawing = np.zeros(im.shape[:2], np.uint8)
_, (w, h), _ = cv2.minAreaRect(polygon.points.astype(np.int32))
ratio = float(max(w, h)) / min(w, h)
f.write(str(ratio) + '\n')
recorder(legal_record, ratio)
else:
drawing = np.zeros(im.shape[:2], np.uint8)
_, (w, h), _ = cv2.minAreaRect(polygon.points.astype(np.int32))
ratio = float(max(w, h)) / min(w, h)
f.write(str(ratio) + '\n')
recorder(illegal_record, ratio)
if idx % 10 == 0:
print('record: ', legal_record)
print('illegal: ', illegal_record)
print('record: ', legal_record)
print('illegal: ', illegal_record)
print("Test Images")
with open('record2.txt', 'w') as f:
for idx, file in enumerate(test_files):
polygons = read_dict(data_dict, file)
im = cv2.imread(os.path.join(path, 'test_images', file))
h, w = im.shape[:2]
scale = 1.0
if max(h, w) > maxlen:
scale = float(maxlen) / h if h > w else float(maxlen) / w
im = cv2.resize(im, (int(w * scale), int(h * scale)))
print(idx, file, len(polygons))
for polygon in polygons:
polygon.points[:, 0] = (polygon.points[:, 0] * scale).astype(np.int32)
polygon.points[:, 1] = (polygon.points[:, 1] * scale).astype(np.int32)
if not polygon.illegibility:
drawing = np.zeros(im.shape[:2], np.uint8)
_, (w, h), _ = cv2.minAreaRect(polygon.points.astype(np.int32))
ratio = float(max(w, h)) / min(w, h)
f.write(str(ratio) + '\n')
recorder(legal_record, ratio)
else:
drawing = np.zeros(im.shape[:2], np.uint8)
_, (w, h), _ = cv2.minAreaRect(polygon.points.astype(np.int32))
ratio = float(max(w, h)) / min(w, h)
f.write(str(ratio) + '\n')
recorder(illegal_record, ratio)
if idx % 10 == 0:
print('record: ', legal_record)
print('illegal: ', illegal_record)
print('record: ', legal_record)
print('illegal: ', illegal_record)
```
#### File: TextSnake_pytorch/network/textnet.py
```python
import torch.nn as nn
import torch
import torch.nn.functional as F
from model.detection_model.TextSnake_pytorch.network.vgg import VGG16
from model.detection_model.TextSnake_pytorch.network.resnet import ResNet50
import json
class GCN(nn.Module):
def __init__(self, c, out_c, k=(7, 7)): # out_Channel=21 in paper
super(GCN, self).__init__()
self.conv_l1 = nn.Conv2d(c, out_c, kernel_size=(k[0], 1), padding=(int((k[0] - 1) / 2), 0))
self.conv_l2 = nn.Conv2d(out_c, out_c, kernel_size=(1, k[0]), padding=(0, int((k[0] - 1) / 2)))
self.conv_r1 = nn.Conv2d(c, out_c, kernel_size=(1, k[1]), padding=(0, int((k[1] - 1) / 2)))
self.conv_r2 = nn.Conv2d(out_c, out_c, kernel_size=(k[1], 1), padding=(int((k[1] - 1) / 2), 0))
def forward(self, x):
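# Global-convolution block: a (k x 1) followed by (1 x k) branch plus a (1 x k) followed by (k x 1) branch, summed.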
x_l = self.conv_l1(x)
x_l = self.conv_l2(x_l)
x_r = self.conv_r1(x)
x_r = self.conv_r2(x_r)
x = x_l + x_r
return x
class BR(nn.Module):
def __init__(self, out_c):
super(BR, self).__init__()
# self.bn = nn.BatchNorm2d(out_c)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(out_c, out_c, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(out_c, out_c, kernel_size=3, padding=1)
def forward(self, x):
x_res = self.conv1(x)
x_res = self.relu(x_res)
x_res = self.conv2(x_res)
x = x + x_res
return x
class Upsample(nn.Module):
def __init__(self, in_channels, out_channels, backbone='vgg'):
super().__init__()
self.backbone = backbone
if backbone == 'vgg':
self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
elif backbone == 'resnet':
self.conv1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
self.conv3x3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.deconv = nn.ConvTranspose2d(out_channels, out_channels, kernel_size=4, stride=2, padding=1)
def forward(self, upsampled, shortcut, is_deconv=True):
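# Concatenate the upsampled feature with the backbone shortcut, fuse with 1x1 and 3x3 convs,
# then upsample by 2 (transposed conv or bilinear interpolation).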
x = torch.cat([upsampled, shortcut], dim=1)
x = self.conv1x1(x)
x = self.conv3x3(x)
if is_deconv:
x = self.deconv(x)
else:
x = nn.functional.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
return x
class TextNet(nn.Module):
def __init__(self, cfg):
super().__init__()
self.backbone_name = cfg.TEXTSNAKE.backbone
self.output_channel = cfg.TEXTSNAKE.output_channel
self.bottleneck = 32
if self.backbone_name == 'vgg':
self.backbone = VGG16()
self.deconv5 = nn.ConvTranspose2d(512, 512, kernel_size=4, stride=2, padding=1)
self.merge4 = Upsample(512 + 512, 256)
self.merge3 = Upsample(256 + 256, 128)
self.merge2 = Upsample(128 + 128, 64)
self.merge1 = Upsample(64 + 64, self.output_channel)
elif self.backbone_name == 'resnet':
self.backbone = ResNet50()
self.deconv5 = nn.ConvTranspose2d(self.output_channel, self.output_channel, kernel_size=4, stride=2, padding=1)
self.deconv4 = nn.ConvTranspose2d(self.output_channel, self.output_channel, kernel_size=4, stride=2, padding=1)
self.deconv3 = nn.ConvTranspose2d(self.output_channel, self.output_channel, kernel_size=4, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(self.output_channel, self.output_channel, kernel_size=4, stride=2, padding=1)
self.deconv1 = nn.ConvTranspose2d(self.output_channel, self.output_channel, kernel_size=4, stride=2, padding=1)
self.gcn5 = GCN(2048, self.output_channel)
self.gcn4 = GCN(1024, self.output_channel)
self.gcn3 = GCN(512, self.output_channel)
self.gcn2 = GCN(256, self.output_channel)
self.br5 = BR(self.output_channel)
self.br4_1 = BR(self.output_channel)
self.br4_2 = BR(self.output_channel)
self.br3_1 = BR(self.output_channel)
self.br3_2 = BR(self.output_channel)
self.br2_1 = BR(self.output_channel)
self.br2_2 = BR(self.output_channel)
self.br1 = BR(self.output_channel)
self.br0 = BR(self.output_channel)
elif self.backbone_name == 'resnet_gcn':
self.backbone = ResNet50()
self.deconv5 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv4 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv3 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv1_1 = nn.ConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1) # tr
self.deconv1_2 = nn.ConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1) # tcl
self.gcn5 = GCN(2048, self.bottleneck)
self.gcn4 = GCN(1024, self.bottleneck)
self.gcn3 = GCN(512, self.bottleneck)
self.gcn2 = GCN(256, self.bottleneck)
self.gcn1_1 = GCN(self.bottleneck, 2) # tr
self.gcn1_2 = GCN(self.bottleneck, 2) # tcl
self.br5 = BR(self.bottleneck)
self.br4_1 = BR(self.bottleneck)
self.br4_2 = BR(self.bottleneck)
self.br3_1 = BR(self.bottleneck)
self.br3_2 = BR(self.bottleneck)
self.br2_1 = BR(self.bottleneck)
self.br2_2 = BR(self.bottleneck)
self.br1_1 = BR(2) # tr
self.br1_2 = BR(2) # tcl
self.br0_1 = BR(2) # tr
self.br0_2 = BR(2) # tcl
self.conv1 = nn.Sequential(
nn.Conv2d(self.bottleneck, self.bottleneck, kernel_size=3, stride=1, padding=1),
nn.Conv2d(self.bottleneck, 3, kernel_size=1, stride=1, padding=0) # geo(sin, cos, radii)
)
elif self.backbone_name == 'resnet_gcn_new':
self.backbone = ResNet50()
self.deconv5 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv4 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv3 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv1_1 = nn.ConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1) # tr
self.deconv1_2 = nn.ConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1) # tcl
self.gcn5 = GCN(2048, self.bottleneck)
self.gcn4 = GCN(1024, self.bottleneck)
self.gcn3 = GCN(512, self.bottleneck)
self.gcn2 = GCN(256, self.bottleneck)
self.br5 = BR(self.bottleneck)
self.br4_1 = BR(self.bottleneck)
self.br4_2 = BR(self.bottleneck)
self.br3_1 = BR(self.bottleneck)
self.br3_2 = BR(self.bottleneck)
self.br2_1 = BR(self.bottleneck)
self.br2_2 = BR(self.bottleneck)
self.br1_1 = BR(2) # tr
self.br1_2 = BR(2) # tcl
self.br0_1 = BR(2) # tr
self.br0_2 = BR(2) # tcl
self.conv_tr = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_tcl = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_geo = nn.Sequential(
nn.Conv2d(self.bottleneck, self.bottleneck, kernel_size=3, stride=1, padding=1),
nn.Conv2d(self.bottleneck, 3, kernel_size=1, stride=1, padding=0) # geo(sin, cos, radii)
)
elif self.backbone_name == 'resnet_gcn_ms':
self.backbone = ResNet50()
self.deconv5 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv4 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv3 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv2_1 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv2_2 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv2_3 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv1_1 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv1_2 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.gcn5 = GCN(2048, self.bottleneck)
self.gcn4 = GCN(1024, self.bottleneck)
self.gcn3 = GCN(512, self.bottleneck)
self.gcn2 = GCN(256, self.bottleneck)
self.br5 = BR(self.bottleneck)
self.br4_1 = BR(self.bottleneck)
self.br4_2 = BR(self.bottleneck)
self.br3_1 = BR(self.bottleneck)
self.br3_2 = BR(self.bottleneck)
self.br2_1 = BR(self.bottleneck)
self.br2_2 = BR(self.bottleneck)
self.br1_1 = BR(self.bottleneck)
self.br1_2 = BR(self.bottleneck)
self.br0_1 = BR(self.bottleneck)
self.br0_2 = BR(self.bottleneck)
self.conv_tr_128 = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_tcl_128 = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_tr_256 = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_tcl_256 = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_tr_512 = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_tcl_512 = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_geo = nn.Sequential(
nn.Conv2d(self.bottleneck, self.bottleneck, kernel_size=3, stride=1, padding=1),
nn.Conv2d(self.bottleneck, 3, kernel_size=1, stride=1, padding=0) # geo(sin, cos, radii)
)
elif self.backbone_name == 'resnet_gcn_ms2':
self.backbone = ResNet50()
self.deconv5 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv4 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv3 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv2_1 = nn.ConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
self.deconv2_2 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv2_3 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.deconv1_1 = nn.ConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
self.deconv1_2 = nn.ConvTranspose2d(self.bottleneck, self.bottleneck, kernel_size=4, stride=2, padding=1)
self.gcn5 = GCN(2048, self.bottleneck)
self.gcn4 = GCN(1024, self.bottleneck)
self.gcn3 = GCN(512, self.bottleneck)
self.gcn2 = GCN(256, self.bottleneck)
self.br5 = BR(self.bottleneck)
self.br4_1 = BR(self.bottleneck)
self.br4_2 = BR(self.bottleneck)
self.br3_1 = BR(self.bottleneck)
self.br3_2 = BR(self.bottleneck)
self.br2_1 = BR(self.bottleneck)
self.br2_2 = BR(self.bottleneck)
self.br1_1 = BR(2)
self.br1_2 = BR(self.bottleneck)
self.br0_1 = BR(2)
self.br0_2 = BR(self.bottleneck)
self.conv_tcl_128 = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_tcl_256 = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_tcl_512 = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_tr = nn.Conv2d(self.bottleneck, 2, kernel_size=1, stride=1, padding=0)
self.conv_geo = nn.Sequential(
nn.Conv2d(self.bottleneck, self.bottleneck, kernel_size=3, stride=1, padding=1),
nn.Conv2d(self.bottleneck, 3, kernel_size=1, stride=1, padding=0) # geo(sin, cos, radii)
)
def forward(self, x):
C1, C2, C3, C4, C5 = self.backbone(x)
if self.backbone_name == 'vgg':
up5 = self.deconv5(C5)
up5 = F.relu(up5)
up4 = self.merge4(C4, up5)
up4 = F.relu(up4)
up3 = self.merge3(C3, up4)
up3 = F.relu(up3)
up2 = self.merge2(C2, up3)
up2 = F.relu(up2)
up1 = self.merge1(C1, up2)
elif self.backbone_name == 'resnet':
up5 = self.deconv5(self.br5(self.gcn5(C5)))
up4 = self.deconv4(self.br4_2(up5 + self.br4_1(self.gcn4(C4))))
up3 = self.deconv3(self.br3_2(up4 + self.br3_1(self.gcn3(C3))))
up2 = self.deconv2(self.br2_2(up3 + self.br2_1(self.gcn2(C2))))
up1 = self.br0(self.deconv1(self.br1(up2)))
elif self.backbone_name == 'resnet_gcn':
up5 = self.deconv5(self.br5(self.gcn5(C5)))
up4 = self.deconv4(self.br4_2(up5 + self.br4_1(self.gcn4(C4))))
up3 = self.deconv3(self.br3_2(up4 + self.br3_1(self.gcn3(C3))))
up2 = self.deconv2(self.br2_2(up3 + self.br2_1(self.gcn2(C2))))
tr_pred_map = self.br0_1(self.deconv1_1(self.br1_1(self.gcn1_1(up2))))
tcl_pred_map = self.br0_2(self.deconv1_2(self.br1_2(self.gcn1_2(up2))))
geo_pred_map = F.interpolate(self.conv1(up2), scale_factor=2, mode='nearest')
up1 = torch.cat((tr_pred_map, tcl_pred_map, geo_pred_map), dim=1)
elif self.backbone_name == 'resnet_gcn_new':
up5 = self.deconv5(self.br5(self.gcn5(C5)))
up4 = self.deconv4(self.br4_2(up5 + self.br4_1(self.gcn4(C4))))
up3 = self.deconv3(self.br3_2(up4 + self.br3_1(self.gcn3(C3))))
up2 = self.deconv2(self.br2_2(up3 + self.br2_1(self.gcn2(C2))))
tr_pred_map = self.br0_1(self.deconv1_1(self.br1_1(self.conv_tr(up2))))
tcl_pred_map = self.br0_2(self.deconv1_2(self.br1_2(self.conv_tcl(up2))))
geo_pred_map = F.interpolate(self.conv_geo(up2), scale_factor=2, mode='nearest')
up1 = torch.cat((tr_pred_map, tcl_pred_map, geo_pred_map), dim=1)
elif self.backbone_name == 'resnet_gcn_ms':
up5 = self.deconv5(self.br5(self.gcn5(C5)))
up4 = self.deconv4(self.br4_2(up5 + self.br4_1(self.gcn4(C4))))
up3 = self.deconv3(self.br3_2(up4 + self.br3_1(self.gcn3(C3))))
# 128*128
feature_map_128 = self.br2_2(up3 + self.br2_1(self.gcn2(C2)))
tr_pred_128 = self.conv_tr_128(feature_map_128) # N * 2 * 128 * 128
tcl_pred_128 = self.conv_tcl_128(feature_map_128) # N * 2 * 128 * 128
# 256*256
tr_feature_map_256 = self.br1_1(self.deconv2_1(feature_map_128 * torch.exp(tr_pred_128[:, 1:2].sigmoid())))
tr_pred_256 = self.conv_tr_256(tr_feature_map_256)
tcl_feature_map_256 = self.br1_2(self.deconv2_2(feature_map_128 * torch.exp(tcl_pred_128[:, 1:2].sigmoid())))
tcl_pred_256 = self.conv_tcl_256(tcl_feature_map_256)
# 512*512
tr_feature_map_512 = self.br0_1(self.deconv1_1(tr_feature_map_256 * torch.exp(tr_pred_256[:, 1:2].sigmoid())))
tr_pred_map = self.conv_tr_512(tr_feature_map_512)
tcl_feature_map_512 = self.br0_2(self.deconv1_2(tcl_feature_map_256 * torch.exp(tcl_pred_256[:, 1:2].sigmoid())))
tcl_pred_map = self.conv_tcl_512(tcl_feature_map_512)
geo_pred_map = F.interpolate(self.conv_geo(self.deconv2_3(feature_map_128)), scale_factor=2, mode='nearest')
up1 = torch.cat((tr_pred_map, tcl_pred_map, geo_pred_map), dim=1)
elif self.backbone_name == 'resnet_gcn_ms2':
up5 = self.deconv5(self.br5(self.gcn5(C5)))
up4 = self.deconv4(self.br4_2(up5 + self.br4_1(self.gcn4(C4))))
up3 = self.deconv3(self.br3_2(up4 + self.br3_1(self.gcn3(C3))))
# 128*128
feature_map_128 = self.br2_2(up3 + self.br2_1(self.gcn2(C2)))
tcl_pred_128 = self.conv_tcl_128(feature_map_128) # N * 2 * 128 * 128
# 256*256
tcl_feature_map_256 = self.br1_2(self.deconv2_2(feature_map_128 * torch.exp(tcl_pred_128.softmax(dim=1)[:, 1:2])))
tcl_pred_256 = self.conv_tcl_256(tcl_feature_map_256)
# 512*512
tcl_feature_map_512 = self.br0_2(self.deconv1_2(tcl_feature_map_256 * torch.exp(tcl_pred_256.softmax(dim=1)[:, 1:2])))
tcl_pred_map = self.conv_tcl_512(tcl_feature_map_512)
tr_pred_map = self.br0_1(self.deconv1_1(self.br1_1(self.deconv2_1(self.conv_tr(feature_map_128)))))
geo_pred_map = F.interpolate(self.conv_geo(self.deconv2_3(feature_map_128)), scale_factor=2, mode='nearest')
up1 = torch.cat((tr_pred_map, tcl_pred_map, geo_pred_map), dim=1)
return up1
```
#### File: TextSnake_pytorch/util/visualize.py
```python
import torch
import numpy as np
import cv2
import os
from skimage import measure, color
from model.detection_model.TextSnake_pytorch.util.config import config as cfg
from model.detection_model.TextSnake_pytorch.util.misc import fill_hole
def visualize_network_output(output, tr_mask, tcl_mask, prefix):
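# Saves side-by-side visualizations of the predicted vs. target text-region (tr) and text-center-line (tcl) masks for each image in the batch.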
tr_pred = output[:, :2]
tr_score, tr_predict = tr_pred.max(dim=1)
tcl_pred = output[:, 2:4]
tcl_score, tcl_predict = tcl_pred.max(dim=1)
tr_predict = tr_predict.cpu().numpy()
tcl_predict = tcl_predict.cpu().numpy()
tr_target = tr_mask.cpu().numpy()
tcl_target = tcl_mask.cpu().numpy()
for i in range(len(tr_pred)):
tr_pred = (tr_predict[i] * 255).astype(np.uint8)
tr_targ = (tr_target[i] * 255).astype(np.uint8)
tcl_pred = (tcl_predict[i] * 255).astype(np.uint8)
tcl_targ = (tcl_target[i] * 255).astype(np.uint8)
tr_show = np.concatenate([tr_pred, tr_targ], axis=1)
tcl_show = np.concatenate([tcl_pred, tcl_targ], axis=1)
show = np.concatenate([tr_show, tcl_show], axis=0)
show = cv2.resize(show, (512, 512))
path = os.path.join(cfg.vis_dir, '{}_{}.png'.format(prefix, i))
cv2.imwrite(path, show)
def visualize_detection(image, tr, tcl, contours, illegal_contours=None):
image_show = image.copy()
image_show = np.ascontiguousarray(image_show[:, :, ::-1])
image_show = cv2.polylines(image_show, contours, True, (0, 0, 255), 3)
if illegal_contours is not None:
image_show = cv2.polylines(image_show, illegal_contours, True, (0, 255, 0), 3)
conts, _ = cv2.findContours(tcl.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cont in conts:
# remove small regions
if cv2.contourArea(cont) < 50:
tcl = cv2.fillPoly(tcl, [cont], 0)
tr = cv2.cvtColor((tr * 255).astype(np.uint8), cv2.COLOR_GRAY2BGR)
tcl = cv2.cvtColor((tcl * 255).astype(np.uint8), cv2.COLOR_GRAY2BGR)
# labels = measure.label(tcl, connectivity=2)
# tcl_color = color.label2rgb(labels) * 255
# # slightly enlarge for easier to get tcl
# kernel = np.ones((5, 5), np.uint8)
# tcl_color = cv2.dilate(tcl_color, kernel, iterations=2)
image_show = np.concatenate([image_show, tr, tcl], axis=1)
return image_show
# path = os.path.join(cfg.vis_dir, image_id)
# cv2.imwrite(path, image_show)
if __name__ == '__main__':
import json
import os
json_path = '.../result.json'
img_path = '.../test_images'
files = os.listdir(img_path)
with open(json_path, 'r') as f:
data = json.load(f)
for img_name in files:
image = cv2.imread(os.path.join(img_path, img_name))
poly = data[img_name.replace('.jpg', '').replace('gt', 'res')]
pts = np.array(poly['points']).astype(np.int32)
image_show = cv2.polylines(image, [pts], True, (0, 0, 255), 3)
cv2.imwrite(img_name, image_show)
```
#### File: maskrcnn_benchmark_architecture/demo/irra_infer.py
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '5'
import cv2
import numpy as np
from maskrcnn_benchmark.config import cfg
from demo.predictor import ICDARDemo, RRPNDemo
from maskrcnn_benchmark.utils.visualize import vis_image, write_result_ICDAR_RRPN2polys, zip_dir
from maskrcnn_benchmark.data.datasets.irra_interface import get_irra_XXX
from PIL import Image
import time
import json
from tqdm import tqdm
from Pascal_VOC import eval_func
from link_boxes import merge
def res2json(result_dir):
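# Collects the per-image .txt detection results in result_dir into a single res.json keyed by image code, and returns its path.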
res_list = os.listdir(result_dir)
res_dict = {}
for rf in tqdm(res_list):
if rf[-4:] == '.txt':
respath = os.path.join(result_dir, rf)
reslines = open(respath, 'r').readlines()
reskey = 'EEE/' + rf[13:-4]
res_dict[reskey] = [{'points':np.array(l.replace('\n', '').split(','), np.int).reshape(-1, 2).tolist()} for l in reslines]
json_tarf = os.path.join(result_dir, 'res.json')
if os.path.isfile(json_tarf):
print('Json file found, removing it...')
os.remove(json_tarf)
j_f = open(json_tarf, 'w')
json.dump(res_dict, j_f)
print('json dump done', json_tarf)
return json_tarf
def database_to_json(dataset_dir):
database = get_irra_XXX('val', dataset_dir, 'EEE')
json_name = 'data_cache/gt_val_irra.json'
if os.path.isfile(json_name):
print('json_name found, loading it...')
return json_name
data_dict = {}
for data_item in database:
data_code = 'EEE/' + data_item['image'].split('/')[-1].split('.')[0][10:]
print('data_code:', data_code)
data_dict[data_code] = [{'points':pts.reshape(-1, 2).tolist(), 'transcription':'111'} for pts in data_item['polys']]
j_f = open(json_name, 'w')
json.dump(data_dict, j_f)
print('json dump done', json_name)
return json_name
config_file = 'configs/irragular_det/e2e_faster_rcnn_R_50_C4_1x.yaml' # alternatives: "configs/ICDAR2019_det_RRPN/e2e_rrpn_R_50_C4_1x_LSVT_val_4scales_angle_norm.yaml", e2e_rrpn_R_50_C4_1x_ICDAR13_15_trial_test.yaml
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
# cfg.freeze()
# cfg.MODEL.WEIGHT = 'models/IC-13-15-17-Trial/model_0155000.pth'
vis = False
merge_box = cfg.TEST.MERGE_BOX
result_dir = os.path.join('results', config_file.split('/')[-1].split('.')[0], cfg.MODEL.WEIGHT.split('/')[-1].split('.')[0])
if merge_box:
result_dir += '_merge_box'
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
coco_demo = RRPNDemo(
cfg,
min_image_size=800,
confidence_threshold=0.6,
)
dataset_name = 'IRRA_another_nofilter' # cfg.TEST.DATASET_NAME
testing_dataset = {
'IRRA_another1': {
'testing_image_dir': '../datasets/picture/',
'gt_dir':'../datasets/TASK0407/coordinates/EEE/',
'off': [0, 162]
},
'IRRA_another1_denoise': {
'testing_image_dir': '../datasets/denoise_pic/',
'gt_dir':'../datasets/TASK0407/coordinates/EEE/',
'off': [0, 150]
},
'IRRA': {
'testing_image_dir': '../datasets/TASK0407/imshow_picture/EEE',
'gt_dir':'../datasets/TASK0407/coordinates/EEE/',
'off': [0, 162]
},
'IRRA_another_nofilter': {
'testing_image_dir': '../datasets/0514_nofilter/',
'gt_dir':'../datasets/TASK0407/coordinates/EEE/',
'off': [0, 162]
},
'ArT': {
'testing_image_dir': '../datasets/ArT/ArT_detect_train/train_images',
'off': [4000, 5603]
},
}
image_dir = testing_dataset[dataset_name]['testing_image_dir']
gt_dir = testing_dataset[dataset_name]['gt_dir']
# vocab_dir = testing_dataset[dataset_name]['test_vocal_dir']
off_group = testing_dataset[dataset_name]['off']
# load image and then run prediction
# image_dir = '../datasets/ICDAR13/Challenge2_Test_Task12_Images/'
# imlist = os.listdir(image_dir)[off_group[0]:off_group[1]]
gtlist = os.listdir(gt_dir)
gtlist.sort()
print('************* META INFO ***************')
print('config_file:', config_file)
print('result_dir:', result_dir)
print('image_dir:', image_dir)
print('weights:', cfg.MODEL.WEIGHT)
print('merge_box:', merge_box)
print('***************************************')
# print('gtlist:', gtlist)
#num_images = len(imlist)
cnt = 0
num_images = off_group[1] - off_group[0]
if dataset_name == 'IRRA':
for idx in range(off_group[0], off_group[1]):
gt_filename = gtlist[idx]
gt_code = gt_filename.split('.')[0]
image = 'nofilter_' + gt_code + '.jpg'
impath = os.path.join(image_dir, image)
# print('image:', impath)
img = cv2.imread(impath)
cnt += 1
tic = time.time()
predictions, bounding_boxes = coco_demo.run_on_opencv_image(img)
toc = time.time()
print('time cost:', str(toc - tic)[:6], '|', str(cnt) + '/' + str(num_images))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
bboxes_np = bounding_boxes.bbox.data.cpu().numpy()
bboxes_np[:, 2:4] /= cfg.MODEL.RRPN.GT_BOX_MARGIN
if merge_box:
bboxes_np_reverse = bboxes_np.copy()
bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
bboxes_np_reverse = merge(bboxes_np_reverse)
bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
bboxes_np = bboxes_np_reverse
width, height = bounding_boxes.size
if vis:
pil_image = vis_image(Image.fromarray(img), bboxes_np)
pil_image.show()
time.sleep(20)
else:
write_result_ICDAR_RRPN2polys(image[:-4], bboxes_np, threshold=0.7, result_dir=result_dir, height=height, width=width)
#im_file, dets, threshold, result_dir, height, width
#cv2.imshow('win', predictions)
#cv2.waitKey(0)
else:
testing_img = os.listdir(image_dir)
for imname in testing_img:
# gt_filename = gtlist[idx]
# gt_code = gt_filename.split('.')[0]
# image = 'nofilter_' + gt_code + '.jpg'
# impath = os.path.join(image_dir, image)
# print('image:', impath)
impath = os.path.join(image_dir, imname)
img = cv2.imread(impath)
cnt += 1
tic = time.time()
predictions, bounding_boxes = coco_demo.run_on_opencv_image(img)
toc = time.time()
print('time cost:', str(toc - tic)[:6], '|', str(cnt) + '/' + str(num_images))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
bboxes_np = bounding_boxes.bbox.data.cpu().numpy()
bboxes_np[:, 2:4] /= cfg.MODEL.RRPN.GT_BOX_MARGIN
if merge_box:
bboxes_np_reverse = bboxes_np.copy()
bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
bboxes_np_reverse = merge(bboxes_np_reverse)
bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
bboxes_np = bboxes_np_reverse
width, height = bounding_boxes.size
if vis:
pil_image = vis_image(Image.fromarray(img), bboxes_np)
pil_image.show()
time.sleep(20)
else:
write_result_ICDAR_RRPN2polys(imname[:-4], bboxes_np, threshold=0.7, result_dir=result_dir, height=height, width=width)
#im_file, dets, threshold, result_dir, height, width
#cv2.imshow('win', predictions)
#cv2.waitKey(0)
if dataset_name == 'IC15':
zipfilename = os.path.join(result_dir, 'submit_' + config_file.split('/')[-1].split('.')[0] + '_' + cfg.MODEL.WEIGHT.split('/')[-1].split('.')[0] + '.zip')
if os.path.isfile(zipfilename):
print('Zip file exists, removing it...')
os.remove(zipfilename)
zip_dir(result_dir, zipfilename)
comm = 'curl -i -F "submissionFile=@' + zipfilename + '" http://127.0.0.1:8080/evaluate'
# print(comm)
print(os.popen(comm, 'r'))
elif dataset_name == 'LSVT':
# input_json_path = 'results/e2e_rrpn_R_50_C4_1x_LSVT_val/model_0190000/res.json'
gt_json_path = '../datasets/LSVT/train_full_labels.json'
# to json
input_json_path = res2json(result_dir)
eval_func(input_json_path, gt_json_path)
elif dataset_name == 'ArT':
# input_json_path = 'results/e2e_rrpn_R_50_C4_1x_LSVT_val/model_0190000/res.json'
gt_json_path = '../datasets/ArT/ArT_detect_train/train_labels.json'
# to json
input_json_path = res2json(result_dir)
eval_func(input_json_path, gt_json_path)
elif dataset_name == 'IRRA':
gt_json_path = database_to_json('../datasets/TASK0407/')
# to json
input_json_path = res2json(result_dir)
eval_func(input_json_path, gt_json_path)
```
#### File: recognition_model/HARN/main.py
```python
import argparse
import os
import random
import io
import sys
import time
from models.moran import MORAN
import tools.utils as utils
import torch.optim as optim
import numpy as np
import torch.backends.cudnn as cudnn
import torch.utils.data
import tools.dataset as dataset
from torch.autograd import Variable
from collections import OrderedDict
from tools.logger import logger
# whether to import the dataset word list
# from wordlist import result
# from wordlistlsvt import result
import warnings
warnings.filterwarnings('ignore')
os.environ['CUDA_VISIBLE_DEVICES'] = '1' # select GPU
# os.environ['CUDA_VISIBLE_DEVICES'] = '5' # select GPU
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
parser = argparse.ArgumentParser()
# parser.add_argument('--train_nips', default='/home/chenjingye/moran/dataset/art_train', help='path to dataset')
parser.add_argument('--train_nips', default='/home/msy/datasets/OCR_dataset/train', help='path to dataset')
# parser.add_argument('--train_nips', default='dataset/lsvt_train', help='path to dataset')
# # dataset path / change as needed
# parser.add_argument('--train_cvpr', default='dataset/art_train', help='path to dataset')
# parser.add_argument('--valroot', default='/home/chenjingye/moran/dataset/art_test', help='path to dataset')
parser.add_argument('--valroot', default='/home/msy/datasets/OCR_dataset/test', help='path to dataset')
# parser.add_argument('--valroot', default='dataset/lsvt_test', help='path to dataset')
# # test dataset path / change as needed
parser.add_argument('--workers', default=4, type=int, help='number of data loading workers')
parser.add_argument('--batchSize', type=int, default=128, help='input batch size')
# # adjust as needed
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image to network')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image to network') # raw input size fed to the network; 32*280 is common, a height of 64 caused errors before
parser.add_argument('--targetH', type=int, default=32, help='the height of the input image to network')
parser.add_argument('--targetW', type=int, default=100, help='the width of the input image to network') # 32*100 is enough for English text; 32*280 can be used for Chinese text
parser.add_argument('--nh', type=int, default=256, help='size of the lstm hidden state') # number of LSTM hidden units; no need to change
parser.add_argument('--niter', type=int, default=300, help='number of epochs to train for') # number of epochs; CRNN usually converges within 50-60 epochs, so set a large value and stop once converged
parser.add_argument('--lr', type=float, default=1, help='learning rate for Critic, default=0.00005') # learning rate; 1 works best for this code, no need to change
parser.add_argument('--cuda', action='store_false', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use') # only single-GPU training is supported
parser.add_argument('--MORAN', default='', help="path to model (to continue training)")
parser.add_argument('--alphabet', type=str,
default='0:1:2:3:4:5:6:7:8:9:a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:q:r:s:t:u:v:w:x:y:z:$')
parser.add_argument('--sep', type=str, default=':')
parser.add_argument('--experiment', default='/home/msy/HARN_Moran/checkpoints/asrn_se50_OCRdata_50', help='Where to store samples and models')
# parser.add_argument('--experiment', default='checkpoint/asrn_se50_lsvt_50', help='Where to store samples and models')
# # path for saving checkpoints / change as needed
parser.add_argument('--displayInterval', type=int, default=100, help='Interval to be displayed') # print progress every 100 iterations (how many epochs have run)
parser.add_argument('--n_test_disp', type=int, default=10, help='Number of samples to display when test') # number of samples shown during testing
parser.add_argument('--valInterval', type=int, default=1000, help='Interval to be displayed') # run validation every N iterations
parser.add_argument('--saveInterval', type=int, default=4000, help='Interval to be displayed') # checkpoint interval; too long may miss the best model, too short wastes disk space
parser.add_argument('--IndividualStep', default=50, help="epoch for training one model")
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5') # beta1, the decay rate for Adam's first-moment estimate
parser.add_argument('--adadelta', action='store_false', help='Whether to use adadelta (default is rmsprop)')
parser.add_argument('--sgd', action='store_true', help='Whether to use sgd (default is rmsprop)')
parser.add_argument('--BidirDecoder', action='store_false', help='Whether to use BidirDecoder')
opt = parser.parse_args()
print(opt) # print the parsed argument list
# Modify
# opt.alphabet = result
assert opt.ngpu == 1, "Multi-GPU training is not supported yet, due to the variant lengths of the text in a batch."
if opt.experiment is None:
opt.experiment = 'expr'
os.system('mkdir {0}'.format(opt.experiment))
opt.manualSeed = random.randint(1, 10000) # fix seed
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
#---------save logger---------#
log = logger('/home/msy/HARN_Moran/logger/asrn_se50_OCRdata_50')
# log = logger('./logger/asrn_se50_lsvt_50') # # path for saving logs / change as needed
#-----------------------------#
if not torch.cuda.is_available():
assert not opt.cuda, 'You don\'t have a CUDA device.'
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
train_nips_dataset = dataset.lmdbDataset(root=opt.train_nips,
transform=dataset.resizeNormalize((opt.imgW, opt.imgH)),
reverse=opt.BidirDecoder)
assert train_nips_dataset
'''
train_cvpr_dataset = dataset.lmdbDataset(root=opt.train_cvpr,
transform=dataset.resizeNormalize((opt.imgW, opt.imgH)), reverse=opt.BidirDecoder)
assert train_cvpr_dataset
'''
'''
train_dataset = torch.utils.data.ConcatDataset([train_nips_dataset, train_cvpr_dataset])
'''
train_dataset = train_nips_dataset
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.batchSize,
shuffle=False, sampler=dataset.randomSequentialSampler(train_dataset, opt.batchSize),
num_workers=int(opt.workers))
test_dataset = dataset.lmdbDataset(root=opt.valroot,
transform=dataset.resizeNormalize((opt.imgW, opt.imgH)), reverse=opt.BidirDecoder)
nclass = len(opt.alphabet.split(opt.sep)) # total number of classes: 36 for English; for Chinese it is the word list (only a file named wordlist.py is recognized, so rename the file you need accordingly)
nc = 1
converter = utils.strLabelConverterForAttention(opt.alphabet, opt.sep) # assigns each character an id, e.g. 中(2) 国(30) 人(65); the converter maps between ids and characters
criterion = torch.nn.CrossEntropyLoss()
if opt.cuda:
MORAN = MORAN(nc, nclass, opt.nh, opt.targetH, opt.targetW, BidirDecoder=opt.BidirDecoder, CUDA=opt.cuda, log=log)
else:
MORAN = MORAN(nc, nclass, opt.nh, opt.targetH, opt.targetW, BidirDecoder=opt.BidirDecoder,
inputDataType='torch.FloatTensor', CUDA=opt.cuda, log=log)
if opt.MORAN != '':
print('loading pretrained model from %s' % opt.MORAN)
if opt.cuda:
state_dict = torch.load(opt.MORAN)
else:
state_dict = torch.load(opt.MORAN, map_location='cpu')
MORAN_state_dict_rename = OrderedDict()
for k, v in state_dict.items():
name = k.replace("module.", "") # remove `module.`
MORAN_state_dict_rename[name] = v
MORAN.load_state_dict(MORAN_state_dict_rename, strict=True)
image = torch.FloatTensor(opt.batchSize, nc, opt.imgH, opt.imgW)
text = torch.LongTensor(opt.batchSize * 5)
text_rev = torch.LongTensor(opt.batchSize * 5)
length = torch.IntTensor(opt.batchSize)
if opt.cuda:
MORAN.cuda()
MORAN = torch.nn.DataParallel(MORAN, device_ids=range(opt.ngpu))
image = image.cuda()
text = text.cuda()
text_rev = text_rev.cuda()
criterion = criterion.cuda()
image = Variable(image) # wrap the image tensor in a Variable so it can be used on CUDA
text = Variable(text)
text_rev = Variable(text_rev)
length = Variable(length)
# loss averager
loss_avg = utils.averager()
# setup optimizer # optimizer selection (Adadelta is used by default here)
if opt.adam:
optimizer = optim.Adam(MORAN.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
elif opt.adadelta:
optimizer = optim.Adadelta(MORAN.parameters(), lr=opt.lr)
elif opt.sgd:
optimizer = optim.SGD(MORAN.parameters(), lr=opt.lr, momentum=0.9)
else:
optimizer = optim.RMSprop(MORAN.parameters(), lr=opt.lr)
def levenshtein(s1, s2): # Levenshtein distance, a type of edit distance
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
def val(dataset, criterion, max_iter=10000, steps=None):
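# Validation: with BidirDecoder enabled, decodes both reading directions and keeps the more confident one per sample,
# then reports exact-match accuracy and the normalized Levenshtein distance.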
data_loader = torch.utils.data.DataLoader(
dataset, shuffle=False, batch_size=opt.batchSize, num_workers=int(opt.workers)) # opt.batchSize
val_iter = iter(data_loader)
max_iter = min(max_iter, len(data_loader))
n_correct = 0
n_total = 0
distance = 0.0
loss_avg = utils.averager()
#f = open('./log.txt', 'a', encoding='utf-8')
for i in range(max_iter): # large loop bound (capped by the dataloader length above)
data = next(val_iter)
if opt.BidirDecoder:
cpu_images, cpu_texts, cpu_texts_rev = data # data comes from the dataloader
utils.loadData(image, cpu_images)
t, l = converter.encode(cpu_texts, scanned=True) # encode characters into ids
t_rev, _ = converter.encode(cpu_texts_rev, scanned=True)
utils.loadData(text, t)
utils.loadData(text_rev, t_rev)
utils.loadData(length, l)
preds0, preds1 = MORAN(image, length, text, text_rev, debug=False, test=True, steps=steps) # run the HARN model
cost = criterion(torch.cat([preds0, preds1], 0), torch.cat([text, text_rev], 0))
preds0_prob, preds0 = preds0.max(1) # take the top-1 (highest probability) prediction
preds0 = preds0.view(-1)
preds0_prob = preds0_prob.view(-1) # flatten the tensor
sim_preds0 = converter.decode(preds0.data, length.data) # decode ids back into characters
preds1_prob, preds1 = preds1.max(1)
preds1 = preds1.view(-1)
preds1_prob = preds1_prob.view(-1)
sim_preds1 = converter.decode(preds1.data, length.data)
sim_preds = [] # predicted strings
for j in range(cpu_images.size(0)): # join the individual characters into strings
text_begin = 0 if j == 0 else length.data[:j].sum()
if torch.mean(preds0_prob[text_begin:text_begin + len(sim_preds0[j].split('$')[0] + '$')]).item() > \
torch.mean(preds1_prob[text_begin:text_begin + len(sim_preds1[j].split('$')[0] + '$')]).item():
sim_preds.append(sim_preds0[j].split('$')[0] + '$')
else:
sim_preds.append(sim_preds1[j].split('$')[0][-1::-1] + '$')
else: # the other (unused) case
cpu_images, cpu_texts = data
utils.loadData(image, cpu_images)
t, l = converter.encode(cpu_texts, scanned=True)
utils.loadData(text, t)
utils.loadData(length, l)
preds = MORAN(image, length, text, text_rev, test=True)
cost = criterion(preds, text)
_, preds = preds.max(1)
preds = preds.view(-1)
sim_preds = converter.decode(preds.data, length.data)
loss_avg.add(cost) # accumulate the running average of the loss
for pred, target in zip(sim_preds, cpu_texts): # compare with ground truth: cpu_texts is the ground truth, sim_preds are the joined prediction strings
if pred == target.lower(): # count exact matches
n_correct += 1
#f.write("pred %s\t target %s\n" % (pred, target))
distance += levenshtein(pred, target) / max(len(pred), len(target)) # normalized Levenshtein distance
n_total += 1 # one word processed
#f.close()
# print and save # write the results to the log after the run
for pred, gt in zip(sim_preds, cpu_texts):
gt = ''.join(gt.split(opt.sep))
print('%-20s, gt: %-20s' % (pred, gt))
print("correct / total: %d / %d, " % (n_correct, n_total))
print('levenshtein distance: %f' % (distance / n_total))
accuracy = n_correct / float(n_total)
log.scalar_summary('Validation/levenshtein distance', distance / n_total, steps)
log.scalar_summary('Validation/loss', loss_avg.val(), steps)
log.scalar_summary('Validation/accuracy', accuracy, steps)
print('Test loss: %f, accuracy: %f' % (loss_avg.val(), accuracy))
return accuracy
def trainBatch(steps):
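# One training step: encode the labels, run MORAN (bidirectionally when enabled),
# backpropagate the cross-entropy loss and step the optimizer; returns the loss.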
data = next(train_iter)
if opt.BidirDecoder:
cpu_images, cpu_texts, cpu_texts_rev = data
utils.loadData(image, cpu_images)
t, l = converter.encode(cpu_texts, scanned=True)
t_rev, _ = converter.encode(cpu_texts_rev, scanned=True)
utils.loadData(text, t)
utils.loadData(text_rev, t_rev)
utils.loadData(length, l)
preds0, preds1 = MORAN(image, length, text, text_rev)
cost = criterion(torch.cat([preds0, preds1], 0), torch.cat([text, text_rev], 0))
else:
cpu_images, cpu_texts = data
utils.loadData(image, cpu_images)
t, l = converter.encode(cpu_texts, scanned=True)
utils.loadData(text, t)
utils.loadData(length, l)
preds = MORAN(image, length, text, text_rev)
cost = criterion(preds, text)
MORAN.zero_grad()
cost.backward() # backpropagation
optimizer.step() # optimizer update
return cost
if __name__ == '__main__':
t0 = time.time()
acc, acc_tmp = 0, 0
print(' === HARN === ')
for epoch in range(opt.niter):
# --------------------------stage train-------------------------------# staged training, not needed
# boundary = int(opt.IndividualStep)
# print('stage train:', boundary)
# if boundary >= epoch >= 0:
# optimizer = optim.Adadelta(MORAN.module.ASRN.parameters(), lr=opt.lr)
# elif 2 * boundary >= epoch > boundary:
# optimizer = optim.Adadelta(MORAN.module.MORN.parameters(), lr=opt.lr)
# else:
# optimizer = optim.Adadelta(MORAN.parameters(), lr=opt.lr)
# --------------------------------------------------------------------#
print(" === Loading Train Data ===")
train_iter = iter(train_loader)
i = 0
while i < len(train_loader): # len(): number of batches
# print("number of iterations inside main: %d" % len(train_loader))
steps = i + epoch * len(train_loader) # steps determines when to save / print
if steps % opt.valInterval == 0:
for p in MORAN.parameters():
p.requires_grad = False
MORAN.eval()
print('-------------------------------') # training progress printout
acc_tmp = val(test_dataset, criterion, steps=steps)
if acc_tmp > acc:
acc = acc_tmp
try:
time.sleep(0.01)
torch.save(MORAN.state_dict(), '{0}/{1}_{2}.pth'.format(opt.experiment, i, str(acc)[:6]))
except RuntimeError:
print("RuntimeError")
pass
for p in MORAN.parameters():
p.requires_grad = True
MORAN.train()
cost = trainBatch(steps)
loss_avg.add(cost)
if i % opt.displayInterval == 0:
t1 = time.time() # niter is the number of epochs set in the arguments
print('Epoch: %d/%d; iter: %d/%d; Loss: %f; time: %.2f s;' %
(epoch, opt.niter, i, len(train_loader), loss_avg.val(), t1 - t0)),
log.scalar_summary('train loss', loss_avg.val(), i) # once the loss fits down to ~1 (accuracy 90+) the model is fully converged and sufficiently trained
log.scalar_summary('speed batches/persec', i / (time.time() - t0), i)
loss_avg.reset()
t0 = time.time()
'''
t1 = time.time() # niter is the number of epochs set in the arguments
print('Epoch: %d/%d; iter: %d/%d; Loss: %f; time: %.2f s;' %
(epoch, opt.niter, i, len(train_loader), loss_avg.val(), t1 - t0)),
log.scalar_summary('train loss', loss_avg.val(), steps) # once the loss fits down to ~1 (accuracy 90+) the model is fully converged and sufficiently trained
log.scalar_summary('speed batches/persec', steps / (time.time() - t0), steps)
loss_avg.reset()
t0 = time.time()
if steps % opt.displayInterval == 0:
t1 = time.time() # niter is the number of epochs set in the arguments
print('Epoch: %d/%d; iter: %d/%d; Loss: %f; time: %.2f s;' %
(epoch, opt.niter, i, len(train_loader), loss_avg.val(), t1 - t0)),
log.scalar_summary('train loss', loss_avg.val(), steps) # once the loss fits down to ~1 (accuracy 90+) the model is fully converged and sufficiently trained
log.scalar_summary('speed batches/persec', steps / (time.time() - t0), steps)
loss_avg.reset()
t0 = time.time()
'''
i += 1
```
#### File: HARN/models/convnet.py
```python
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from collections import OrderedDict
from senet.se_module import SELayer
from torchsummary import summary
class DefaultCNN(nn.Module):
def __init__(self, imgH, nc, leakyRelu=False):
super(DefaultCNN, self).__init__()
assert imgH % 16 == 0, 'Image height has to be a multiple of 16'
ks = [3, 3, 3, 3, 3, 3, 2]
ps = [1, 1, 1, 1, 1, 1, 0]
ss = [1, 1, 1, 1, 1, 1, 1]
nm = [64, 128, 256, 256, 512, 512, 512]
cnn = nn.Sequential()
def convRelu(i, batchNormalization=False):
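# Append the i-th conv layer (kernel/stride/padding taken from ks/ss/ps above), optionally BatchNorm, then ReLU or LeakyReLU.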
nIn = nc if i == 0 else nm[i - 1]
nOut = nm[i]
cnn.add_module('conv{0}'.format(i),
nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
if batchNormalization:
cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
if leakyRelu:
cnn.add_module('relu{0}'.format(i), nn.LeakyReLU(0.2, inplace=True))
else:
cnn.add_module('relu{0}'.format(i), nn.ReLU(True))
convRelu(0, True)
cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))
convRelu(1, True)
cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))
convRelu(2, True)
convRelu(3, True)
cnn.add_module('pooling{0}'.format(2), nn.MaxPool2d((2, 2), (2, 1), (0, 1)))
convRelu(4, True)
convRelu(5, True)
cnn.add_module('pooling{0}'.format(3), nn.MaxPool2d((2, 2), (2, 1), (0, 1)))
convRelu(6, True)
self.cnn = cnn
print("Initializing cnn net weights...")
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight.data)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, input):
conv = self.cnn(input)
return conv
def defaultcnn(**kwargs):
model = DefaultCNN(imgH=32, nc=1)
return model
# -------------------------------------------------------------------------------#
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate,
kernel_size=1, stride=1, bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)),
self.drop_rate = drop_rate
def forward(self, x):
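# Dense connectivity: concatenate the block's new features with its input along the channel dimension.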
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate)
self.add_module('denselayer%d' % (i + 1), layer)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features, iblock):
super(_Transition, self).__init__()
assert iblock < 4, "There are maximal 4 blocks."
self.ks = [2, 2, 2]
self.h_ss = [2, 2, 2]
self.w_ss = [1, 1, 1]
self.w_pad = [1, 1, 1]
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d((self.ks[iblock], self.ks[iblock]),
(self.h_ss[iblock], self.w_ss[iblock]),
(0, self.w_pad[iblock])))
class DenseNet(nn.Module):
def __init__(self, num_in, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0):
super(DenseNet, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(num_in, num_init_features, kernel_size=3, stride=2, padding=1, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=2, stride=2)),
]))
num_features = num_init_features
# Each denseblock
for i, num_layers in enumerate(block_config):
block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2, iblock=i)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
# Official init from torch repo
# print("Initializing Dense net weights...")
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight.data)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
features = self.features(x)
out = self.relu(features)
# out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
return out
def DenseNet121(**kwargs):
print("Initializing DenseNet121 net weights...")
model = DenseNet(num_in=1, num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
**kwargs)
return model
def DenseNet169(**kwargs):
print("Initializing DenseNet169 net weights...")
model = DenseNet(num_in=1, num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32),
**kwargs)
return model
def DenseNet201(**kwargs):
print("Initializing DenseNet201 net weights...")
model = DenseNet(num_in=1, num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32),
**kwargs)
return model
#######Resnet#######
def conv3x3(in_planes, out_planes, stride=(1, 1)):
return nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=(1, 1), downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=(1, 1), downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, num_in, block, layers):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(num_in, 64, kernel_size=7,
stride=1, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=(2, 2))
self.layer2 = self._make_layer(block, 128, layers[1], stride=(2, 1))
self.layer3 = self._make_layer(block, 256, layers[2], stride=(2, 1))
self.layer4 = self._make_layer(block, 512, layers[3], stride=(2, 1))
# Official init from torch repo
# print("Initializing Resnet net weights...")
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight.data)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=(1, 1)):
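# Build one residual stage; a 1x1 conv downsample aligns the identity branch when the stride or channel count changes.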
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion), )
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def ResNet18(**kwargs):
print("Initializing Resnet18 net weights...")
model = ResNet(num_in=1, block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
return model
def ResNet34(**kwargs):
print("Initializing Resnet34 net weights...")
model = ResNet(num_in=1, block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)
return model
def ResNet50(**kwargs):
print("Initializing Resnet50 net weights...")
model = ResNet(num_in=1, block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
return model
def ResNet101(**kwargs):
model = ResNet(num_in=1, block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
return model
def ResNet152(**kwargs):
    model = ResNet(num_in=1, block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)
    return model
#######-----------SEResnet-----------#######
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class SEBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16):
super(SEBasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1)
self.bn2 = nn.BatchNorm2d(planes)
self.se = SELayer(planes, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se = SELayer(planes * 4, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEResNet(nn.Module):
def __init__(self, num_in, block, layers):
self.inplanes = 64
super(SEResNet, self).__init__()
self.conv1 = nn.Conv2d(num_in, 64, kernel_size=7,
stride=1, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=(2, 2))
self.layer2 = self._make_layer(block, 128, layers[1], stride=(2, 1))
self.layer3 = self._make_layer(block, 256, layers[2], stride=(2, 1))
self.layer4 = self._make_layer(block, 512, layers[3], stride=(2, 1))
# Official init from torch repo
# print("Initializing SEResNet weights...")
for m in self.modules():
if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight.data)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=(1, 1)):
downsample = None
        if stride != (1, 1) or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion), )
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def se_resnet18(**kwargs):
print("Initializing SE_Resnet18 net weights...")
model = SEResNet(num_in=1, block=SEBasicBlock, layers=[2, 2, 2, 2], **kwargs)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
def se_resnet34(**kwargs):
print("Initializing SE_Resnet34 net weights...")
model = SEResNet(num_in=1, block=SEBasicBlock, layers=[3, 4, 6, 3], **kwargs)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
def se_resnet50(**kwargs):
print("Initializing SE_Resnet50 net weights...")
model = SEResNet(num_in=1, block=SEBottleneck, layers=[3, 4, 6, 3], **kwargs)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
def se_resnet101(**kwargs):
model = SEResNet(num_in=3, block=SEBottleneck, layers=[3, 4, 23, 3], **kwargs)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
def se_resnet152(**kwargs):
model = SEResNet(num_in=3, block=SEBottleneck, layers=[3, 8, 36, 3], **kwargs)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
#######-----------SEcnn-----------#######
class selayer(nn.Module):
def __init__(self, i):
super(selayer, self).__init__()
ks = [3, 3, 3, 3, 3, 3, 2]
ps = [1, 1, 1, 1, 1, 1, 0]
ss = [1, 1, 1, 1, 1, 1, 1]
nm = [64, 128, 256, 256, 512, 512, 512]
        nIn = nc if i == 0 else nm[i - 1]  # nc would need to exist at module scope for i == 0; SECNN only builds selayer with i >= 1
nOut = nm[i]
self.conv = nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i])
self.bn = nn.BatchNorm2d(nOut)
self.relu = nn.ReLU(inplace=True)
self.se = SELayer(nOut, 16)
def forward(self, x):
out = self.conv(x)
out = self.bn(out)
out = self.relu(out)
out = self.se(out)
return out
class SECNN(nn.Module):
def __init__(self, imgH, nc, leakyRelu=False):
super(SECNN, self).__init__()
assert imgH % 16 == 0, 'Image height has to be a multiple of 16'
self.layer0 = nn.Conv2d(nc, 64, 3, 1, 1)
self.bn0 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.pool0 = nn.MaxPool2d(2, 2)
self.layer1 = selayer(1)
self.pool1 = nn.MaxPool2d(2, 2)
self.layer2 = selayer(2)
self.layer3 = selayer(3)
self.pool2 = nn.MaxPool2d((2, 2), (2, 1), (0, 1))
self.layer4 = selayer(4)
self.layer5 = selayer(5)
self.pool3 = nn.MaxPool2d((2, 2), (2, 1), (0, 1))
self.layer6 = selayer(6)
# self.conv6 = nn.Conv2d(512, 512, 2, 1, 0)
# self.bn6 = nn.BatchNorm2d(512)
print("Initializing secnn weights...")
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight.data)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, input):
# print('input:', input.shape)
x = self.layer0(input)
x = self.bn0(x)
x = self.relu(x)
x = self.pool0(x)
x = self.layer1(x)
x = self.pool1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.pool2(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.pool3(x)
x = self.layer6(x)
# x = self.conv6(x)
# x = self.bn6(x)
# print ('secnn_out:',x.shape)
return x
def secnn(**kwargs):
model = SECNN(imgH=32, nc=1)
# print(model)
return model
if __name__ == '__main__':
model = secnn().cuda()
    summary(model, (1, 32, 100))  # SECNN is built with nc=1, so the dummy input must be single-channel
```
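All of the backbones above keep `num_in=1` and switch to `(2, 1)` strides in the deeper stages, so a text-line crop is collapsed to height 1 while most of the width survives as the sequence axis. A minimal smoke test, written only as a sketch (the import path is an assumption, matching the `models.convnet` import used by crann.py further below):

```python
# Sketch: run one batch of grayscale 32x100 text-line crops through ResNet18 above.
import torch
from models.convnet import ResNet18  # assumed path, per the import in crann.py

model = ResNet18()
x = torch.randn(2, 1, 32, 100)        # [batch, channels=num_in, H, W]
with torch.no_grad():
    feat = model(x)
print(feat.shape)  # roughly torch.Size([2, 512, 1, 25]) given the (2, 2)/(2, 1) strides
```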
#### File: models/.ipynb_checkpoints/morn-checkpoint.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class MORN(nn.Module):
def __init__(self, nc, targetH, targetW, inputDataType='torch.cuda.FloatTensor', maxBatch=256, CUDA=True, log=None):
super(MORN, self).__init__()
self.targetH = targetH
self.targetW = targetW
self.inputDataType = inputDataType
self.maxBatch = maxBatch
self.cuda = CUDA
self.log = log
self.cnn = nn.Sequential(
nn.MaxPool2d(2, 2),
nn.Conv2d(nc, 64, 3, 1, 1), nn.BatchNorm2d(64), nn.ReLU(True), nn.MaxPool2d(2, 2),
nn.Conv2d(64, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(True), nn.MaxPool2d(2, 2),
nn.Conv2d(128, 64, 3, 1, 1), nn.BatchNorm2d(64), nn.ReLU(True),
nn.Conv2d(64, 16, 3, 1, 1), nn.BatchNorm2d(16), nn.ReLU(True),
nn.Conv2d(16, 2, 3, 1, 1), nn.BatchNorm2d(2)
)
self.pool = nn.MaxPool2d(2, 1)
h_list = np.arange(self.targetH) * 2. / (self.targetH - 1) - 1
w_list = np.arange(self.targetW) * 2. / (self.targetW - 1) - 1
grid = np.meshgrid(
w_list,
h_list,
indexing='ij'
)
grid = np.stack(grid, axis=-1)
grid = np.transpose(grid, (1, 0, 2))
grid = np.expand_dims(grid, 0)
grid = np.tile(grid, [maxBatch, 1, 1, 1])
grid = torch.from_numpy(grid).type(self.inputDataType)
if self.cuda:
grid = grid.cuda()
self.grid = Variable(grid, requires_grad=False)
self.grid_x = self.grid[:, :, :, 0].unsqueeze(3)
self.grid_y = self.grid[:, :, :, 1].unsqueeze(3)
def forward(self, x, test, enhance=1, debug=False, steps=None):
if not test and np.random.random() > 0.5:
return nn.functional.interpolate(x, size=(self.targetH, self.targetW), mode='bilinear')
if not test:
enhance = 0
assert x.size(0) <= self.maxBatch
assert x.data.type() == self.inputDataType
grid = self.grid[:x.size(0)]
grid_x = self.grid_x[:x.size(0)]
grid_y = self.grid_y[:x.size(0)]
x_small = nn.functional.interpolate(x, size=(self.targetH, self.targetW), mode='bilinear')
offsets = self.cnn(x_small)
offsets_posi = nn.functional.relu(offsets, inplace=False)
offsets_nega = nn.functional.relu(-offsets, inplace=False)
offsets_pool = self.pool(offsets_posi) - self.pool(offsets_nega)
offsets_grid = nn.functional.grid_sample(offsets_pool, grid)
offsets_grid = offsets_grid.permute(0, 2, 3, 1).contiguous()
offsets_grid_x = offsets_grid[:, :, :, 0].unsqueeze(3)
offsets_grid_y = offsets_grid[:, :, :, 1].unsqueeze(3)
offsets_x = torch.cat([grid_x + offsets_grid_x, grid_y + offsets_grid_y], 3)
# offsets_x = torch.cat([grid_x, grid_y + offsets_grid], 3)
x_rectified = nn.functional.grid_sample(x, offsets_x)
for iteration in range(enhance):
offsets = self.cnn(x_rectified)
offsets_posi = nn.functional.relu(offsets, inplace=False)
offsets_nega = nn.functional.relu(-offsets, inplace=False)
offsets_pool = self.pool(offsets_posi) - self.pool(offsets_nega)
offsets_grid += nn.functional.grid_sample(offsets_pool, grid).permute(0, 2, 3, 1).contiguous()
offsets_grid_x = offsets_grid[:, :, :, 0].unsqueeze(3)
offsets_grid_y = offsets_grid[:, :, :, 1].unsqueeze(3)
offsets_x = torch.cat([grid_x + offsets_grid_x, grid_y + offsets_grid_y], 3)
# offsets_x = torch.cat([grid_x, grid_y + offsets_grid], 3)
x_rectified = nn.functional.grid_sample(x, offsets_x)
if debug:
offsets_mean = torch.mean(offsets_grid.view(x.size(0), -1), 1)
offsets_max, _ = torch.max(offsets_grid.view(x.size(0), -1), 1)
offsets_min, _ = torch.min(offsets_grid.view(x.size(0), -1), 1)
import matplotlib.pyplot as plt
from colour import Color
from torchvision import transforms
import cv2
alpha = 0.7
density_range = 256
cmap = plt.get_cmap("rainbow")
blue = Color("blue")
hex_colors = list(blue.range_to(Color("red"), density_range))
rgb_colors = [[rgb * 255 for rgb in color.rgb] for color in hex_colors][::-1]
to_pil_image = transforms.ToPILImage()
for i in range(1):
img_small = x_small[i].data.cpu().mul_(0.5).add_(0.5)
img = to_pil_image(img_small)
img = np.array(img)
if len(img.shape) == 2:
img = cv2.merge([img.copy()] * 3)
img_copy_x = img.copy()
img_copy_y = img.copy()
v_max = offsets_max.data[i].cpu()
v_min = offsets_min.data[i].cpu()
img_offsets_x = (offsets_grid[i][:, :, 0]).view(1, self.targetH, self.targetW).data.cpu().add_(-v_min).mul_(
1. / (v_max - v_min))
img_offsets_y = (offsets_grid[i][:, :, 1]).view(1, self.targetH, self.targetW).data.cpu().add_(-v_min).mul_(
1. / (v_max - v_min))
img_offsets_x = to_pil_image(img_offsets_x)
img_offsets_y = to_pil_image(img_offsets_y)
img_offsets_x = np.array(img_offsets_x)
img_offsets_y = np.array(img_offsets_y)
color_map_x = np.empty([self.targetH, self.targetW, 3], dtype=int)
color_map_y = np.empty([self.targetH, self.targetW, 3], dtype=int)
for h_i in range(self.targetH):
for w_i in range(self.targetW):
color_map_x[h_i][w_i] = rgb_colors[int(img_offsets_x[h_i, w_i] / 256. * density_range)]
color_map_y[h_i][w_i] = rgb_colors[int(img_offsets_y[h_i, w_i] / 256. * density_range)]
color_map_x = color_map_x.astype(np.uint8)
color_map_y = color_map_y.astype(np.uint8)
cv2.addWeighted(color_map_x, alpha, img_copy_x, 1 - alpha, 0, img_copy_x)
cv2.addWeighted(color_map_y, alpha, img_copy_y, 1 - alpha, 0, img_copy_y)
img_processed = x_rectified[i].data.cpu().mul_(0.5).add_(0.5)
img_processed = to_pil_image(img_processed)
img_processed = np.array(img_processed)
if len(img_processed.shape) == 2:
img_processed = cv2.merge([img_processed.copy()] * 3)
total_img = np.ones([self.targetH, self.targetW * 4 + 15, 3], dtype=int) * 255
total_img[0:self.targetH, 0:self.targetW] = img
total_img[0:self.targetH, self.targetW + 5:2 * self.targetW + 5] = img_copy_x
total_img[0:self.targetH, self.targetW * 2 + 10:3 * self.targetW + 10] = img_copy_y
total_img[0:self.targetH, self.targetW * 3 + 15:4 * self.targetW + 15] = img_processed
total_img = cv2.resize(total_img.astype(np.uint8), (800, 100))
# cv2.imshow("Input_Offsets_Output", total_img)
# cv2.waitKey()
self.log.image_summary('attention_map', [total_img], steps)
# cv2.imwrite('attention_map', total_img)
# return x_rectified, total_img
return x_rectified
return x_rectified
```
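MORN predicts a small 2-channel offset map, resamples it onto the target-sized grid, and then rectifies the input with `grid_sample`, so the output always comes back at `(targetH, targetW)` regardless of the input resolution. A quick shape check, as a sketch only (CUDA is assumed, as the default `inputDataType` implies, and the import path is hypothetical since the file above is an `.ipynb_checkpoints` copy):

```python
# Sketch: rectify a dummy batch with the MORN module defined above.
import torch
from models.morn import MORN  # assumed path; the file above is a checkpoint copy

morn = MORN(nc=1, targetH=32, targetW=100, maxBatch=4).cuda()
x = torch.randn(2, 1, 48, 160).cuda()  # any input size; batch size must not exceed maxBatch
out = morn(x, test=True, enhance=1)    # test=True disables the random bilinear bypass
print(out.shape)                       # torch.Size([2, 1, 32, 100])
```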
#### File: MORAN_V2/module/crann.py
```python
import models.convnet as ConvNets
import models.recurrent as SeqNets
import torch.nn as nn
import torch.nn.parallel
import torch.distributed as dist
class CRANN(nn.Module):
def __init__(self, crann_config, n_class):
super(CRANN, self).__init__()
self.ngpu = crann_config['N_GPU']
cnn_conf = crann_config['CNN']
print('Constructing {}'.format(cnn_conf['MODEL']))
self.cnn = ConvNets.__dict__[cnn_conf['MODEL']]()
rnn_conf = crann_config['RNN']
print('Constructing {}'.format(rnn_conf['MODEL']))
self.rnn = SeqNets.__dict__[rnn_conf['MODEL']](rnn_conf, n_class)
def forward(self, input):
c_feat = data_parallel(self.cnn, input, self.ngpu)
b, c, h, w = c_feat.size()
#print("feature size, b:{0}, c:{1}, h:{2}, w:{3}".format(b, c, h, w))
assert h == 1, "the height of the conv must be 1"
c_feat = c_feat.squeeze(2)
c_feat = c_feat.permute(2, 0, 1) # [w, b, c]
output = data_parallel(self.rnn, c_feat, self.ngpu, dim=1)
return output
def data_parallel(model, input, ngpu, dim=0):
#dist.init_process_group(init_method='file:///workspace/mnt/group/ocr-fd-group/zhangpeiyao/CRNN/zhang/sharedfile',backend="gloo",world_size=4,group_name="pytorch_test")
if isinstance(input.data, torch.cuda.FloatTensor) and ngpu > 1:
output = nn.parallel.data_parallel(model, input, range(ngpu), dim=dim)
#output = nn.parallel.DistributedDataParallel(model, input, range(ngpu), dim=dim)
else:
output = model(input.cuda())#####################
return output
```
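The assertion in `CRANN.forward` is the contract between the two halves: the convolutional feature map must be exactly one pixel high so that every remaining column becomes one time step for the recurrent head. The reshaping can be checked on a dummy tensor without any model:

```python
# Sketch of the CNN-to-sequence hand-off used in CRANN.forward (shapes only).
import torch

c_feat = torch.randn(4, 512, 1, 25)       # [b, c, h, w] with h == 1 after the conv stack
seq = c_feat.squeeze(2).permute(2, 0, 1)  # -> [w, b, c]: one time step per feature column
print(seq.shape)                          # torch.Size([25, 4, 512])
```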
#### File: DocumentSRModel/databuilder/rvl_cdip.py
```python
import os
import cv2
import numpy as np
import random
from tqdm import tqdm
TEMP_CACHE_NAME = './~temp.png'
gaussian_blur_params = [1, 3, 3, 3, 3, 3, 5]
def build_dataset(data_dir, new_dir='datasets', dataset_name='rvl-cdip', mode='train'):
origin_dir = os.path.join(data_dir, dataset_name)
label_path = os.path.join(origin_dir, 'labels', mode+'.txt')
image_dir = os.path.join(origin_dir, 'images')
local_dir = os.path.join(new_dir, dataset_name+'_'+mode)
train_dir = os.path.join(new_dir, dataset_name+'_'+mode+'_train')
valid_dir = os.path.join(new_dir, dataset_name+'_'+mode+'_valid')
test_dir = os.path.join(new_dir, dataset_name+'_'+mode+'_test')
if not os.path.exists(origin_dir):
print(origin_dir)
raise Exception('Original dataset path not exists')
if not os.path.exists(local_dir):
os.makedirs(local_dir)
if not os.path.exists(train_dir):
os.makedirs(train_dir)
if not os.path.exists(test_dir):
os.makedirs(test_dir)
if not os.path.exists(valid_dir):
os.makedirs(valid_dir)
label_file = open(label_path, 'r')
res_dict = {}
for idx, imgline in tqdm(enumerate(label_file)):
res = imgline.split(' ')
img_path, label = res[0], res[1]
img_name = img_path.split('/')[-1]
# load origin image
if not os.path.exists(os.path.join(image_dir, img_path)):
            print('! Image does not exist: ' + img_path)
continue
else:
hr_img = cv2.imread(os.path.join(image_dir, img_path))
if hr_img is None:
print('! Image is None:' + img_path)
continue
if label not in res_dict.keys():
res_dict[label] = [(img_path, img_name)]
else: res_dict[label].append((img_path, img_name))
# cv2.imwrite(os.path.join(local_dir, img_name), hr_img)
idx = 0
for key in tqdm(res_dict.keys()):
for img_path, img_name in res_dict[key]:
hr_img = cv2.imread(os.path.join(image_dir, img_path))
if idx % 10 == 0:
cv2.imwrite(os.path.join(test_dir, img_name), hr_img)
elif idx % 10 == 1:
cv2.imwrite(os.path.join(valid_dir, img_name), hr_img)
else:
cv2.imwrite(os.path.join(train_dir, img_name), hr_img)
idx += 1
# def build_dataset(data_dir, new_dir='datasets', dataset_name='rvl-cdip', mode='train'):
# origin_dir = os.path.join(data_dir, dataset_name)
# label_path = os.path.join(origin_dir, 'labels', mode+'.txt')
# image_dir = os.path.join(origin_dir, 'images')
# local_dir = os.path.join(new_dir, dataset_name+'_'+mode)
# if not os.path.exists(origin_dir):
# print(origin_dir)
# raise Exception('Original dataset path not exists')
# if not os.path.exists(local_dir):
# os.makedirs(local_dir)
# os.makedirs(os.path.join(local_dir, 'LR'))
# os.makedirs(os.path.join(local_dir, 'LRN'))
# os.makedirs(os.path.join(local_dir, 'HR'))
# label_file = open(label_path, 'r')
# for idx, imgline in tqdm(enumerate(label_file)):
# img_path = imgline.split(' ')[0]
# img_name = img_path.split('/')[-1]
# # load origin image
# if not os.path.exists(os.path.join(image_dir, img_path)):
# print('! Image is not exists:' + img_path)
# continue
# else:
# hr_img = cv2.imread(os.path.join(image_dir, img_path))
# if hr_img is None:
# print('! Image is None:' + img_path)
# continue
# # build general low resolution image
# lr_img = cv2.resize(hr_img, None, None, 0.5, 0.5)
# lrn_img = lr_img.copy()
# # build noisy low resolution image
# prob = random.random()
# if prob <= 0.45:
# degradation = 'compression'
# elif prob <= 0.85:
# degradation = 'gaussian blur'
# elif prob <= 0.7:
# degradation = 'gaussian noise'
# elif prob < 0.8:
# degradation = 'salt pepper noise'
# # additional degradation
# if degradation == 'compression':
# r1 = np.random.randint(5, 95)
# r2 = np.random.randint(2, 10)
# cv2.imwrite(TEMP_CACHE_NAME, lr_img, [int(cv2.IMWRITE_JPEG_QUALITY), r1])
# lrn_img = cv2.imread(TEMP_CACHE_NAME)
# cv2.imwrite(TEMP_CACHE_NAME, lrn_img, [int(cv2.IMWRITE_PNG_COMPRESSION), r2])
# lrn_img = cv2.imread(TEMP_CACHE_NAME)
# elif degradation == 'gaussian blur':
# r = int(np.random.choice(gaussian_blur_params))
# lrn_img = cv2.GaussianBlur(lr_img, (r, r), 0)
# elif degradation == 'salt pepper noise':
# pass
# cv2.imwrite(os.path.join(local_dir, 'HR', img_name), hr_img)
# cv2.imwrite(os.path.join(local_dir, 'LR', img_name), lr_img)
# cv2.imwrite(os.path.join(local_dir, 'LRN', img_name), lrn_img)
# if os.path.exists(TEMP_CACHE_NAME):
# os.remove(TEMP_CACHE_NAME)
```
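`build_dataset` first groups the RVL-CDIP images by label and then walks each class, routing images to the test, validation, and train folders via `idx % 10`, which yields an approximately 10/10/80 split stratified per class. A hypothetical invocation (all paths are placeholders):

```python
# Sketch: data_dir must contain rvl-cdip/labels/<mode>.txt and rvl-cdip/images/.
from databuilder.rvl_cdip import build_dataset  # assumed import path

build_dataset(data_dir='/path/to/data', new_dir='datasets',
              dataset_name='rvl-cdip', mode='train')
```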
#### File: super_resolution_model/DocumentSRModel/dataloader.py
```python
import os
from PIL import Image, ImageFilter
import cv2
import random
import numpy as np
import torch
import torch.utils.data as Data
import torchvision.transforms as Transforms
def is_image_file(filename):
return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg", ".bmp", '.tif', '.tiff'])
def load_img(filepath, type='RGB'):
img = Image.open(filepath).convert(type)
return img
def calculate_valid_crop_size(crop_size, scale_factor):
return crop_size - (crop_size % scale_factor)
class TrainDataset(Data.Dataset):
def __init__(self, image_dir, crop_size=512, scale_factor=4,
random_scale=True, rotate=True, fliplr=True, fliptb=True):
super(TrainDataset, self).__init__()
self.image_dir = image_dir
self.image_filenames = []
self.image_filenames.extend(os.path.join(image_dir, x)
for x in sorted(os.listdir(image_dir))
if is_image_file(x))
self.crop_size = crop_size
self.scale_factor = scale_factor
self.random_scale = random_scale
self.rotate = rotate
self.fliplr = fliplr
self.fliptb = fliptb
h_w_scale = 1
self.crop_size_h = calculate_valid_crop_size(self.crop_size // h_w_scale, self.scale_factor)
self.crop_size_w = self.crop_size_h * h_w_scale
def __getitem__(self, index):
# load image
img = load_img(self.image_filenames[index])
# determine valid HR image size with scale factor
hr_img_w = self.crop_size_w
hr_img_h = self.crop_size_h
# determine LR image size
lr_img_w_2x = hr_img_w // (self.scale_factor // 2)
lr_img_h_2x = hr_img_h // (self.scale_factor // 2)
lr_img_w_4x = hr_img_w // self.scale_factor
lr_img_h_4x = hr_img_h // self.scale_factor
# random scaling between [0.5, 1.0]
if self.random_scale:
eps = 1e-3
ratio = random.randint(5, 10) * 0.1
if hr_img_w * ratio < self.crop_size_w:
                ratio = self.crop_size_w / hr_img_w + eps
if hr_img_h * ratio < self.crop_size_h:
ratio = self.crop_size_h / hr_img_h + eps
scale_w = int(hr_img_w * ratio)
scale_h = int(hr_img_h * ratio)
transform = Transforms.Resize(
(scale_h, scale_w), interpolation=Image.ANTIALIAS)
img = transform(img)
# random crop on image
transform = Transforms.RandomCrop((self.crop_size_h, self.crop_size_w))
img = transform(img)
# random rotation between [90, 180, 270] degrees
if self.rotate:
rv = random.randint(0, 3)
img = img.rotate(90 * rv, expand=True)
# random horizontal flip
if self.fliplr:
transform = Transforms.RandomHorizontalFlip()
img = transform(img)
# random vertical flip
if self.fliptb:
if random.random() < 0.5:
img = img.transpose(Image.FLIP_TOP_BOTTOM)
hr_img = Transforms.CenterCrop((hr_img_h, hr_img_w))(img)
lr2x_img = Transforms.Resize((lr_img_h_2x, lr_img_w_2x), interpolation=Image.ANTIALIAS)(hr_img)
lr4x_img = Transforms.Resize((lr_img_h_4x, lr_img_w_4x), interpolation=Image.ANTIALIAS)(hr_img)
bc2x_img = Transforms.Resize((lr_img_h_2x, lr_img_w_2x), interpolation=Image.BICUBIC)(lr4x_img)
bc4x_img = Transforms.Resize((hr_img_h, hr_img_w), interpolation=Image.BICUBIC)(lr4x_img)
# Tensor Transform
img_transform = Transforms.ToTensor()
hr_img = img_transform(hr_img)
lr2x_img = img_transform(lr2x_img)
lr4x_img = img_transform(lr4x_img)
bc2x_img = img_transform(bc2x_img)
bc4x_img = img_transform(bc4x_img)
# print(hr_img.size())
# print(lr2x_img.size())
# print(lr4x_img.size())
# print(bc2x_img.size())
# print(bc4x_img.size())
return hr_img, lr2x_img, lr4x_img, bc2x_img, bc4x_img
def __len__(self):
return len(self.image_filenames)
class DevDataset(Data.Dataset):
def __init__(self, image_dir):
super(DevDataset, self).__init__()
self.image_dir = image_dir
self.image_filenames = []
self.image_filenames.extend(os.path.join(image_dir, x)
for x in sorted(os.listdir(image_dir))
if is_image_file(x))
def __getitem__(self, index):
# load image
img = load_img(self.image_filenames[index])
width = img.size[0]
height = img.size[1]
# determine LR image size
lr_img_w_4x = width // 4
lr_img_h_4x = height // 4
lr_img_w_2x = lr_img_w_4x * 2
lr_img_h_2x = lr_img_h_4x * 2
hr_img_w = lr_img_w_4x * 4
hr_img_h = lr_img_h_4x * 4
hr_img = Transforms.Resize((hr_img_h, hr_img_w))(img)
lr2x_img = Transforms.Resize((lr_img_h_2x, lr_img_w_2x), interpolation=Image.ANTIALIAS)(hr_img)
lr4x_img = Transforms.Resize((lr_img_h_4x, lr_img_w_4x), interpolation=Image.ANTIALIAS)(hr_img)
bc2x_img = Transforms.Resize((lr_img_h_2x, lr_img_w_2x), interpolation=Image.BICUBIC)(lr4x_img)
bc4x_img = Transforms.Resize((hr_img_h, hr_img_w), interpolation=Image.BICUBIC)(lr4x_img)
# Tensor Transform
img_transform = Transforms.ToTensor()
hr_img = img_transform(hr_img)
lr2x_img = img_transform(lr2x_img)
lr4x_img = img_transform(lr4x_img)
bc2x_img = img_transform(bc2x_img)
bc4x_img = img_transform(bc4x_img)
return hr_img, lr2x_img, lr4x_img, bc2x_img, bc4x_img
def __len__(self):
return len(self.image_filenames)
class TestDataset(Data.Dataset):
def __init__(self, image_dir):
super(TestDataset, self).__init__()
self.image_dir = image_dir
self.image_filenames = []
self.image_filenames.extend(os.path.join(image_dir, x)
for x in sorted(os.listdir(image_dir))
if is_image_file(x))
def __getitem__(self, index):
# load image
img = load_img(self.image_filenames[index])
width = img.size[0]
height = img.size[1]
# determine LR image size
lr_img_w_4x = width // 4
lr_img_h_4x = height // 4
lr_img_w_2x = lr_img_w_4x * 2
lr_img_h_2x = lr_img_h_4x * 2
hr_img_w = lr_img_w_4x * 4
hr_img_h = lr_img_h_4x * 4
hr_img = Transforms.Resize((hr_img_h, hr_img_w))(img)
lr2x_img = Transforms.Resize((lr_img_h_2x, lr_img_w_2x), interpolation=Image.ANTIALIAS)(hr_img)
lr4x_img = Transforms.Resize((lr_img_h_4x, lr_img_w_4x), interpolation=Image.ANTIALIAS)(hr_img)
bc2x_img = Transforms.Resize((lr_img_h_2x, lr_img_w_2x), interpolation=Image.BICUBIC)(lr4x_img)
bc4x_img = Transforms.Resize((hr_img_h, hr_img_w), interpolation=Image.BICUBIC)(lr4x_img)
# Tensor Transform
img_transform = Transforms.ToTensor()
hr_img = img_transform(hr_img)
lr2x_img = img_transform(lr2x_img)
lr4x_img = img_transform(lr4x_img)
bc2x_img = img_transform(bc2x_img)
bc4x_img = img_transform(bc4x_img)
return hr_img, lr2x_img, lr4x_img, bc2x_img, bc4x_img
def __len__(self):
return len(self.image_filenames)
```
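Each `__getitem__` above returns five tensors: the HR crop, its 2x and 4x downscaled versions, and the bicubic re-upsampled counterparts, which is the usual input set for a two-stage super-resolution loss. A hypothetical loader setup (the image directory is a placeholder):

```python
# Sketch: draw one batch from TrainDataset defined above.
from torch.utils.data import DataLoader
from dataloader import TrainDataset  # assumed import path

train_set = TrainDataset('path/to/hr_images', crop_size=512, scale_factor=4)
loader = DataLoader(train_set, batch_size=8, shuffle=True, num_workers=4)
hr, lr2x, lr4x, bc2x, bc4x = next(iter(loader))
print(hr.shape, lr2x.shape, lr4x.shape)  # [8, 3, 512, 512], [8, 3, 256, 256], [8, 3, 128, 128]
```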
#### File: DocumentSRModel/utils/motionblur.py
```python
import os
import cv2
import numpy as np
from math import ceil
import matplotlib.pyplot as plt
from scipy import signal, misc
class BlurImage(object):
    def blur_image_path(self, img_path, PSFs=None, part=None, path_to_save=None, show=False):
        """
        :param img_path: path to a square, RGB image.
        :param PSFs: array of kernels.
        :param part: int number of kernel to use.
        :param path_to_save: folder to save results.
        """
        if os.path.isfile(img_path):
            original = misc.imread(img_path)
            return self.blur_image(original, PSFs, part, path_to_save, show)
        else:
            raise Exception('Not correct path to image.')
def blur_image(self, img, PSFs=None, part=None, path_to_save=None, show=False):
"""
:param img: square, RGB image.
:param PSFs: array of Kernels.
:param part: int number of kernel to use.
:param path_to_save: folder to save results.
"""
img_shape = img.shape
if len(img_shape) < 3:
# raise Exception('We support only RGB images yet.')
print('We support only RGB images yet.')
return None
elif img_shape[0] != img_shape[1]:
# raise Exception('We support only square images yet.')
print('We support only square images yet.')
return None
if PSFs is None:
if path_to_save is None:
PSFs = PSF(canvas=img_shape[0]).fit()
else:
PSFs = PSF(canvas=img_shape[0], path_to_save=os.path.join(path_to_save,
'PSFs.png')).fit(save=True)
if part is None:
psf = PSFs
else:
psf = [PSFs[part]]
yN, xN, channel = img_shape
key, kex = PSFs[0].shape
delta = yN - key
if delta < 0:
print('resolution of image should be higher than kernel')
return None
# assert delta >= 0, 'resolution of image should be higher than kernel'
result = []
if len(psf) > 1:
for p in psf:
tmp = np.pad(p, delta // 2, 'constant')
cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
# blured = np.zeros(img_shape)
blured = cv2.normalize(img, img, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F)
blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
result.append(np.abs(blured))
else:
psf = psf[0]
tmp = np.pad(psf, delta // 2, 'constant')
cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
blured = cv2.normalize(img, img, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
dtype=cv2.CV_32F)
blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
result.append(np.abs(blured))
if show:
self.__plot_canvas(result)
return result
def __plot_canvas(self, result):
if len(result) == 0:
raise Exception('Please run blur_image() method first.')
else:
plt.close()
plt.axis('off')
fig, axes = plt.subplots(1, len(result), figsize=(10, 10))
if len(result) > 1:
for i in range(len(result)):
axes[i].imshow(result[i])
else:
plt.axis('off')
plt.imshow(result[0])
plt.show()
class Trajectory(object):
def __init__(self, canvas=64, iters=2000, max_len=60, expl=None, path_to_save=None):
"""
Generates a variety of random motion trajectories in continuous domain as in [Boracchi and Foi 2012]. Each
trajectory consists of a complex-valued vector determining the discrete positions of a particle following a
2-D random motion in continuous domain. The particle has an initial velocity vector which, at each iteration,
is affected by a Gaussian perturbation and by a deterministic inertial component, directed toward the
previous particle position. In addition, with a small probability, an impulsive (abrupt) perturbation aiming
        at inverting the particle velocity may arise, mimicking a sudden movement that occurs when the user presses
the camera button or tries to compensate the camera shake. At each step, the velocity is normalized to
guarantee that trajectories corresponding to equal exposures have the same length. Each perturbation (
Gaussian, inertial, and impulsive) is ruled by its own parameter. Rectilinear Blur as in [Boracchi and Foi
        2011] can be obtained by setting anxiety to 0 (when no impulsive changes occur).
        :param canvas: size of domain where our trajectory is defined.
:param iters: number of iterations for definition of our trajectory.
:param max_len: maximum length of our trajectory.
:param expl: this param helps to define probability of big shake. Recommended expl = 0.005.
:param path_to_save: where to save if you need.
"""
self.canvas = canvas
self.iters = iters
self.max_len = max_len
if expl is None:
self.expl = 0.1 * np.random.uniform(0, 1)
else:
self.expl = expl
if path_to_save is None:
pass
else:
self.path_to_save = path_to_save
self.tot_length = None
self.big_expl_count = None
self.x = None
def fit(self, show=False, save=False):
"""
        Generate the motion; you can save or plot it, and the coordinates are stored in the x property.
        You can also read the tot_length and big_expl_count properties afterwards.
:param show: default False.
:param save: default False.
:return: x (vector of motion).
"""
tot_length = 0
big_expl_count = 0
# how to be near the previous position
# TODO: I can change this paramether for 0.1 and make kernel at all image
centripetal = 0.7 * np.random.uniform(0, 1)
# probability of big shake
prob_big_shake = 0.2 * np.random.uniform(0, 1)
# term determining, at each sample, the random component of the new direction
gaussian_shake = 10 * np.random.uniform(0, 1)
init_angle = 360 * np.random.uniform(0, 1)
img_v0 = np.sin(np.deg2rad(init_angle))
real_v0 = np.cos(np.deg2rad(init_angle))
v0 = complex(real=real_v0, imag=img_v0)
v = v0 * self.max_len / (self.iters - 1)
if self.expl > 0:
v = v0 * self.expl
x = np.array([complex(real=0, imag=0)] * (self.iters))
for t in range(0, self.iters - 1):
if np.random.uniform() < prob_big_shake * self.expl:
next_direction = 2 * v * (np.exp(complex(real=0, imag=np.pi + (np.random.uniform() - 0.5))))
big_expl_count += 1
else:
next_direction = 0
dv = next_direction + self.expl * (
gaussian_shake * complex(real=np.random.randn(), imag=np.random.randn()) - centripetal * x[t]) * (
self.max_len / (self.iters - 1))
v += dv
v = (v / float(np.abs(v))) * (self.max_len / float((self.iters - 1)))
x[t + 1] = x[t] + v
tot_length = tot_length + abs(x[t + 1] - x[t])
# centere the motion
x += complex(real=-np.min(x.real), imag=-np.min(x.imag))
x = x - complex(real=x[0].real % 1., imag=x[0].imag % 1.) + complex(1, 1)
x += complex(real=ceil((self.canvas - max(x.real)) / 2), imag=ceil((self.canvas - max(x.imag)) / 2))
self.tot_length = tot_length
self.big_expl_count = big_expl_count
self.x = x
if show or save:
self.__plot_canvas(show, save)
return self
def __plot_canvas(self, show, save):
if self.x is None:
raise Exception("Please run fit() method first")
else:
plt.close()
plt.plot(self.x.real, self.x.imag, '-', color='blue')
plt.xlim((0, self.canvas))
plt.ylim((0, self.canvas))
if show and save:
plt.savefig(self.path_to_save)
plt.show()
elif save:
if self.path_to_save is None:
raise Exception('Please create Trajectory instance with path_to_save')
plt.savefig(self.path_to_save)
elif show:
plt.show()
class PSF(object):
def __init__(self, canvas=None, trajectory=None, fraction=None, path_to_save=None):
        if canvas is None:
            self.canvas = (64, 64)  # fall back to a default kernel canvas; (None, None) would break np.zeros below
        else:
            self.canvas = (canvas, canvas)
        if trajectory is None:
            self.trajectory = Trajectory(canvas=canvas, expl=0.005).fit(show=False, save=False).x
        else:
            self.trajectory = trajectory.x
if fraction is None:
self.fraction = [1/100, 1/10, 1/2, 1]
else:
self.fraction = fraction
self.path_to_save = path_to_save
self.PSFnumber = len(self.fraction)
self.iters = len(self.trajectory)
self.PSFs = []
def fit(self, show=False, save=False):
PSF = np.zeros(self.canvas)
triangle_fun = lambda x: np.maximum(0, (1 - np.abs(x)))
triangle_fun_prod = lambda x, y: np.multiply(triangle_fun(x), triangle_fun(y))
for j in range(self.PSFnumber):
if j == 0:
prevT = 0
else:
prevT = self.fraction[j - 1]
for t in range(len(self.trajectory)):
# print(j, t)
if (self.fraction[j] * self.iters >= t) and (prevT * self.iters < t - 1):
t_proportion = 1
elif (self.fraction[j] * self.iters >= t - 1) and (prevT * self.iters < t - 1):
t_proportion = self.fraction[j] * self.iters - (t - 1)
elif (self.fraction[j] * self.iters >= t) and (prevT * self.iters < t):
t_proportion = t - (prevT * self.iters)
elif (self.fraction[j] * self.iters >= t - 1) and (prevT * self.iters < t):
t_proportion = (self.fraction[j] - prevT) * self.iters
else:
t_proportion = 0
m2 = int(np.minimum(self.canvas[1] - 1, np.maximum(1, np.math.floor(self.trajectory[t].real))))
M2 = int(m2 + 1)
m1 = int(np.minimum(self.canvas[0] - 1, np.maximum(1, np.math.floor(self.trajectory[t].imag))))
M1 = int(m1 + 1)
PSF[m1, m2] += t_proportion * triangle_fun_prod(
self.trajectory[t].real - m2, self.trajectory[t].imag - m1
)
PSF[m1, M2] += t_proportion * triangle_fun_prod(
self.trajectory[t].real - M2, self.trajectory[t].imag - m1
)
PSF[M1, m2] += t_proportion * triangle_fun_prod(
self.trajectory[t].real - m2, self.trajectory[t].imag - M1
)
PSF[M1, M2] += t_proportion * triangle_fun_prod(
self.trajectory[t].real - M2, self.trajectory[t].imag - M1
)
self.PSFs.append(PSF / (self.iters))
if show or save:
self.__plot_canvas(show, save)
return self.PSFs
def __plot_canvas(self, show, save):
if len(self.PSFs) == 0:
raise Exception("Please run fit() method first.")
else:
plt.close()
fig, axes = plt.subplots(1, self.PSFnumber, figsize=(10, 10))
for i in range(self.PSFnumber):
axes[i].imshow(self.PSFs[i], cmap='gray')
if show and save:
if self.path_to_save is None:
raise Exception('Please create Trajectory instance with path_to_save')
plt.savefig(self.path_to_save)
plt.show()
elif save:
if self.path_to_save is None:
raise Exception('Please create Trajectory instance with path_to_save')
plt.savefig(self.path_to_save)
elif show:
plt.show()
if __name__ == '__main__':
folder = 'E:\\work\\proj\\relation_loc\\data\\test'
folder_to_save = 'E:\\work\\proj\\relation_loc\\data\\test\\save'
params = [0.01, 0.009, 0.008, 0.007, 0.005, 0.003]
blurtool = BlurImage()
for path in os.listdir(folder):
print(path)
trajectory = Trajectory(canvas=64, max_len=60, expl=np.random.choice(params)).fit()
psf = PSF(canvas=64, trajectory=trajectory).fit()
original = misc.imread(os.path.join(folder, path))
original = misc.imresize(original, (1024, 1024))
blurtool.blur_image(original, PSFs=psf, part=np.random.choice([1, 2, 3]),
path_to_save=folder_to_save, show=True)
# BlurImage(os.path.join(folder, path), PSFs=psf,
# path__to_save=folder_to_save, part=np.random.choice([1, 2, 3])).\
# blur_image(show=True)
```
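The `__main__` block above needs image files on disk; the same pipeline can be exercised on a synthetic image, which also makes the intended call order explicit (Trajectory, then PSF, then BlurImage). A self-contained sketch, assuming the classes above are importable (the module path is an assumption):

```python
# Sketch: blur a random square RGB image with one generated PSF kernel.
import numpy as np
from utils.motionblur import Trajectory, PSF, BlurImage  # assumed import path

trajectory = Trajectory(canvas=64, max_len=60, expl=0.005).fit()
psfs = PSF(canvas=64, trajectory=trajectory).fit()           # one kernel per exposure fraction
img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)   # must be square and larger than the canvas
blurred = BlurImage().blur_image(img, PSFs=psfs, part=0, show=False)
print(len(blurred), blurred[0].shape)                        # 1 (256, 256, 3)
```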
#### File: DocumentSRModel/utils/normalize.py
```python
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
def weights_init_normal(m, mean=0.0, std=0.02):
classname = m.__class__.__name__
if isinstance(m, nn.Linear):
m.weight.data.normal_(mean, std)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
m.weight.data.normal_(mean, std)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.ConvTranspose2d):
m.weight.data.normal_(mean, std)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def norm(imgs, vgg=True):
# normalize for pre-trained vgg model
if vgg:
transform = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# normalize [-1, 1]
else:
transform = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
size = list(imgs.shape)
res = imgs.clone()
if len(size) == 4:
for i in range(size[0]):
res[i] = transform(res[i])
else:
res = transform(imgs)
return res
def denorm(imgs, vgg=True):
size = list(imgs.shape)
res = imgs.clone()
if vgg:
transform = transforms.Normalize(mean=[-2.118, -2.036, -1.804],
std=[4.367, 4.464, 4.444])
if len(size) == 4:
for i in range(size[0]):
res[i] = transform(imgs[i])
else:
res = transform(res)
else:
if len(size) == 4:
for i in range(size[0]):
res[i] = ((res[i] + 1) / 2).clamp(0, 1)
else:
res = (res + 1) / 2
res = res.clamp(0, 1)
return res
```
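The constants hard-coded in `denorm` are just the inverse of the ImageNet statistics used in `norm` (mean' = -mean/std and std' = 1/std, rounded to three or four digits), so the two functions undo each other up to that rounding. A round-trip check as a sketch (the import path is an assumption):

```python
# Sketch: norm()/denorm() round trip on a random batch.
import torch
from utils.normalize import norm, denorm  # assumed import path

imgs = torch.rand(2, 3, 8, 8)                     # NCHW values in [0, 1]
restored = denorm(norm(imgs, vgg=True), vgg=True)
print(torch.allclose(imgs, restored, atol=1e-3))  # True, up to the rounded constants
```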
#### File: model/test/AdvancedEAST.py
```python
def test_AdvancedEAST(config_file):
import sys
sys.path.append('./detection_model/AdvancedEAST')
import os
import time
import argparse
import numpy as np
import torch
from multiprocessing import Pool, RLock, set_start_method
from PIL import Image, ImageDraw
from tqdm import tqdm
import json
from utils.preprocess import point_inside_of_quad
from utils.data_utils import transform
from network.AEast import East
import AdvancedEAST.nms.nms as NMS
from tools.Pascal_VOC import eval_func
import config as cfg
from yacs.config import CfgNode as CN
def read_config_file(config_file):
f = open(config_file)
opt = CN.load_cfg(f)
return opt
opt = read_config_file(config_file)
def sigmoid(x):
"""`y = 1 / (1 + exp(-x))`"""
return 1 / (1 + np.exp(-x))
def resize_image(img_size):
        '''Size shall be divisible by 32, as required by the network.'''
dsize = np.asarray(img_size, dtype=np.float32)
tsize = opt.train_size
ratio = tsize / np.max(dsize)
dsize = np.rint(dsize * ratio).astype(np.int32)
dsize = dsize - dsize % 32
return dsize[0], dsize[1]
def res2json(result_dir):
"""
method: generate json file
"""
res_list = os.listdir(result_dir)
res_dict = {}
for rf in tqdm(res_list, desc='toJSON'):
if rf[-4:] == '.txt':
respath = os.path.join(result_dir, rf)
with open(respath, 'r') as f:
reslines = f.readlines()
reskey = rf[:-4]
res_dict[reskey] = [{'points': np.rint(np.asarray(l.replace('\n', '').split(','), np.float32)).astype(
np.int32).reshape(-1, 2).tolist()} for l in reslines]
jpath = os.path.join(result_dir, 'res.json')
with open(jpath, 'w') as jf:
json.dump(res_dict, jf)
return jpath
def res2json_1(result_dir):
res_list = os.listdir(result_dir)
res_dict = {}
for rf in tqdm(res_list, desc='toJSON'):
if rf[-4:] == '.txt':
respath = os.path.join(result_dir, rf)
with open(respath, 'r') as f:
reslines = f.readlines()
reskey = rf[3:-4]
res_dict[reskey] = [{'points': np.rint(
np.asarray(l.replace('\n', '').split(',')[:8], np.float32)).astype(np.int32).reshape(-1,
2).tolist()}
for l in reslines]
jpath = os.path.join(result_dir, 'res.json')
with open(jpath, 'w') as jf:
json.dump(res_dict, jf)
return jpath
class Wrapped:
def __init__(self, model, img_dir, isDraw):
self.model = model
self.img_dir = img_dir
self.isDraw = isDraw
self.result_dir = opt.result_dir + opt.task_id + '/'
if not os.path.exists(self.result_dir):
os.makedirs(self.result_dir)
def init_lock(self, lock_):
global lock
lock = lock_
def __call__(self):
img_list = [img_name for img_name in os.listdir(self.img_dir)]
miss = []
if opt.batch_size_per_gpu > 1:
set_start_method('forkserver')
lock_ = RLock()
processes = 2
pool = Pool(processes=processes, initializer=self.init_lock, initargs=(lock_,))
with tqdm(total=len(img_list), desc='Detect') as pbar:
for _, r in enumerate(pool.imap_unordered(self.process, img_list)):
if r[0] == 0:
miss.append(r[1])
pbar.update()
pool.close()
pool.join()
else:
for img_name in tqdm(img_list):
r = self.process(img_name)
if r[0] == 0:
miss.append(r[1])
print(f"{len(miss)} images no detection.")
print(miss)
input_json_path = res2json(self.result_dir)
gt_json_path = opt.gt_json_path
eval_func(input_json_path, gt_json_path, opt.iou_threshold)
def process(self, img_name):
txt_path = self.result_dir + img_name[:-4] + '.txt'
if os.path.exists(txt_path):
with open(txt_path, 'r') as f_txt:
txt_items = f_txt.readlines()
return len(txt_items), img_name
img_path = os.path.join(self.img_dir, img_name)
im = Image.open(img_path).convert('RGB')
if opt.predict_cut_text_line:
im_array = np.array(im, dtype=np.float32)
d_width, d_height = resize_image(im.size)
scale_ratio_w = d_width / im.width
scale_ratio_h = d_height / im.height
im = im.resize((d_width, d_height), Image.BICUBIC)
x = transform(im)
x = x[np.newaxis, :]
y = self.model(x.cuda()).cpu().detach().numpy()
y = np.squeeze(y)
y[:, :, :3] = sigmoid(y[:, :, :3])
cond = np.greater_equal(y[:, :, 0], opt.pixel_threshold)
activation_pixels = np.asarray(np.where(cond), dtype=np.int32)
quad_scores, quad_after_nms = NMS.nms(y, activation_pixels[0], activation_pixels[1])
if self.isDraw:
quad_im = im.copy()
draw = ImageDraw.Draw(im)
for i, j in zip(activation_pixels[0], activation_pixels[1]):
px = (j + 0.5) * opt.pixel_size
py = (i + 0.5) * opt.pixel_size
line_width, line_color = 1, 'aqua'
if y[i, j, 1] >= opt.side_vertex_pixel_threshold:
if y[i, j, 2] < opt.trunc_threshold:
line_width, line_color = 2, 'yellow'
elif y[i, j, 2] >= 1 - opt.trunc_threshold:
line_width, line_color = 2, 'green'
draw.line([(px - 0.5 * opt.pixel_size, py - 0.5 * opt.pixel_size),
(px + 0.5 * opt.pixel_size, py - 0.5 * opt.pixel_size),
(px + 0.5 * opt.pixel_size, py + 0.5 * opt.pixel_size),
(px - 0.5 * opt.pixel_size, py + 0.5 * opt.pixel_size),
(px - 0.5 * opt.pixel_size, py - 0.5 * opt.pixel_size)],
width=line_width, fill=line_color)
im.save(self.result_dir + img_name[:-4] + '_act.jpg')
quad_draw = ImageDraw.Draw(quad_im)
txt_items = []
invalid = 0
for score, geo, s in zip(quad_scores, quad_after_nms, range(len(quad_scores))):
if np.amin(score) > 0:
if self.isDraw:
quad_draw.line([tuple(geo[0]),
tuple(geo[1]),
tuple(geo[2]),
tuple(geo[3]),
tuple(geo[0])], width=2, fill='aqua')
if opt.predict_cut_text_line:
self.cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_name, s)
rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]
rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
txt_item = ','.join(map(str, rescaled_geo_list))
txt_items.append(txt_item + '\n')
else:
invalid += 1
if self.isDraw:
quad_im.save(self.result_dir + img_name[:-4] + '_predict.jpg')
# print(f'{invalid} quads invalid with vertex num less then 4.')
with open(txt_path, 'w') as f_txt:
f_txt.writelines(txt_items)
return (len(txt_items), img_name)
def cut_text_line(self, geo, scale_ratio_w, scale_ratio_h, im_array, img_name, s):
geo /= [scale_ratio_w, scale_ratio_h]
p_min = np.amin(geo, axis=0)
p_max = np.amax(geo, axis=0)
min_xy = p_min.astype(int)
max_xy = p_max.astype(int) + 2
sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()
for m in range(min_xy[1], max_xy[1]):
for n in range(min_xy[0], max_xy[0]):
if not point_inside_of_quad(n, m, geo, p_min, p_max):
sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255
sub_im = Image.fromarray(sub_im_arr.astype('uint8')).convert('RGB')
sub_im.save(self.result_dir + img_name[:-4] + '_subim%d.jpg' % s)
print(f'Task id: {opt.task_id}')
assert int(opt.task_id[2:]) in opt.size_group, f'input size shall be in {opt.size_group}'
cp_file = '3T1280_best.pth.tar'
cp_path = os.path.join(opt.result_dir, cp_file)
assert os.path.isfile(cp_path), 'Checkpoint file does not exist.'
print(f'Loading {cp_path}')
checkpoint = torch.load(cp_path)
model = East()
model = model.cuda()
model.load_state_dict(checkpoint['state_dict'])
model.eval()
wrap = Wrapped(model, opt.val_img, opt.draw)
wrap()
```
#### File: FudanOCR/model/test_entry.py
```python
from __future__ import absolute_import
import sys
sys.path.append('./recognition_model/')
sys.path.append('./detection_model/')
sys.path.append("./super_resolution_model/")
sys.path.append("./maskrcnn_benchmark_architecture/")
print("当前系统环境变量为:",sys.path)
from test.moran_v2 import test_moran_v2
from test.AdvancedEAST import test_AdvancedEAST
from test.grcnn import test_grcnn
# from train.fasterrcnn import train_fasterrcnn
from test.east import test_east
from test.TextSnake import test_TextSnake
from test.PSENet import test_psenet
from test.DocumentSRModel import test_documentsrmodel
from test.HARN import test_HARN
from test.PixelLink import test_PixelLink
from test.maskscoring_rcnn import test_maskscoring_rcnn
from test.LSN import test_LSN
from yacs.config import CfgNode as CN
import argparse
import re
# Register additional model test functions here
function_dict = {
'MORAN_V2': test_moran_v2,
'AdvancedEAST': test_AdvancedEAST,
'GRCNN': test_grcnn,
'EAST': test_east,
# 'fasterrcnn': train_fasterrcnn,
'TextSnake': test_TextSnake,
'PSENet' : test_psenet,
'DocumentSRModel' : test_documentsrmodel,
'HARN': test_HARN,
'PixelLink': test_PixelLink,
'maskscoring_rcnn': test_maskscoring_rcnn,
    'LSN': test_LSN,
'Your Model Name': 'Your Model Function'
}
def read_config_file(config_file):
    # Parse the YAML config file with yacs
f = open(config_file)
result = CN.load_cfg(f)
return result
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', required=True, help='path to config file')
opt = parser.parse_args()
if __name__ == '__main__':
    # Read the config file
    result = read_config_file(opt.config_file)
    # Look up the test function registered for the model named in the config
    model_name = result.model
    function = function_dict[model_name]
    # Call it, passing the path to the config file
function(opt.config_file)
```
#### File: model/test/LSN.py
```python
from __future__ import print_function, division


def test_LSN(config_file):
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
from lib.model.networkFactory import networkFactory
from lib.datasets.ctw import CTWDataset, ToTensor
from torchvision import transforms
from torch.utils.data import DataLoader
import os
from skimage import io,transform,measure,draw
import cv2
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import scipy.io as sio
from lib.datasets.proposal_generate import ProposalGenerate
from lib.utils.connect import generate_proposal
from lib.model.unet.unet_model import UNet
import math
import pickle
from config import config as config, init_config
SHOW = True
def toNp(x):
return x.data.cpu().numpy()
def toVar(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
class Demo(object):
"""docstring for Demo"""
def __init__(self,modelHome,gtPath):
            # SHOW is defined at the top of test_LSN and is picked up here via closure (a 'global' declaration would fail)
super(Demo, self).__init__()
self.nf = networkFactory(modelHome)
self.gts = None
self.gtPath = gtPath
self.sumProposal = 0
self.sumHit = 0
self.sumGt = 0
self.ratio = 0
self.show = SHOW
self.filename = None
self.savePath = './result'
self.savepredict = './predict'
self.testnum = 0
self.strides = [8,16,32,64]
self.PG = ProposalGenerate()
def report(self,image,res):
#print(res)
pass
def saveResult(self,apprContours):
self.testnum+=1
print(str(self.testnum)+'/300')
ans = {}
ans['accuInf'] = apprContours
sio.savemat(self.savePath+'/'+self.filename.split('.')[0]+'.mat',ans)
# def is_rect_overlap(self,rec1,rec2):
# nMaxLeft = 0
# nMaxTop = 0
# nMinRight = 0
# nMinBottom = 0
# nMaxLeft = np.maximum(rec1[:,0],rec2[:,0])
# nMaxTop = np.maximum(rec1[:,1],rec2[:,1])
# nMinRight = np.minimum(rec1[:,2],rec2[:,2])
# nMinBottom = np.minimum(rec1[:,3],rec2[:,3])
# ans = np.ones((len(rec1),len(rec2)))
# idx = np.where((nMaxLeft > nMinRight)|nMaxTop > nMinBottom)[0]
# ans[:,idx] = 0
# return ans
# def merge_mask_box(self,box1,box2,mask1,mask2):
# proposal = box1
# proposal[0] = min(box1[0],box2[0])
# proposal[1] = min(box1[1],box2[1])
# proposal[2] = max(box1[2],box2[2])
# proposal[3] = max(box1[3],box2[3])
# mask = np.zeros((int(proposal[2]-proposal[0]),int(proposal[3]-proposal[1])))
# mask[box1[0]-proposal[0]:box1[2]-proposal[0],box1[1]-proposal[1]:box1[3]-proposal[1]]+=(mask1)
# mask[box2[0]-proposal[0]:box2[2]-proposal[0],box2[1]-proposal[1]:box2[3]-proposal[1]]+=(mask2)
# cv2.imshow('mask',(mask*125).astype(np.uint8))
# cv2.waitKey(0)
# return proposal,mask
# def connect(self,image,pred_mask,bbox,threshold = 0.5):
# showimage = image.copy()
# proposal_box = []
# proposal_mask = []
# for idx,box in enumerate(bbox):
# if(len(proposal_box)==0):
# proposal_box.append(box)
# proposal_mask.append(pred_mask[idx]>0.5)
# continue
# box_overlap = self.is_rect_overlap(np.array([box]),np.array(proposal_box))[0]
# box_overlap_idx = np.where(box_overlap>=1)[0]
# over_threshold_idx = []
# for i in box_overlap_idx:
# propposal,mask = self.merge_mask_box(box,proposal_box[i],pred_mask[idx]>0.5,proposal_mask[i])
# mask_iou = np.sum(mask>1)/np.sum(mask>0)
# if mask_iou>threshold:
# over_threshold_idx.append(i)
# proposal = box
# mask = pred_mask[idx]>0.5
# for j in over_threshold_idx:
# proposal,mask = self.merge_mask_box(proposal,proposal_box[j],mask,proposal_mask[j])
# for j in over_threshold_idx:
# proposal_box.remove(proposal_box[j])
# proposal_mask.remove(proposal_mask[j])
# proposal_box.append(proposal)
# proposal_mask.append(mask)
# return proposal_box,proposal_mask
def display(self,image,res,pred_mask,bbox,sample,circle_labels_pred,all_circle,bbox_score,show=False,threshold=0.5):
# def display(self,image,circle_labels_pred,all_circle,show=False,threshold=0.4):
# ==============================================================================
# mask_all = nn.functional.softmax(pred_mask)
# score,pred_mask_all = torch.max(mask_all,1)
# mask_all = mask_all.view(-1,28,28,2)
# pred_mask_all = torch.max(mask_all,3)[1]
pred_mask_all = pred_mask.data.cpu().numpy()
# pred_mask_all = pred_mask.data.cpu().numpy()
image = image.astype(np.uint8)
showimage = image.copy()
showmask = np.zeros((image.shape[0],image.shape[1])).astype(np.uint8)
score_list = bbox_score.data.cpu().numpy()
print(score_list.shape)
print(pred_mask_all.shape)
# print(len(bbox))
savepath = './result/'+config.filename
if not os.path.exists(savepath):
os.makedirs(savepath)
if not os.path.exists(savepath+"/mask"):
os.makedirs(savepath+"/mask")
if not os.path.exists(savepath+"/bbox"):
os.makedirs(savepath+"/bbox")
if not os.path.exists(savepath+"/score"):
os.makedirs(savepath+"/score")
np.save(savepath+"/mask/"+self.filename.split('.')[0]+"_mask.npy", pred_mask_all)
np.save(savepath+"/bbox/"+self.filename.split('.')[0]+"_bbox.npy", bbox.data.cpu().numpy())
np.save(savepath+"/score/"+self.filename.split('.')[0]+"_score.npy", score_list)
# np.save("./result/ratio/"+self.filename.split('.')[0]+"_ratio.npy", )
# cv2.imwrite("./result/image/"+self.filename,image)
# anspts = generate_proposal(image,pred_mask_all,bbox.data.cpu().numpy())
# outputfile = open('./result/predict/'+self.filename.split('.')[0]+'.txt','w')
# for poly in anspts:
# cv2.polylines(image,[poly],True,(0,0,255),3)
# # write_poly = poly[:,::-1].reshape(-1)
# write_poly = poly.reshape(-1)
# print(write_poly)
# write_poly = write_poly/self.ratio
# write_poly = np.array(write_poly,dtype=np.int32).tolist()
# print(write_poly)
# write_string = ','.join(str(i) for i in write_poly)
# print(write_string)
# outputfile.write(write_string+'\n')
# cv2.imshow('image',image)
# cv2.waitKey(30)
# cv2.imwrite("./result/disp/"+self.filename,image)
# outputfile.close()
# ==============================================================================
# for idx,box in enumerate(bbox):
# score = score_list[idx]
# box = box.data.cpu().numpy().astype(np.int32)
# # print(score)
# # print(box)
# if box[0]<0 or box[1]<0:
# continue
# if box[2]>=showimage.shape[1] or box[3]>=showimage.shape[0]:
# continue
# if(score<threshold):
# continue
# cv2.rectangle(showimage,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),(0,0,255),3)
# mask = np.maximum(pred_mask_all[idx],0)
# cv2.imshow('origin_mask',np.array(mask*255,dtype=np.uint8))
# # print(mask)
# w,h = int(box[2])-int(box[0]),int(box[3])-int(box[1])
# # print(box)
# # print(mask)
# resize_mask = cv2.resize(mask,(w,h),interpolation=cv2.INTER_NEAREST)*255
# # print(resize_mask)
# showmask[int(box[1]):int(box[3]),int(box[0]):int(box[2])] = np.maximum(showmask[int(box[1]):int(box[3]),int(box[0]):int(box[2])],resize_mask.astype(np.uint8))
# # print(resize_mask)
# cv2.imshow('resize_mask',resize_mask.astype(np.uint8))
# cv2.imshow('showmask',showmask)
# cv2.imshow('showimage',showimage)
# cv2.waitKey(0)
# ==============================================================================
# image = image.astype(np.uint8)
# showimage2 = image.copy()
# cv2.imshow('showimage2',showimage2)
# cv2.waitKey(0)
# for stride in self.strides:
# circle_labels = nn.functional.softmax(circle_labels_pred[str(stride)])
# circle_labels = circle_labels.data.cpu().numpy()
# # print(circle_labels)
# pos_idx = np.where(circle_labels[:,1]>=0.5)[0]
# print(stride,len(pos_idx))
# circle = all_circle[str(stride)]
# for idx in pos_idx:
# box = circle[idx]
# cv2.rectangle(showimage2,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),(0,0,255),3)
# cv2.imshow('showimage2',showimage2)
# cv2.waitKey(0)
# ==============================================================================
# mask_all = nn.functional.softmax(pred_mask)
# score,pred_mask_all = torch.max(mask_all,1)
# mask_all = mask_all.view(-1,14,14,2)
# pred_mask_all = mask_all[:,:,:,1]>=0.5
# pred_mask_all = pred_mask_all.view(-1,14,14)
# image = image.astype(np.uint8)
# pred_mask = pred_mask.squeeze().data.cpu().numpy().astype(np.float32)
# bbox = bbox.data.cpu().numpy().astype(np.float32)
# showmask = np.zeros((image.shape[0],image.shape[1])).astype(np.uint8)
# show_point = image.copy()
# # np.save("./result/mask.npy", pred_mask)
# # np.save("./result/bbox.npy", bbox)
# # cv2.imwrite('./result/image.jpg',image)
# # proposal_image = image.copy()
# for i,box in enumerate(bbox):
# cv2.rectangle(image,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),(0,0,255),3)
# cv2.circle(show_point,(int((box[0]+box[2])/2),int((box[1]+box[3])/2)),3,(255,0,0),3)
# w,h = int(box[2]-box[0]),int(box[3]-box[1])
# mask = pred_mask[i]
# # print(mask)
# resize_mask = cv2.resize(mask,(w,h),interpolation=cv2.INTER_NEAREST)*255
# showmask[int(box[1]):int(box[3]),int(box[0]):int(box[2])] = (showmask[int(box[1]):int(box[3]),int(box[0]):int(box[2])] | resize_mask.astype(np.uint8))
# # print(np.max(resize_mask))
# # print(resize_mask.astype(np.uint8))
# # cv2.imshow('region',(mask*255).astype(np.uint8))
# # cv2.imshow('mask',resize_mask.astype(np.uint8))
# # proposal_box,proposal_mask = self.connect(image,pred_mask,bbox)
# # for proposal in proposal_box:
# # cv2.rectangle(proposal_image,(int(proposal[0]),int(proposal[1])),(int(proposal[2]),int(proposal[3])),(0,0,255),3)
# cv2.imshow('image',image.astype(np.uint8))
# # cv2.imshow('proposal_image',proposal_image.astype(np.uint8))
# cv2.imshow('showmask',showmask)
# cv2.imshow('show_point',show_point)
# cv2.waitKey(0)
def rescale(self,image,preferredShort = 768,maxLong = 2048):
h,w,_ = image.shape
longSide = max(h,w)
shortSide = min(h,w)
self.ratio = preferredShort*1.0/shortSide
if self.ratio*longSide > maxLong:
self.ratio = maxLong*1.0/longSide
image = cv2.resize(image,None,None,self.ratio,self.ratio,interpolation=cv2.INTER_LINEAR)
return image
def alignDim(self,image):
h2,w2,_ = image.shape
H2 = int(math.ceil(h2/64.0)*64)
W2 = int(math.ceil(w2/64.0)*64)
ret_image = np.zeros((H2,W2,_))
ret_image[:h2,:w2,:] = image
return ret_image
def toTensor(self,image):
# pixel_means = np.array([[[102.9801, 115.9465, 122.7717]]])
# image -= pixel_means
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = (image - mean)/std
image = image.transpose((2,0,1))
return torch.from_numpy((image.astype(np.float32)))
def peerSingleImage(self,image,imageName,display = True,report = True):
image = self.rescale(image)
image = self.alignDim(image)
# cvimage = self.alignDim(cvimage)
sample = {}
ptss = [[[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1],[1,1]]]
bbx = [[1,1,1,1]]
all_circle = {}
for stride in self.strides:
# print(stride)
labels=None
all_anchors = None
labels,all_anchors,mask_label= self.PG.run(stride,np.array([2,2.5,3,3.5]),[1],image.shape[0]/stride,image.shape[1]/stride,[image.shape[0],image.shape[1],1],0,ptss,image,bbx)
sample[str(stride)] = all_anchors
all_circle[str(stride)] = Variable(torch.from_numpy(np.ascontiguousarray(sample[str(stride)].astype(np.float32))).squeeze().cuda(),requires_grad=False)
tensorimage = image.copy()
tensor = self.toTensor(tensorimage).unsqueeze(0)
tensor = Variable(tensor.cuda(),requires_grad = False)
res = None
# print(tensor)
# print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n')
self.model.eval()
threshold = 0.5
testMaxNum = 1000
print(self.model)
circle_labels_pred,pred_mask,bbox,bbox_idx,pos_idx_stride,bbox_score = self.model.forward(tensor,all_circle,None,istraining=False,threshold=threshold,testMaxNum=testMaxNum) #
# pred_mask = pred_mask.view(-1,14,14)
if report:
self.report(image,res)
if display:
self.display(image,res,pred_mask,bbox,sample,circle_labels_pred,all_circle,bbox_score,threshold=threshold)
torch.cuda.empty_cache()
# self.display(image,circle_labels_pred,all_circle,threshold=threshold)
def peerGalary(self,imageFolder,display=True,report = True):
F = False
for filename in sorted(os.listdir(imageFolder)):
print(filename)
self.filename = filename
# if not filename == '1072.jpg':
# continue
# if filename == '1342.jpg':
# F=True
# if not F:
# continue
# image = io.imread(os.path.join(imageFolder,filename),plugin = 'pil')
image = cv2.imread(os.path.join(imageFolder,filename))
if len(image)==2:
image = image[0]
self.peerSingleImage(image,filename,display,report)
def prepareNetwork(self,networkPath,type='vgg16'):
torch.cuda.set_device(1)
if type == 'vgg16':
self.savePath = self.savePath+'/'+networkPath.split('/')[-2]+'/'+networkPath.split('/')[-1].split('.')[0]
# if not os.path.exists(self.savePath):
# os.makedirs(self.savePath)
self.model = self.nf.vgg16()
pretrainedDict = torch.load(networkPath,map_location='cpu')
modelDict = self.model.state_dict()
pretrainedDict = {k: v for k, v in pretrainedDict.items() if k in modelDict}
modelDict.update(pretrainedDict)
self.model.load_state_dict(modelDict)
print('Load model:{}'.format(networkPath))
self.model.cuda()
self.model.eval()
elif type == 'resnet34':
self.savePath = self.savePath+'/'+networkPath.split('/')[-2]+'/'+networkPath.split('/')[-1].split('.')[0]
# if not os.path.exists(self.savePath):
# os.makedirs(self.savePath)
self.model = self.nf.resnet34()
pretrainedDict = torch.load(networkPath,map_location='cpu')
modelDict = self.model.state_dict()
pretrainedDict = {k: v for k, v in pretrainedDict.items() if k in modelDict}
modelDict.update(pretrainedDict)
self.model.load_state_dict(modelDict)
print('Load model:{}'.format(networkPath))
self.model.cuda()
self.model.eval()
elif type == 'resnet50':
self.savePath = self.savePath+'/'+networkPath.split('/')[-2]+'/'+networkPath.split('/')[-1].split('.')[0]
# if not os.path.exists(self.savePath):
# os.makedirs(self.savePath)
self.model = self.nf.resnet50()
pretrainedDict = torch.load(networkPath,map_location='cpu')
modelDict = self.model.state_dict()
pretrainedDict = {k: v for k, v in pretrainedDict.items() if k in modelDict}
modelDict.update(pretrainedDict)
self.model.load_state_dict(modelDict)
print('Load model:{}'.format(networkPath))
self.model.cuda()
self.model.eval()
elif type == 'unet':
self.savePath = self.savePath+'/'+networkPath.split('/')[-2]+'/'+networkPath.split('/')[-1].split('.')[0]
# if not os.path.exists(self.savePath):
# os.makedirs(self.savePath)
self.model = UNet(3,1)
pretrainedDict = torch.load(networkPath,map_location='cpu')
modelDict = self.model.state_dict()
pretrainedDict = {k: v for k, v in pretrainedDict.items() if k in modelDict}
modelDict.update(pretrainedDict)
self.model.load_state_dict(modelDict)
print('Load model:{}'.format(networkPath))
self.model.cuda()
self.model.eval()
demo = Demo('./pretrainmodel','/data/2019AAAI/data/ctw1500/train/text_label_curve')
demo.prepareNetwork('/data/2019AAAI/output/config013/92.model',type=config.net)
demo.peerGalary('/data/2019AAAI/data/test',display = True,report = False) #config.testDatasetroot +'/text_image'
# /home/zhouzhao/Documents/Invoice_test/20170823/ZZSDK
# /home/zhouzhao/Projects/STD/DataSet/Images/Test
```
#### File: model/train/AdvancedEAST.py
```python
def train_AEAST(config_file):
import sys
sys.path.append('./detection_model/AdvancedEAST')
import os
import argparse
import time
    import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
import config as cfg
from utils.data_utils import custom_dset, collate_fn
from network.AEast import East
from network.loss import LossFunc
from utils.utils import AverageMeter, save_log
from utils.earlystop import EarlyStopping
os.environ["CUDA_VISIBLE_DEVICES"] = "0,3"
from yacs.config import CfgNode as CN
def read_config_file(config_file):
f = open(config_file)
opt = CN.load_cfg(f)
return opt
opt = read_config_file(config_file)
class Wrapped:
def __init__(self, train_loader, val_loader, model, criterion, optimizer, scheduler, start_epoch, val_loss_min):
self.train_loader = train_loader
self.val_loader = val_loader
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.scheduler = scheduler #
self.start_epoch = start_epoch #
self.tick = time.strftime("%Y%m%d-%H-%M-%S", time.localtime(time.time()))
self.earlystopping = EarlyStopping(opt.patience, val_loss_min)
def __call__(self):
for epoch in tqdm(range(self.start_epoch + 1, opt.max_epoch + 1), desc='Epoch'):
if epoch == 1:
tqdm.write("Validating pretrained model.")
self.validate(0)
if epoch > 1 and epoch % opt.decay_step == 0:
tqdm.write("Learning rate - Epoch: [{0}]: {1}".format(epoch - 1,self.optimizer.param_groups[0]['lr']))
self.train(epoch)
if self.validate(epoch): # if earlystop
print('Earlystopping activates. Training stopped.')
break
def validate(self, epoch):
losses = AverageMeter()
self.model.eval()
for i, (img, gt) in tqdm(enumerate(self.val_loader), desc='Val', total=len(self.val_loader)):
img = img.cuda()
gt = gt.cuda()
east_detect = self.model(img)
loss = self.criterion(gt, east_detect)
losses.update(loss.item(), img.size(0))
tqdm.write('Validate Loss - Epoch: [{0}] Avg Loss {1}'.format(epoch,losses.avg))
save_log(losses, epoch, i + 1, len(self.val_loader), self.tick, split='Validation')
earlystop, save = self.earlystopping(losses.avg)
if not earlystop and save:
state = {
'epoch': epoch,
'state_dict': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'val_loss_min': losses.avg
}
self.earlystopping.save_checkpoint(state, losses.avg)
return earlystop
def train(self, epoch):
losses = AverageMeter()
self.model.train()
for i, (img, gt) in tqdm(enumerate(self.train_loader), desc='Train', total=len(self.train_loader)):
img = img.cuda()
gt = gt.cuda()
east_detect = self.model(img)
loss = self.criterion(gt, east_detect)
losses.update(loss.item(), img.size(0))
# backward propagation
self.scheduler.step()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if (i + 1) % opt.print_step == 0:
tqdm.write(
'Training loss - Epoch: [{0}][{1}/{2}] Loss {loss.val:.4f} Avg Loss {loss.avg:.4f}'.format(
epoch, i + 1, len(self.train_loader), loss=losses))
save_log(losses, epoch, i + 1, len(self.train_loader), self.tick, split='Training')
class LRPolicy:
def __init__(self, rate, step):
self.rate = rate
self.step = step
def __call__(self, it):
return self.rate ** (it // self.step)
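    # Illustrative note (not in the original file): LRPolicy implements plain
    # step decay, e.g. with rate=0.1 and step=1000 the multiplier returned is
    # 1.0 for iterations 0-999, 0.1 for 1000-1999, 0.01 for 2000-2999, and so on.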
print('=== AdvancedEAST ===')
print('Task id: {0}'.format(opt.task_id))
    print('=== Initializing DataLoader ===')
print('Multi-processing on {0} cores'.format(opt.num_process))
batch_size = opt.batch_size_per_gpu
trainset = custom_dset(split='train')
valset = custom_dset(split='val')
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn,
num_workers=opt.num_workers, drop_last=False)
val_loader = DataLoader(valset, batch_size=1, collate_fn=collate_fn, num_workers=opt.num_workers)
print('=== Building Network ===')
model = East()
model = model.cuda()
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"
    model = nn.DataParallel(model, device_ids=opt.gpu_ids)  # data parallelism across the configured GPUs
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Total parameters: {0}'.format(params))
cudnn.benchmark = True
criterion = LossFunc()
optimizer = Adam(model.parameters(), lr=opt.lr_rate)
# decay every opt.decay_step epoch / every decay_step iter
decay_step = len(train_loader) * opt.decay_step
scheduler = LambdaLR(optimizer, lr_lambda=LRPolicy(rate=opt.decay_rate, step=decay_step))
print('Batch size: {0}'.format(batch_size))
print('Initial learning rate: {0}\nDecay step: {1}\nDecay rate: {2}\nPatience: {3}'.format(
opt.lr_rate, opt.decay_step, opt.decay_rate, opt.patience))
start_epoch = 0
val_loss_min = None
print('=== Training ===')
wrap = Wrapped(train_loader, val_loader, model, criterion, optimizer, scheduler, start_epoch, val_loss_min)
wrap()
```
#### File: model/train/TextSnake.py
```python
from __future__ import print_function
def train_TextSnake(config_file):
import sys
sys.path.append('./detection_model/TextSnake_pytorch')
import os
import time
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data as data
from torch.optim import lr_scheduler
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from dataset.total_text import TotalText
from network.loss import TextLoss
from network.textnet import TextNet
from util.augmentation import EvalTransform, NewAugmentation
from util.config import config as cfg, update_config, print_config, init_config
from util.misc import AverageMeter
from util.misc import mkdirs, to_device
from util.option import BaseOptions
from util.visualize import visualize_network_output
from yacs.config import CfgNode as CN
global total_iter
total_iter = 0
def read_config_file(config_file):
"""
        Read configuration from a yaml file.
"""
f = open(config_file)
opt = CN.load_cfg(f)
return opt
opt = read_config_file(config_file)
def adjust_learning_rate(optimizer, i):
if 0 <= i*opt.batch_size < 100000:
lr = opt.lr
elif 100000 <= i*opt.batch_size < 400000:
lr = opt.lr * 0.1
else:
lr = opt.lr * 0.1 * (0.94 ** ((i*opt.batch_size-300000) // 100000))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
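    # Illustrative note (not in the original file): the schedule is keyed on
    # samples seen (i * batch_size). With opt.lr = 1e-3 and batch_size = 4 this
    # means 1e-3 up to 100k samples, 1e-4 up to 400k samples, and beyond that a
    # further factor of 0.94 for every additional 100k samples.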
def adjust_requires_grad(model, i):
if 0 <= i < 4000:
for name, param in model.named_parameters():
if name == 'conv1.0.weight' or name == 'conv1.0.bias' or \
name == 'conv1.1.weight' or name == 'conv1.1.bias':
param.requires_grad = False
else:
for name, param in model.named_parameters():
if name == 'conv1.0.weight' or name == 'conv1.0.bias' or \
name == 'conv1.1.weight' or name == 'conv1.1.bias':
param.requires_grad = True
def save_model(model, optimizer, scheduler, epoch):
save_dir = os.path.join(opt.save_dir, opt.exp_name)
if not os.path.exists(save_dir):
mkdirs(save_dir)
save_path = os.path.join(save_dir, 'textsnake_{}_{}.pth'.format(model.backbone_name, epoch))
print('Saving to {}.'.format(save_path))
state_dict = {
'epoch': epoch,
'model': model.state_dict(),
'optim': optimizer.state_dict()
# 'scheduler': scheduler.state_dict()
}
torch.save(state_dict, save_path)
def load_model(save_path):
print('Loading from {}.'.format(save_path))
checkpoint = torch.load(save_path)
return checkpoint
def train(model, train_loader, criterion, scheduler, optimizer, epoch, summary_writer):
start = time.time()
losses = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
model.train()
global total_iter
for i, (img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, meta) in enumerate(train_loader):
data_time.update(time.time() - end)
img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map = to_device(
img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map)
output = model(img)
tr_loss, tcl_loss, sin_loss, cos_loss, radii_loss = \
criterion(output, tr_mask, tcl_mask, sin_map, cos_map, radius_map, train_mask, total_iter)
loss = tr_loss + tcl_loss + sin_loss + cos_loss + radii_loss
# backward
# scheduler.step()
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.update(loss.item())
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if opt.viz and i < opt.vis_num:
visualize_network_output(output, tr_mask, tcl_mask, prefix='train_{}'.format(i))
if i % opt.display_freq == 0:
print('Epoch: [ {} ][ {:03d} / {:03d} ] - Loss: {:.4f} - tr_loss: {:.4f} - tcl_loss: {:.4f} - sin_loss: {:.4f} - cos_loss: {:.4f} - radii_loss: {:.4f} - {:.2f}s/step'.format(
epoch, i, len(train_loader), loss.item(), tr_loss.item(), tcl_loss.item(), sin_loss.item(), cos_loss.item(), radii_loss.item(), batch_time.avg)
)
# write summary
if total_iter % opt.summary_freq == 0:
print('Summary in {}'.format(os.path.join(opt.summary_dir, opt.exp_name)))
tr_pred = output[:, 0:2].softmax(dim=1)[:, 1:2]
tcl_pred = output[:, 2:4].softmax(dim=1)[:, 1:2]
summary_writer.add_image('input_image', vutils.make_grid(img, normalize=True), total_iter)
summary_writer.add_image('tr/tr_pred', vutils.make_grid(tr_pred * 255, normalize=True), total_iter)
summary_writer.add_image('tr/tr_mask', vutils.make_grid(torch.unsqueeze(tr_mask * train_mask, 1) * 255), total_iter)
summary_writer.add_image('tcl/tcl_pred', vutils.make_grid(tcl_pred * 255, normalize=True), total_iter)
summary_writer.add_image('tcl/tcl_mask', vutils.make_grid(torch.unsqueeze(tcl_mask * train_mask, 1) * 255), total_iter)
summary_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], total_iter)
summary_writer.add_scalar('model/tr_loss', tr_loss.item(), total_iter)
summary_writer.add_scalar('model/tcl_loss', tcl_loss.item(), total_iter)
summary_writer.add_scalar('model/sin_loss', sin_loss.item(), total_iter)
summary_writer.add_scalar('model/cos_loss', cos_loss.item(), total_iter)
summary_writer.add_scalar('model/radii_loss', radii_loss.item(), total_iter)
summary_writer.add_scalar('model/loss', loss.item(), total_iter)
total_iter += 1
print('Speed: {}s /step, {}s /epoch'.format(batch_time.avg, time.time() - start))
if epoch % opt.save_freq == 0:
save_model(model, optimizer, scheduler, epoch)
print('Training Loss: {}'.format(losses.avg))
def validation(model, valid_loader, criterion):
"""
        Print per-step and average validation losses.
"""
model.eval()
losses = AverageMeter()
for i, (img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, meta) in enumerate(valid_loader):
print(meta['image_id'])
img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map = to_device(
img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map)
output = model(img)
tr_loss, tcl_loss, sin_loss, cos_loss, radii_loss = \
criterion(output, tr_mask, tcl_mask, sin_map, cos_map, radius_map, train_mask)
loss = tr_loss + tcl_loss + sin_loss + cos_loss + radii_loss
losses.update(loss.item())
if opt.viz and i < opt.vis_num:
visualize_network_output(output, tr_mask, tcl_mask, prefix='val_{}'.format(i))
if i % opt.display_freq == 0:
print(
'Validation: - Loss: {:.4f} - tr_loss: {:.4f} - tcl_loss: {:.4f} - sin_loss: {:.4f} - cos_loss: {:.4f} - radii_loss: {:.4f}'.format(
loss.item(), tr_loss.item(), tcl_loss.item(), sin_loss.item(),
cos_loss.item(), radii_loss.item())
)
print('Validation Loss: {}'.format(losses.avg))
# parse arguments
torch.cuda.set_device(opt.num_device)
option = BaseOptions(config_file)
args = option.initialize()
init_config(opt, config_file)
update_config(opt, args)
print_config(opt)
data_root = os.path.join(opt.data_root, opt.dataset)
trainset = TotalText(
data_root=data_root,
ignore_list=os.path.join(data_root, 'ignore_list.txt'),
is_training=True,
transform=NewAugmentation(size=opt.input_size, mean=opt.means, std=opt.stds, maxlen=1280, minlen=512)
)
train_loader = data.DataLoader(trainset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.num_workers)
# Model
model = TextNet(backbone=opt.backbone, output_channel=7)
model = model.to(opt.device)
if opt.cuda:
cudnn.benchmark = True
criterion = TextLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
# scheduler = lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.94)
# if opt.dataset == 'ArT_train':
# scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[10000, 50000], gamma=0.1)
# elif opt.dataset == 'LSVT_full_train':
# scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[10000, 50000], gamma=0.1)
# load model if resume
if opt.resume is not False:
checkpoint = load_model(opt.resume)
opt.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optim'])
# scheduler.load_state_dict(checkpoint['scheduler'])
total_iter = checkpoint['epoch'] * len(train_loader)
if not os.path.exists(os.path.join(opt.summary_dir, opt.exp_name)):
os.mkdir(os.path.join(opt.summary_dir, opt.exp_name))
summary_writer = SummaryWriter(log_dir=os.path.join(opt.summary_dir, opt.exp_name))
print('Start training TextSnake.')
for epoch in range(opt.start_epoch, opt.max_epoch):
adjust_learning_rate(optimizer, total_iter)
train(model, train_loader, criterion, None, optimizer, epoch, summary_writer)
print('End.')
```
#### File: FudanOCR/utils/Detval.py
```python
def detval(input, gt, cfg):
import numpy as np
import json
from utils.polygon_wrapper import iod
from utils.polygon_wrapper import area_of_intersection
from utils.polygon_wrapper import area
import os
input_json_path = input#os.path.join(cfg.ADDRESS.OUTPUT_DIR, 'result.json')
gt_json_path = gt#os.path.join(cfg.ADDRESS.DETECTION.TRAIN_GT_DIR, 'train_labels.json')
global val_result
def input_reading(polygons):
det = []
for polygon in polygons:
polygon['points'] = np.array(polygon['points'])
det.append(polygon)
return det
def gt_reading(gt_dict, img_key):
polygons = gt_dict[img_key]
gt = []
for polygon in polygons:
polygon['points'] = np.array(polygon['points'])
gt.append(polygon)
return gt
def detection_filtering(detections, groundtruths, threshold=0.5):
"""
        Remove detections that substantially overlap ignored ('###') ground-truth regions.
"""
before_filter_num = len(detections)
for gt_id, gt in enumerate(groundtruths):
if (gt['transcription'] == '###') and (gt['points'].shape[1] > 1):
gt_x = list(map(int, np.squeeze(gt['points'][:, 0])))
gt_y = list(map(int, np.squeeze(gt['points'][:, 1])))
for det_id, detection in enumerate(detections):
det_x = list(map(int, np.squeeze(detection['points'][:, 0])))
det_y = list(map(int, np.squeeze(detection['points'][:, 1])))
det_gt_iou = iod(det_x, det_y, gt_x, gt_y)
if det_gt_iou > threshold:
detections[det_id] = []
detections[:] = [item for item in detections if item != []]
if before_filter_num - len(detections) > 0:
print("Ignore {} illegal detections".format(before_filter_num - len(detections)))
return detections
def gt_filtering(groundtruths):
before_filter_num = len(groundtruths)
for gt_id, gt in enumerate(groundtruths):
if gt['transcription'] == '###' or gt['points'].shape[0] < 3:
groundtruths[gt_id] = []
groundtruths[:] = [item for item in groundtruths if item != []]
if before_filter_num - len(groundtruths) > 0:
print("Ignore {} illegal groundtruths".format(before_filter_num - len(groundtruths)))
return groundtruths
def generate_json(cfg):
if cfg.BASE.MODEL == 'TEXTNET':
from model.detection_model.TextSnake_pytorch.util import global_data
val_result = global_data._get_det_value()
with open(os.path.join(cfg.ADDRESS.DET_RESULT_DIR, 'result.json'), 'w') as f:
json.dump(val_result, f)
def sigma_calculation(det_x, det_y, gt_x, gt_y):
"""
sigma = inter_area / gt_area
"""
return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) / area(gt_x, gt_y)), 2)
def tau_calculation(det_x, det_y, gt_x, gt_y):
"""
tau = inter_area / det_area
"""
return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) / area(det_x, det_y)), 2)
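    # Worked example (not in the original file): for an axis-aligned ground
    # truth covering [0,10]x[0,10] and a detection covering [0,5]x[0,10], the
    # intersection area is 50, so sigma = 50/100 = 0.5 (recall side) and
    # tau = 50/50 = 1.0 (precision side).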
def one_to_one(local_sigma_table, local_tau_table, local_accumulative_recall,
local_accumulative_precision, global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag):
"""
Args:
local_sigma_table:
local_tau_table:
local_accumulative_recall:
local_accumulative_precision:
global_accumulative_recall:
global_accumulative_precision:
gt_flag:
det_flag:
Returns:
"""
for gt_id in range(num_gt):
qualified_sigma_candidates = np.where(local_sigma_table[gt_id, :] > tr)
num_qualified_sigma_candidates = qualified_sigma_candidates[0].shape[0]
qualified_tau_candidates = np.where(local_tau_table[gt_id, :] > tp)
num_qualified_tau_candidates = qualified_tau_candidates[0].shape[0]
if (num_qualified_sigma_candidates == 1) and (num_qualified_tau_candidates == 1):
global_accumulative_recall = global_accumulative_recall + 1.0
global_accumulative_precision = global_accumulative_precision + 1.0
local_accumulative_recall = local_accumulative_recall + 1.0
local_accumulative_precision = local_accumulative_precision + 1.0
gt_flag[0, gt_id] = 1
matched_det_id = np.where(local_sigma_table[gt_id, :] > tr)
det_flag[0, matched_det_id] = 1
return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag
def one_to_many(local_sigma_table, local_tau_table, local_accumulative_recall,
local_accumulative_precision, global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag):
for gt_id in range(num_gt):
# skip the following if the groundtruth was matched
if gt_flag[0, gt_id] > 0:
continue
non_zero_in_sigma = np.where(local_sigma_table[gt_id, :] > 0)
num_non_zero_in_sigma = non_zero_in_sigma[0].shape[0]
if num_non_zero_in_sigma >= k:
                # search for all detections that overlap with this groundtruth
qualified_tau_candidates = np.where((local_tau_table[gt_id, :] >= tp) & (det_flag[0, :] == 0))
num_qualified_tau_candidates = qualified_tau_candidates[0].shape[0]
if num_qualified_tau_candidates == 1:
if local_tau_table[gt_id, qualified_tau_candidates] >= tp and local_sigma_table[gt_id, qualified_tau_candidates] >= tr:
                        # becomes a one-to-one case
global_accumulative_recall = global_accumulative_recall + 1.0
global_accumulative_precision = global_accumulative_precision + 1.0
local_accumulative_recall = local_accumulative_recall + 1.0
local_accumulative_precision = local_accumulative_precision + 1.0
gt_flag[0, gt_id] = 1
det_flag[0, qualified_tau_candidates] = 1
elif np.sum(local_sigma_table[gt_id, qualified_tau_candidates]) >= tr:
gt_flag[0, gt_id] = 1
det_flag[0, qualified_tau_candidates] = 1
global_accumulative_recall = global_accumulative_recall + fsc_k
global_accumulative_precision = global_accumulative_precision + num_qualified_tau_candidates * fsc_k
local_accumulative_recall = local_accumulative_recall + fsc_k
local_accumulative_precision = local_accumulative_precision + num_qualified_tau_candidates * fsc_k
return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag
def many_to_many(local_sigma_table, local_tau_table, local_accumulative_recall,
local_accumulative_precision, global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag):
for det_id in range(num_det):
# skip the following if the detection was matched
if det_flag[0, det_id] > 0:
continue
non_zero_in_tau = np.where(local_tau_table[:, det_id] > 0)
num_non_zero_in_tau = non_zero_in_tau[0].shape[0]
if num_non_zero_in_tau >= k:
                # search for all groundtruths that overlap with this detection
qualified_sigma_candidates = np.where((local_sigma_table[:, det_id] >= tp) & (gt_flag[0, :] == 0))
num_qualified_sigma_candidates = qualified_sigma_candidates[0].shape[0]
if num_qualified_sigma_candidates == 1:
if local_tau_table[qualified_sigma_candidates, det_id] >= tp and local_sigma_table[qualified_sigma_candidates, det_id] >= tr:
                        # becomes a one-to-one case
global_accumulative_recall = global_accumulative_recall + 1.0
global_accumulative_precision = global_accumulative_precision + 1.0
local_accumulative_recall = local_accumulative_recall + 1.0
local_accumulative_precision = local_accumulative_precision + 1.0
gt_flag[0, qualified_sigma_candidates] = 1
det_flag[0, det_id] = 1
elif np.sum(local_tau_table[qualified_sigma_candidates, det_id]) >= tp:
det_flag[0, det_id] = 1
gt_flag[0, qualified_sigma_candidates] = 1
global_accumulative_recall = global_accumulative_recall + num_qualified_sigma_candidates * fsc_k
global_accumulative_precision = global_accumulative_precision + fsc_k
local_accumulative_recall = local_accumulative_recall + num_qualified_sigma_candidates * fsc_k
local_accumulative_precision = local_accumulative_precision + fsc_k
return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag
# Initial config
global_tp = 0
global_fp = 0
global_fn = 0
global_sigma = []
global_tau = []
tr = 0.7
tp = 0.6
fsc_k = 0.8
k = 2
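    # tr and tp are the acceptance thresholds on sigma (recall side) and tau
    # (precision side), fsc_k is the penalty applied to one-to-many /
    # many-to-one matches, and k is the minimum number of fragments required
    # before such a split or merge match is considered (DetEval-style scoring).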
# load json file as dict
generate_json(cfg)
with open(input_json_path, 'r') as f:
input_dict = json.load(f)
with open(gt_json_path, 'r') as f:
gt_dict = json.load(f)
for input_img_key, input_cnts in input_dict.items():
print(input_img_key)
detections = input_reading(input_cnts)
groundtruths = gt_reading(gt_dict, input_img_key.replace('res', 'gt'))
detections = detection_filtering(detections, groundtruths) # filters detections overlapping with DC area
groundtruths = gt_filtering(groundtruths)
local_sigma_table = np.zeros((len(groundtruths), len(detections)))
local_tau_table = np.zeros((len(groundtruths), len(detections)))
for gt_id, gt in enumerate(groundtruths):
if len(detections) > 0:
gt_x = list(map(int, np.squeeze(gt['points'][:, 0])))
gt_y = list(map(int, np.squeeze(gt['points'][:, 1])))
for det_id, detection in enumerate(detections):
det_x = list(map(int, np.squeeze(detection['points'][:, 0])))
det_y = list(map(int, np.squeeze(detection['points'][:, 1])))
local_sigma_table[gt_id, det_id] = sigma_calculation(det_x, det_y, gt_x, gt_y)
local_tau_table[gt_id, det_id] = tau_calculation(det_x, det_y, gt_x, gt_y)
global_sigma.append(local_sigma_table)
global_tau.append(local_tau_table)
global_accumulative_recall = 0
global_accumulative_precision = 0
total_num_gt = 0
total_num_det = 0
print('############## Evaluate Result ###############')
input_list = list(input_dict.keys())
for idx in range(len(global_sigma)):
local_sigma_table = global_sigma[idx]
local_tau_table = global_tau[idx]
num_gt = local_sigma_table.shape[0]
num_det = local_sigma_table.shape[1]
total_num_gt = total_num_gt + num_gt
total_num_det = total_num_det + num_det
local_accumulative_recall = 0
local_accumulative_precision = 0
gt_flag = np.zeros((1, num_gt))
det_flag = np.zeros((1, num_det))
#######first check for one-to-one case##########
local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \
gt_flag, det_flag = one_to_one(local_sigma_table, local_tau_table,
local_accumulative_recall, local_accumulative_precision,
global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag)
#######then check for one-to-many case##########
local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \
gt_flag, det_flag = one_to_many(local_sigma_table, local_tau_table,
local_accumulative_recall, local_accumulative_precision,
global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag)
#######then check for many-to-many case##########
local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \
gt_flag, det_flag = many_to_many(local_sigma_table, local_tau_table,
local_accumulative_recall, local_accumulative_precision,
global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag)
# print each image evaluate result
try:
local_precision = local_accumulative_precision / num_det
except ZeroDivisionError:
local_precision = 0
try:
local_recall = local_accumulative_recall / num_gt
except ZeroDivisionError:
local_recall = 0
print('{0:12} Precision: {1:.4f}, Recall: {2:.4f}'.format(input_list[idx].replace('res', 'gt') + '.jpg',
local_precision, local_recall))
# print global evaluate result
try:
recall = global_accumulative_recall / total_num_gt
except ZeroDivisionError:
recall = 0
try:
precision = global_accumulative_precision / total_num_det
except ZeroDivisionError:
precision = 0
try:
f_score = 2*precision*recall/(precision+recall)
except ZeroDivisionError:
f_score = 0
print('Global Precision: {:.4f}, Recall: {:.4f}, f_score: {:.4f}'.format(precision, recall, f_score))
print('over')
return precision, recall, f_score
```
#### File: FudanOCR/utils/downloadCallback.py
```python
def callbackfunc(blocknum, blocksize, totalsize):
    '''Download progress callback (for urllib report hooks).
    @blocknum: number of data blocks downloaded so far
    @blocksize: size of each data block, in bytes
    @totalsize: total size of the remote file, in bytes
    '''
percent = 100.0 * blocknum * blocksize / totalsize
nowLoad = blocknum * blocksize / 1024 / 1024
total = totalsize / 1024 / 1024
if percent > 100:
percent = 100
print("\r %.2fM/%.2fM %.2f%%" % (nowLoad, total, percent), end=" ")
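# Illustrative usage (not part of the original file): pass callbackfunc as the
# report hook of urllib, e.g.
#   urllib.request.urlretrieve(url, local_path, callbackfunc)
# urllib then calls callbackfunc(blocknum, blocksize, totalsize) after each block.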
```
#### File: FudanOCR/utils/strLabelConverterForCTC.py
```python
import torch
import torch.nn as nn
import collections.abc
import random
import numpy as np
import cv2
from faker import Faker
# from CNumber import cnumber
import datetime
class strLabelConverterForCTC(object):
def __init__(self, alphabet):
self.alphabet = alphabet + ' ' # for `-1` index
self.dict = {}
for i, char in enumerate(alphabet):
# NOTE: 0 is reserved for 'blank' required by wrap_ctc
self.dict[char] = i + 1
def encode(self, text, depth=0, lower=True):
"""Support batch or single str."""
if isinstance(text, str):
if lower:
text = text.lower()
for char in text:
# Fix the bug
if self.alphabet.find(char) == -1:
print(char)
text = [self.dict[char] for char in text]
length = [len(text)]
        elif isinstance(text, collections.abc.Iterable):
length = [len(s) for s in text]
text = ''.join(text)
text, _ = self.encode(text)
if depth:
return text, len(text)
return (torch.IntTensor(text), torch.IntTensor(length))
def decode(self, t, length, raw=False):
if length.numel() == 1:
length = length[0]
t = t[:length]
if raw:
return ''.join([self.alphabet[i - 1] for i in t])
else:
char_list = []
for i in range(length):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
char_list.append(self.alphabet[t[i] - 1])
return ''.join(char_list).replace(' ', '')
else:
texts = []
index = 0
for i in range(length.numel()):
l = length[i]
texts.append(self.decode(
t[index:index + l], torch.IntTensor([l]), raw=raw))
index += l
return texts
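# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of the CTC label round-trip with a toy alphabet: encode()
# maps characters to 1-based indices (0 is reserved for the CTC blank), and
# decode() collapses repeated indices and drops blanks.
if __name__ == '__main__':
    converter = strLabelConverterForCTC('abc')
    codes, lengths = converter.encode('abca')
    # codes -> IntTensor([1, 2, 3, 1]), lengths -> IntTensor([4])
    print(converter.decode(codes, lengths, raw=False))  # prints 'abca'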
``` |
{
"source": "jingyi1997/pytorch-adda",
"score": 3
} |
#### File: pytorch-adda/datasets/usps.py
```python
import gzip
import os
import pickle
import pdb
import urllib.request
import cv2
import numpy as np
import torch
import torch.utils.data as data
from torchvision import datasets, transforms
from datasets.memcached_dataset import McDataset
import params
class USPS(data.Dataset):
"""USPS Dataset.
Args:
root (string): Root directory of dataset where dataset file exist.
train (bool, optional): If True, resample from dataset randomly.
download (bool, optional): If true, downloads the dataset
from the internet and puts it in root directory.
If dataset is already downloaded, it is not downloaded again.
transform (callable, optional): A function/transform that takes in
an PIL image and returns a transformed version.
E.g, ``transforms.RandomCrop``
"""
url = "https://raw.githubusercontent.com/mingyuliutw/CoGAN/master/cogan_pytorch/data/uspssample/usps_28x28.pkl"
def __init__(self, root, train=True, transform=None, download=False):
"""Init USPS dataset."""
# init params
self.root = os.path.expanduser(root)
self.filename = "usps_28x28.pkl"
self.train = train
        # Num of Train = 7438, Num of Test = 1860
self.transform = transform
self.dataset_size = None
# download dataset.
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found." +
" You can use download=True to download it")
self.train_data, self.train_labels = self.load_samples()
if self.train:
total_num_samples = self.train_labels.shape[0]
indices = np.arange(total_num_samples)
np.random.shuffle(indices)
self.train_data = self.train_data[indices[0:self.dataset_size], ::]
self.train_labels = self.train_labels[indices[0:self.dataset_size]]
self.train_data *= 255.0
            # Debugging leftovers from the original code: dump sampled images to
            # disk and drop into pdb. Commented out so the dataset can be built
            # without halting at a breakpoint.
            # dump_images(self.train_data, self.train_labels, 'data/raw/testing')
            # pdb.set_trace()
self.train_data = self.train_data.transpose(
(0, 2, 3, 1)) # convert to HWC
def __getitem__(self, index):
"""Get images and target for data loader.
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, label = self.train_data[index, ::], self.train_labels[index]
if self.transform is not None:
img = self.transform(img)
label = torch.LongTensor([np.int64(label).item()])
# label = torch.FloatTensor([label.item()])
return img, label
def __len__(self):
"""Return size of dataset."""
return self.dataset_size
def _check_exists(self):
"""Check if dataset is download and in right place."""
return os.path.exists(os.path.join(self.root, self.filename))
def download(self):
"""Download dataset."""
filename = os.path.join(self.root, self.filename)
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if os.path.isfile(filename):
return
print("Download %s to %s" % (self.url, os.path.abspath(filename)))
urllib.request.urlretrieve(self.url, filename)
print("[DONE]")
return
def load_samples(self):
"""Load sample images from dataset."""
filename = os.path.join(self.root, self.filename)
f = gzip.open(filename, "rb")
data_set = pickle.load(f, encoding="bytes")
f.close()
if self.train:
images = data_set[0][0]
labels = data_set[0][1]
self.dataset_size = labels.shape[0]
else:
images = data_set[1][0]
labels = data_set[1][1]
self.dataset_size = labels.shape[0]
return images, labels
def get_usps(train):
"""Get USPS dataset loader."""
# image pre-processing
pre_process = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(
mean=params.dataset_mean,
std=params.dataset_std)])
# dataset and data loader
#usps_dataset = USPS(root=params.data_root,
# train=train,
# transform=pre_process,
# download=False)
if train:
usps_dataset = McDataset(root_dir=params.tgt_dataset_root,meta_file = params.tgt_dataset_list,
transform=pre_process)
else:
usps_dataset = McDataset(root_dir=params.tgt_dataset_eval_root,meta_file = params.tgt_dataset_eval_list, transform=pre_process)
usps_data_loader = torch.utils.data.DataLoader(
dataset=usps_dataset,
batch_size=params.batch_size,
shuffle=True)
return usps_data_loader
def dump_images(images, labels, img_dir):
for i in range(10):
img_sub_dir = os.path.join(img_dir, str(i))
os.makedirs(img_sub_dir)
img_num = 0
for image, label in zip(images, labels):
img_path = os.path.join(img_dir, str(label), str(img_num)+'.png')
img_num += 1
cv2.imwrite(img_path, np.squeeze(image, axis=0))
if img_num % 100 == 0:
            print('{} images have been written'.format(img_num))
``` |
{
"source": "jingyi1997/UFDN",
"score": 2
} |
#### File: UFDN/src/ufdn.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
def get_act(name):
if name == 'LeakyReLU':
return nn.LeakyReLU(0.2)
elif name == 'ReLU':
return nn.ReLU()
elif name == 'Tanh':
return nn.Tanh()
elif name == '':
return None
else:
raise NameError('Unknown activation:'+name)
def LoadModel(name,parameter,img_size,input_dim):
if name == 'vae':
code_dim = parameter['code_dim']
enc_list = []
for layer,para in enumerate(parameter['encoder']):
if para[0] == 'conv':
if layer==0:
init_dim = input_dim
next_dim,kernel_size,stride,pad,bn,act = para[1:7]
act = get_act(act)
enc_list.append((para[0],(init_dim, next_dim,kernel_size,stride,pad,bn,act)))
init_dim = next_dim
else:
raise NameError('Unknown encoder layer type:'+para[0])
dec_list = []
for layer,para in enumerate(parameter['decoder']):
if para[0] == 'conv':
next_dim,kernel_size,stride,pad,bn,act,insert_code = para[1:8]
act = get_act(act)
dec_list.append((para[0],(init_dim, next_dim,kernel_size,stride,pad,bn,act),insert_code))
init_dim = next_dim
else:
raise NameError('Unknown decoder layer type:'+para[0])
return UFDN(enc_list,dec_list,code_dim)
elif name == 'nn':
dnet_list = []
init_dim = input_dim
for para in parameter['dnn']:
if para[0] == 'fc':
next_dim,bn,act,dropout = para[1:5]
act = get_act(act)
dnet_list.append((para[0],(init_dim, next_dim,bn,act,dropout)))
init_dim = next_dim
else:
raise NameError('Unknown nn layer type:'+para[0])
return Discriminator(dnet_list)
elif name == 'cnn':
dnet_list = []
init_dim = input_dim
cur_img_size = img_size
reshaped = False
for layer,para in enumerate(parameter['dnn']):
if para[0] == 'conv':
next_dim,kernel_size,stride,pad,bn,act = para[1:7]
act = get_act(act)
dnet_list.append((para[0],(init_dim, next_dim,kernel_size,stride,pad,bn,act)))
init_dim = next_dim
cur_img_size /= 2
elif para[0] == 'fc':
if not reshaped:
init_dim = int(cur_img_size*cur_img_size*init_dim)
reshaped = True
next_dim,bn,act,dropout = para[1:5]
act = get_act(act)
dnet_list.append((para[0],(init_dim, next_dim,bn,act,dropout)))
init_dim = next_dim
else:
raise NameError('Unknown encoder layer type:'+para[0])
return Discriminator(dnet_list)
else:
raise NameError('Unknown model type:'+name)
# custom weights initialization
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
# create a Convolution/Deconvolution block
def ConvBlock(c_in, c_out, k=4, s=2, p=1, norm='bn', activation=None, transpose=False, dropout=None):
layers = []
if transpose:
layers.append(nn.ConvTranspose2d(c_in, c_out, kernel_size=k, stride=s, padding=p))
else:
layers.append( nn.Conv2d(c_in, c_out, kernel_size=k, stride=s, padding=p))
if dropout:
layers.append(nn.Dropout2d(dropout))
if norm == 'bn':
layers.append(nn.BatchNorm2d(c_out))
if activation is not None:
layers.append(activation)
return nn.Sequential(*layers)
# create a fully connected layer
def FC(c_in, c_out, norm='bn', activation=None, dropout=None):
layers = []
layers.append(nn.Linear(c_in,c_out))
if dropout:
if dropout>0:
layers.append(nn.Dropout(dropout))
if norm == 'bn':
layers.append(nn.BatchNorm1d(c_out))
if activation is not None:
layers.append(activation)
return nn.Sequential(*layers)
# UFDN model
# Reference : https://github.com/pytorch/examples/blob/master/vae/main.py
# list of layer should be a list with each element being (layer type,(layer parameter))
# fc should occur after/before any convblock if used in encoder/decoder
# e.g. ('conv',( input_dim, neurons, kernel size, stride, padding, normalization, activation))
# ('fc' ,( input_dim, neurons, normalization, activation))
class UFDN(nn.Module):
def __init__(self, enc_list, dec_list, attr_dim):
super(UFDN, self).__init__()
### Encoder
self.enc_layers = []
for l in range(len(enc_list)):
self.enc_layers.append(enc_list[l][0])
if enc_list[l][0] == 'conv':
c_in,c_out,k,s,p,norm,act = enc_list[l][1]
if l == len(enc_list) -1 :
setattr(self, 'enc_mu', ConvBlock(c_in,c_out,k,s,p,norm,act,transpose=False))
setattr(self, 'enc_logvar', ConvBlock(c_in,c_out,k,s,p,norm,act,transpose=False))
else:
setattr(self, 'enc_'+str(l), ConvBlock(c_in,c_out,k,s,p,norm,act,transpose=False))
elif enc_list[l][0] == 'fc':
c_in,c_out,norm,act = enc_list[l][1]
if l == len(enc_list) -1 :
setattr(self, 'enc_mu', FC(c_in,c_out,norm,act))
setattr(self, 'enc_logvar', FC(c_in,c_out,norm,act))
else:
setattr(self, 'enc_'+str(l), FC(c_in,c_out,norm,act))
else:
                raise ValueError('Unrecognized layer type')
### Decoder
self.dec_layers = []
self.attr_dim = attr_dim
for l in range(len(dec_list)):
self.dec_layers.append((dec_list[l][0],dec_list[l][2]))
if dec_list[l][0] == 'conv':
c_in,c_out,k,s,p,norm,act = dec_list[l][1]
if dec_list[l][2]: c_in += self.attr_dim
setattr(self, 'dec_'+str(l), ConvBlock(c_in,c_out,k,s,p,norm,act,transpose=True))
elif dec_list[l][0] == 'fc':
c_in,c_out,norm,act = dec_list[l][1]
if dec_list[l][2]: c_in += self.attr_dim
setattr(self, 'dec_'+str(l), FC(c_in,c_out,norm,act))
else:
                raise ValueError('Unrecognized layer type')
self.apply(weights_init)
def encode(self, x):
for l in range(len(self.enc_layers)-1):
if (self.enc_layers[l] == 'fc') and (len(x.size())>2):
batch_size = x.size()[0]
x = x.view(batch_size,-1)
x = getattr(self, 'enc_'+str(l))(x)
if (self.enc_layers[-1] == 'fc') and (len(x.size())>2):
batch_size = x.size()[0]
x = x.view(batch_size,-1)
mu = getattr(self, 'enc_mu')(x)
logvar = getattr(self, 'enc_logvar')(x)
return mu, logvar
def reparameterize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z, insert_attrs = None):
for l in range(len(self.dec_layers)):
if (self.dec_layers[l][0] != 'fc') and (len(z.size()) != 4):
z = z.unsqueeze(-1).unsqueeze(-1)
if (insert_attrs is not None) and (self.dec_layers[l][1]):
if len(z.size()) == 2:
z = torch.cat([z,insert_attrs],dim=1)
else:
H,W = z.size()[2], z.size()[3]
z = torch.cat([z,insert_attrs.unsqueeze(-1).unsqueeze(-1).repeat(1,1,H,W)],dim=1)
z = getattr(self, 'dec_'+str(l))(z)
return z
def forward(self, x, insert_attrs = None, return_enc = False):
batch_size = x.size()[0]
mu, logvar = self.encode(x)
if len(mu.size()) > 2:
mu = mu.view(batch_size,-1)
logvar = logvar.view(batch_size,-1)
z = self.reparameterize(mu, logvar)
if return_enc:
return z
else:
return self.decode(z,insert_attrs), mu, logvar
class Discriminator(nn.Module):
def __init__(self, layer_list):
super(Discriminator, self).__init__()
self.layer_list = []
for l in range(len(layer_list)-1):
self.layer_list.append(layer_list[l][0])
if layer_list[l][0] == 'conv':
c_in,c_out,k,s,p,norm,act = layer_list[l][1]
setattr(self, 'layer_'+str(l), ConvBlock(c_in,c_out,k,s,p,norm,act,transpose=False))
elif layer_list[l][0] == 'fc':
c_in,c_out,norm,act,drop = layer_list[l][1]
setattr(self, 'layer_'+str(l), FC(c_in,c_out,norm,act,drop))
else:
                raise ValueError('Unrecognized layer type')
self.layer_list.append(layer_list[-1][0])
c_in,c_out,norm,act,_ = layer_list[-1][1]
if not isinstance(c_out, list):
c_out = [c_out]
self.output_dim = len(c_out)
for idx,d in enumerate(c_out):
setattr(self, 'layer_out_'+str(idx), FC(c_in,d,norm,act,0))
self.apply(weights_init)
def forward(self, x):
for l in range(len(self.layer_list)-1):
if (self.layer_list[l] == 'fc') and (len(x.size()) != 2):
batch_size = x.size()[0]
x = x.view(batch_size,-1)
x = getattr(self, 'layer_'+str(l))(x)
output = []
for d in range(self.output_dim):
output.append(getattr(self,'layer_out_'+str(d))(x))
if self.output_dim == 1:
return output[0]
else:
return tuple(output)
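# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming 64x64 RGB inputs and an 8-dimensional domain
# code, of how the layer-spec lists drive UFDN: each encoder entry is
# ('conv', (c_in, c_out, k, s, p, norm, act)) and each decoder entry carries
# an extra flag saying whether the domain code is concatenated at that layer.
if __name__ == '__main__':
    enc = [('conv', (3, 64, 4, 2, 1, 'bn', nn.LeakyReLU(0.2))),
           ('conv', (64, 128, 4, 2, 1, 'bn', nn.LeakyReLU(0.2))),
           ('conv', (128, 256, 16, 1, 0, '', None))]
    dec = [('conv', (256, 128, 16, 1, 0, 'bn', nn.ReLU()), True),
           ('conv', (128, 64, 4, 2, 1, 'bn', nn.ReLU()), False),
           ('conv', (64, 3, 4, 2, 1, '', nn.Tanh()), False)]
    net = UFDN(enc, dec, attr_dim=8)
    x = torch.randn(2, 3, 64, 64)
    domain_code = torch.zeros(2, 8)
    recon, mu, logvar = net(x, insert_attrs=domain_code)
    print(recon.size(), mu.size(), logvar.size())  # (2,3,64,64), (2,256), (2,256)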
``` |
{
"source": "jingyi7777/adapt-seq-design",
"score": 3
} |
#### File: data/scripts/curate_ccf005_pairs.py
```python
CONTEXT_NT = 20
from collections import defaultdict
import csv
import gzip
import math
import statistics
def read_input(in_fn):
"""Read annotated (summarized) input csv file.
In this file, every line represents a guide-target pair.
Args:
in_fn: path to input file
Returns:
list of dicts where each element corresponds to a row
"""
col_names = {}
lines = []
with open(in_fn) as f:
for i, line in enumerate(f):
# Split the line and skip the first column (row number)
ls = line.rstrip().split(',')
ls = ls[1:]
if i == 0:
# Read header
for j, col_name in enumerate(ls):
col_names[j] = col_name
else:
# Read line
row = {}
for j, val in enumerate(ls):
row[col_names[j]] = val
lines += [row]
return lines
def read_droplet_input(in_droplets):
"""Read input csv file of droplets.
In this file, every line represents a droplet. There may be multiple
droplets for each guide-target pair, so a pair can be represented by many
lines.
This file is messy -- e.g., newline characters within quotes -- so let's
use the csv module here to read.
Args:
in_droplets: path to input file
Returns:
list of dicts where each element corresponds to a droplet
"""
# Only keep a subset of the columns
cols_to_keep = ['Target', 'crRNA', 'k']
col_name_idx = {}
lines = []
with gzip.open(in_droplets, 'rt') as f:
reader = csv.reader(f)
col_names = next(reader, None)
col_name_idx = {k: i for i, k in enumerate(col_names)}
for i, ls in enumerate(reader):
row = {}
for col in cols_to_keep:
row[col] = ls[col_name_idx[col]]
lines += [row]
return lines
def filter_controls(rows):
"""Remove crRNA controls from rows.
This leaves in target controls.
Returns:
rows with only experiments
"""
rows_filtered = []
for row in rows:
if row['guide_type'] == 'exp':
# Check this row
assert 'control' not in row['crRNA']
rows_filtered += [row]
else:
# Check this is a control
assert row['guide_type'] == 'neg' or row['guide_type'] == 'pos'
assert 'control' in row['crRNA']
return rows_filtered
def filter_inactive_guides(rows):
"""Filter two inactive guides.
For some reason, two guides were completely inactive -- probably a
technical issue. Filter these out.
Returns:
rows with two inactive guides filtered
"""
inactive_guides = ['block18_guide0', 'block7_guide13']
rows_filtered = []
for row in rows:
if row['crRNA'] in inactive_guides:
# Verify this is inactive
assert float(row['median']) < -2.5
else:
# Keep it
rows_filtered += [row]
return rows_filtered
def hamming_dist(a, b):
"""Compute Hamming distance between two strings.
"""
assert len(a) == len(b)
return sum(1 for i in range(len(a)) if a[i] != b[i])
def reverse_complement(x):
"""Construct reverse complement of string.
"""
rc = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
x = x.upper()
return ''.join(rc[b] for b in x[::-1])
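# Quick sanity checks for the helpers above (illustrative additions, not in
# the original script): Hamming distance counts positionwise mismatches and
# reverse_complement flips both strand and orientation.
assert hamming_dist('AAAA', 'AAAT') == 1
assert reverse_complement('AACG') == 'CGTT'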
def reformat_row(row):
"""Verify and summarize contents of a row.
Args:
row: dict representing a row (guide-target pair)
Returns:
row with new columns, removed columns, and renamed columns
"""
# Check that guide_target is the reverse complement of spacer_seq
spacer_rc = reverse_complement(row['spacer_seq'].replace('u', 't'))
assert spacer_rc == row['guide_target']
guide_seq = row['guide_target']
guide_pos = int(row['pos'])
full_target_seq = row['target_seq']
# Check that the Hamming distance to the target is reasonable
target_at_guide = full_target_seq[guide_pos:(guide_pos + len(guide_seq))]
hd = hamming_dist(guide_seq, target_at_guide)
if row['target_type'] == 'exp':
# 1 mismatch (if mismatch is within guide)
if int(float(row['mismatch_position'])) < 28:
assert hd == 1
else:
assert hd == 0
elif row['target_type'] == 'pos':
# matching
assert hd == 0
elif row['target_type'] == 'neg':
# not matching
assert hd > 1
# Extract target sequence before and after guide (context)
target_before = full_target_seq[(guide_pos - CONTEXT_NT):guide_pos]
target_after = full_target_seq[(guide_pos + len(guide_seq)):(guide_pos + len(guide_seq) + CONTEXT_NT)]
assert (target_before + target_at_guide + target_after) in full_target_seq
# Add 'N' before or after target context if there are no bases there
if len(target_before) < CONTEXT_NT:
missing_bases = CONTEXT_NT - len(target_before)
target_before = 'N'*missing_bases + target_before
if len(target_after) < CONTEXT_NT:
missing_bases = CONTEXT_NT - len(target_after)
target_after = 'N'*missing_bases + target_after
# Check the PFS
if row['PFS'] != '':
assert row['PFS'] == target_after[:2]
# Extract the block
block = int(float(row['block']))
assert block == float(row['block'])
# Remake row
row_new = {}
row_new['crrna'] = row['crRNA']
row_new['target'] = row['Target']
row_new['guide_seq'] = guide_seq
row_new['guide_pos_nt'] = guide_pos
row_new['target_at_guide'] = target_at_guide
row_new['target_before'] = target_before
row_new['target_after'] = target_after
row_new['crrna_block'] = block
row_new['type'] = row['target_type']
row_new['guide_target_hamming_dist'] = hd
row_new['out_logk_median'] = float(row['median'])
row_new['out_logk_stdev'] = float(row['std']) if row['count'] != '1' else 0
row_new['out_logk_replicate_count'] = int(row['count'])
return row_new
def add_replicate_measurements(rows, droplets):
"""Add a column giving replicate information to each row.
Each technical replicate measurement is a droplet. For each guide-target
pair, there are 1 or more replicate measurements.
Args:
rows: list of dicts, where each element represents a guide-target pair
droplets: list of dicts, where each element represents a droplet
Returns:
rows with an added column 'out_logk_measurements', as given by the
individual droplets
"""
# Construct a mapping {(target, crRNA): [replicate measurements]}
measurements = defaultdict(list)
for droplet in droplets:
# Note that, in droplets, 'k' is really log(k)
target = droplet['Target']
crrna = droplet['crRNA']
logk = float(droplet['k'])
measurements[(target, crrna)].append(logk)
rows_new = []
for row in rows:
# Fetch and sort the list of measurements for this guide-target pair
m = measurements[(row['target'], row['crrna'])]
m = sorted(m)
# Check that the summary statistics agree with the measurements
assert len(m) >= 1
assert row['out_logk_replicate_count'] == len(m)
assert math.isclose(row['out_logk_median'], statistics.median(m),
rel_tol=1e-5)
if len(m) == 1:
assert row['out_logk_stdev'] == 0
else:
assert math.isclose(row['out_logk_stdev'], statistics.stdev(m),
rel_tol=1e-5)
# Comma-separate the measurements
m_str = ','.join(str(v) for v in m)
row['out_logk_measurements'] = m_str
rows_new += [row]
return rows_new
def write_output(rows, out_fn):
"""Write a TSV file output, after reformatting.
"""
cols = ['guide_seq', 'guide_pos_nt', 'target_at_guide', 'target_before',
'target_after', 'crrna_block', 'type', 'guide_target_hamming_dist',
'out_logk_median', 'out_logk_stdev', 'out_logk_replicate_count',
'out_logk_measurements']
with open(out_fn, 'w') as fw:
def write_list(l):
fw.write('\t'.join([str(x) for x in l]) + '\n')
write_list(cols)
for row in rows:
row_list = [row[c] for c in cols]
write_list(row_list)
def main():
# Paths to input/output files
IN = "CCF005_pairs_annotated.csv"
IN_DROPLETS = "CCF005_pairs_droplets.filtered.csv.gz"
OUT = "CCF-curated/CCF005_pairs_annotated.curated.tsv"
rows = read_input(IN)
rows = filter_controls(rows)
rows = filter_inactive_guides(rows)
# Reformat rows and check a few things
new_rows = []
for row in rows:
row_new = reformat_row(row)
new_rows += [row_new]
rows = new_rows
# Add droplet-level (replicate) measurements
droplets = read_droplet_input(IN_DROPLETS)
rows = add_replicate_measurements(rows, droplets)
write_output(rows, OUT)
if __name__ == "__main__":
main()
```
#### File: data/scripts/resample_ccf_data.py
```python
import gzip
import numpy as np
import merge_ccf_data
def make_row_per_measurement(cols, col_idx, rows,
num_replicates_to_sample=10):
"""Resample measurements, making one row per measurement.
Args:
cols: list of column names
col_idx: dict {column name: index of column}
rows: list of rows across multiple datasets
num_replicates_to_sample: number of measurements to sample,
with replacement, for each guide-target row
Returns:
cols and rows, with one measurement per row; cols is
slightly different (for 'out_*' values) than the
input cols
"""
cols_input = [c for c in cols if not c.startswith('out_')]
new_cols = cols_input + ['out_logk_measurement']
new_rows = []
num_with_sufficient_replicates = 0
for row in rows:
# Start the new rows with input (non-output) values
new_row_start = []
for c in cols_input:
new_row_start += [row[col_idx[c]]]
# Get measurements for this guide-target pair
measurements_str = row[col_idx['out_logk_measurements']].split(',')
measurements = [float(x) for x in measurements_str]
measurements_sampled = np.random.choice(measurements,
size=num_replicates_to_sample)
if len(measurements) >= num_replicates_to_sample:
num_with_sufficient_replicates += 1
for m in measurements_sampled:
new_row = new_row_start + [m]
new_rows += [new_row]
print(("Number of input rows (guide-target pairs) with >= %d "
"measurements is %d of %d") % (num_replicates_to_sample,
num_with_sufficient_replicates, len(rows)))
return new_cols, new_rows
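# Illustrative note (not in the original script): because np.random.choice is
# called with replacement, a pair with measurements [-1.0, 0.0] sampled 4
# times can yield e.g. [0.0, -1.0, 0.0, 0.0]; pairs with fewer than
# num_replicates_to_sample measurements are therefore oversampled.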
def write_rows(cols, rows, out_fn):
"""Write a .tsv.gz file output.
Args:
cols: list of column names
rows: list of rows, merged
out_fn: path to output TSV file
"""
with gzip.open(out_fn, 'wt') as fw:
def write_list(l):
fw.write('\t'.join([str(x) for x in l]) + '\n')
# Write header
write_list(cols)
# Write each row
for row in rows:
write_list(row)
def main():
# Set seed
np.random.seed(1)
# Paths to input/output files
IN_CCF = "CCF-curated/CCF_merged_pairs_annotated.curated.tsv"
OUT = "CCF-curated/CCF_merged_pairs_annotated.curated.resampled.tsv.gz"
# Read
cols, col_idx, rows = merge_ccf_data.read_curated_data(IN_CCF)
# Transform to have 1 row per measurement
new_cols, new_rows = make_row_per_measurement(cols, col_idx, rows)
# Write
write_rows(new_cols, new_rows, OUT)
if __name__ == "__main__":
main()
```
#### File: jingyi7777/adapt-seq-design/predictor_baseline.py
```python
import argparse
import fnn
import parse_data
import predictor
import random
import rnn
import numpy as np
import scipy
import sklearn
import sklearn.ensemble
import sklearn.linear_model
import sklearn.metrics
import sklearn.model_selection
import sklearn.utils
import tensorflow as tf
gpus_are_available = len(tf.config.list_physical_devices('GPU')) > 0
def parse_args():
"""Parse arguments.
Returns:
argument namespace
"""
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--dataset',
choices=['cas13'],
default='cas13',
help=("Dataset to use."))
parser.add_argument('--cas13-subset',
choices=['exp', 'pos', 'neg', 'exp-and-pos'],
help=("Use a subset of the Cas13 data. See parse_data module "
"for descriptions of the subsets. To use all data, do not "
"set."))
parser.add_argument('--cas13-classify',
action='store_true',
help=("If set, only classify Cas13 activity into inactive/active"))
parser.add_argument('--cas13-regress-on-all',
action='store_true',
help=("If set, perform regression for Cas13 data on all data "
"(this can be reduced using --cas13-subset)"))
parser.add_argument('--cas13-regress-only-on-active',
action='store_true',
help=("If set, perform regression for Cas13 data only on the "
"active class"))
parser.add_argument('--cas13-normalize-crrna-activity',
action='store_true',
help=("If set, normalize the activity of each crRNA (guide) "
"across its targets to have mean 0 and stdev 1; this means "
"prediction is performed based on target differences (e.g., "
"mismatches) rather than inherent sequence of the crRNA"))
parser.add_argument('--cas13-use-difference-from-wildtype-activity',
action='store_true',
help=("If set, use the activity value of a guide g and target t "
"pair to be the difference between the measured activity of "
"g-t and the mean activity between g and all wildtype "
"(matching) targets of g; this means prediction is "
"performed based on targeted differences (e.g., mismatches) "
"rather than inherent sequence of the crRNA"))
parser.add_argument('--context-nt',
type=int,
default=10,
help=("nt of target sequence context to include alongside each "
"guide"))
parser.add_argument('--regression-scoring-method',
choices=['mse', 'rho'],
default='rho',
help=("Method to use for scoring regression results; 'mse' for "
"mean squared error, 'rho' for Spearman rank correlation"))
parser.add_argument('--test-split-frac',
type=float,
default=0.3,
help=("Fraction of the dataset to use for testing the final "
"model"))
parser.add_argument('--models-to-use',
nargs='+',
help=("List of model names to use. If not set, use all."))
parser.add_argument('--nested-cross-val',
action='store_true',
help=("If set, perform nested cross-validation to evaluate "
"model selection, rather than just cross-validation to "
"select a single model"))
parser.add_argument('--nested-cross-val-outer-num-splits',
type=int,
default=5,
help=("Number of outer folds to use for nested cross-validation"))
parser.add_argument('--nested-cross-val-out-tsv',
help=("Path to output TSV at which to write metrics on the "
"validation data for each outer fold of nested "
"cross-validation (one row per outer fold; each column "
"gives a metric)"))
parser.add_argument('--nested-cross-val-feat-coeffs-out-tsv',
help=("Path to output TSV at which to write a coefficient for "
"each feature (only linear models) for each outer fold "
"of nested cross-validation"))
parser.add_argument('--nested-cross-val-run-for',
nargs='+',
type=int,
help=("If set, only run the given outer splits (0-based). If "
"not set, run for all."))
parser.add_argument('--seed',
type=int,
default=1,
help=("Random seed"))
args = parser.parse_args()
# Print the arguments provided
print(args)
return args
def set_seed(seed):
"""Set tensorflow and numpy seed.
sklearn appears to use the numpy random number generator, so setting
the numpy seed applies to that too.
Args:
seed: random seed
"""
tf.random.set_seed(seed)
np.random.seed(seed)
def random_search_cv(model_name, model_obj, cv, scorer, n_iter=100):
"""Construct a RandomizedSearchCV object.
Args:
model_name: name of model
model_obj: model object; either a scikit-learn estimator or one that
follows its interface
cv: cross-validation splitting iterator
scorer: scoring function to use
n_iter: number of samples for random search
Returns:
sklearn.model_selection.RandomizedSearchCV object
"""
# In some cases (e.g., pulling from distributions whose log is uniform),
# we'll want to just draw many samples and have RandomizedSearchCV draw
# from these; specify how many samples we draw and provide as
# representative of the space
space_size = 1000
# Set params
if model_name == 'l1_lr':
params = {
'alpha': np.logspace(-8, 8, base=10.0, num=space_size)
}
elif model_name == 'l2_lr':
params = {
'alpha': np.logspace(-8, 8, base=10.0, num=space_size)
}
elif model_name == 'l1l2_lr':
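# For l1_ratio, sample values in [2^-5, 1] that are concentrated near 1
# (the Lasso end), following the recommendation (noted for ElasticNet
# below) to place more values close to lasso than to ridge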
params = {
'l1_ratio': 1.0 - np.logspace(-5, 0, base=2.0, num=space_size)[::-1] + 2.0**(-5),
'alpha': np.logspace(-8, 8, base=10.0, num=space_size)
}
elif model_name == 'l1_logit':
params = {
'C': np.logspace(-4, 4, base=10.0, num=space_size)
}
elif model_name == 'l2_logit':
params = {
'C': np.logspace(-4, 4, base=10.0, num=space_size)
}
elif model_name == 'l1l2_logit':
params = {
'l1_ratio': 1.0 - np.logspace(-5, 0, base=2.0, num=space_size)[::-1] + 2.0**(-5),
'C': np.logspace(-4, 4, base=10.0, num=space_size)
}
elif model_name == 'gbt':
params = {
'learning_rate': np.logspace(-2, 0, base=10.0, num=space_size),
'n_estimators': np.logspace(0, 8, base=2, num=space_size).astype(int),
'min_samples_split': np.logspace(1, 3, base=2, num=space_size).astype(int),
'min_samples_leaf': np.logspace(0, 2, base=2, num=space_size).astype(int),
'max_depth': np.logspace(1, 3, base=2, num=space_size).astype(int),
'max_features': [None, 0.1, 'sqrt', 'log2']
}
elif model_name == 'rf':
# For max_depth, set 'None' 1/2 of the time
params = {
'n_estimators': np.logspace(0, 8, base=2, num=space_size).astype(int),
'min_samples_split': np.logspace(1, 3, base=2, num=space_size).astype(int),
'min_samples_leaf': np.logspace(0, 2, base=2, num=space_size).astype(int),
'max_depth': [None]*space_size + list(np.logspace(1, 4, base=2,
num=space_size).astype(int)),
'max_features': [None, 0.1, 'sqrt', 'log2']
}
elif model_name == 'mlp':
# Constructing a space of layer_dims requires choosing the number of
# layers and the dimensions of each; note that layer_dims does NOT
# include the output layer of the MLP (which has dimension=1)
layer_dims = []
for i in range(space_size):
num_layers = np.random.randint(1, 4)
dims = [np.random.randint(4, 128) for _ in range(num_layers)]
layer_dims += [dims]
params = {
'layer_dims': layer_dims,
'dropout_rate': scipy.stats.uniform(0, 0.5),
'activation_fn': ['relu', 'elu'],
'batch_size': [16]
}
elif model_name == 'lstm':
params = {
'units': np.logspace(1, 8, base=2, num=space_size).astype(int),
'bidirectional': [False, True],
'embed_dim': [None]*4 + list(range(1, 9)),
'dropout_rate': scipy.stats.uniform(0, 0.5),
'batch_size': [16]
}
elif model_name == 'svm':
params = {
'C': np.logspace(-8, 8, base=10.0, num=space_size),
'penalty': ['l1', 'l2']
}
else:
raise Exception("Unknown model: '%s'" % model_name)
if model_name == 'lstm' or model_name == 'mlp':
# Do not parallelize (to avoid memory issues); TensorFlow will already
# take advantage of multiple GPUs or CPUs if available
n_jobs = 1
if model_name == 'lstm':
# Use a smaller search space; this is slow to train
n_iter = 50
else:
# Use all but 1 CPU (n_jobs=-2)
n_jobs = -2
rs_cv = sklearn.model_selection.RandomizedSearchCV(model_obj,
params, cv=cv, refit=True,
scoring=scorer, n_iter=n_iter,
n_jobs=n_jobs)
return rs_cv
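# Illustrative usage of random_search_cv (mirroring the calls in classify()
# and regress() below; cv_iter, scorer, x_tr, and y_tr are hypothetical
# placeholders for a CV split iterator, a sklearn scorer, and train data):
#   reg = sklearn.linear_model.Lasso(max_iter=1000, tol=0.0001)
#   reg_cv = random_search_cv('l1_lr', reg, cv_iter, scorer)
#   reg_cv.fit(x_tr, y_tr)  # refit=True, so the best estimator is refit on all train data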
def classify(x_train, y_train, x_test, y_test,
parsers,
num_inner_splits=5,
scoring_method='auc-roc',
models_to_use=None,
feat_coeffs_out_tsv_f=None):
"""Perform classification.
Test data is used for evaluating the model with the best choice of
hyperparameters, after refitting across *all* the train data.
Args:
x_{train,test}: input data for train/test
y_{train,test}: output labels for train/test
num_inner_splits: number of splits for cross-validation
parsers: parse_data parsers to use for splitting data
scoring_method: method to use for scoring test results; 'auc-roc'
(auROC) or 'auc-pr' (auPR)
models_to_use: list of models to test; if None, test all
feat_coeffs_out_tsv_f: if set, file handler to which to write
coefficients for each feature (linear models only; only for
the best estimator after hyperparameter search)
Returns:
dict {model: {input feats: metrics on test data for best choice of
hyperparameters for model}}
"""
# Check models_to_use
all_models = ['logit', 'l1_logit', 'l2_logit', 'l1l2_logit', 'gbt',
'rf', 'svm', 'mlp', 'lstm']
if models_to_use is None:
models_to_use = all_models
assert set(models_to_use).issubset(all_models)
# Set the input feats to use for different models
# Use the same choice for all models *except* lstm, which should be in a
# series form where each time step corresponds to a position
input_feats = {}
for m in all_models:
if m == 'lstm':
input_feats[m] = ['onehot']
else:
input_feats[m] = ['onehot-flat', 'onehot-simple', 'handcrafted',
'combined']
# Determine class weights
# For this, just use the 'onehot' input features; class weights should be
# the same across all
y_train_labels = list(y_train['onehot'])
class_weight = sklearn.utils.class_weight.compute_class_weight(
'balanced', sorted(np.unique(y_train_labels)), y_train_labels)
class_weight = list(class_weight)
class_weight = {i: weight for i, weight in enumerate(class_weight)}
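# For example (illustrative numbers): with 100 training points in class 0
# and 300 in class 1, 'balanced' yields {0: 400/(2*100) = 2.0,
# 1: 400/(2*300) ~ 0.67}, i.e., the rarer class is upweighted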
# With models, perform cross-validation to determine hyperparameters
# Most of the built-in cross-validators find the best choice based on
# their default score (e.g., accuracy); some do not support a custom scoring
# function via a `scoring=...` argument. So instead wrap each classifier with
# a RandomizedSearchCV object, which does support a custom scoring metric.
# Use auROC or auPR (per scoring_method) for this.
def cv(feats):
return parsers[feats].split(x_train[feats], y_train[feats],
num_inner_splits, stratify_by_pos=True, yield_indices=True)
def auc_roc_f(y, y_pred):
return sklearn.metrics.roc_auc_score(y, y_pred)
def auc_pr_f(y, y_pred):
pr_precision, pr_recall, pr_thresholds = sklearn.metrics.precision_recall_curve(
y, y_pred, pos_label=1)
return sklearn.metrics.auc(pr_recall, pr_precision)
auc_roc_scorer = sklearn.metrics.make_scorer(auc_roc_f,
greater_is_better=True)
auc_pr_scorer = sklearn.metrics.make_scorer(auc_pr_f,
greater_is_better=True)
if scoring_method == 'auc-roc':
scorer = auc_roc_scorer
elif scoring_method == 'auc-pr':
scorer = auc_pr_scorer
else:
raise ValueError("Unknown scoring method %s" % scoring_method)
def fit_and_test_model(clf, model_name, model_desc, hyperparams, feats,
inspect_feat_coeffs=False):
"""Fit and test model.
Args:
clf: classifier model object
model_name: short string naming model
model_desc: string describing model
hyperparams: list [p] of hyperparameters where each p is a string
and clf.p gives the value chosen by the hyperparameter search
feats: input features type
inspect_feat_coeffs: if set, save the feature coefficients with
their descriptions and print the top ones (by absolute value)
Returns:
dict giving metrics for the best choice of model hyperparameters
"""
# Fit model
clf.fit(x_train[feats], y_train[feats])
if inspect_feat_coeffs:
# Combine coefficients with their descriptions
if hasattr(clf, 'coef_'):
coeffs = clf.coef_
else:
# clf is likely a CV (e.g., RandomizedSearchCV) object
coeffs = clf.best_estimator_.coef_
if len(coeffs) == 1:
coeffs = coeffs[0]
coef_descriptions = parsers[feats].baseline_descriptions
assert len(coeffs) == len(coef_descriptions)
cd = zip(coeffs, coef_descriptions)
# Sort (reverse) by absolute value of coefficient
cd_sorted = sorted(cd, key=lambda x: abs(x[0]), reverse=True)
# Print top 10 coefficients with descriptions
print('Top feature coefficients:')
for coeff, description in cd_sorted[:10]:
print(' ', description, ':', coeff)
# Save all feature coefficients to a file, if set
if feat_coeffs_out_tsv_f is not None:
for coeff, description in cd_sorted:
row = [model_name, feats, description, coeff]
line = '\t'.join(str(c) for c in row)
feat_coeffs_out_tsv_f.write(line + '\n')
# Test model
y_pred = clf.predict(x_test[feats])
y_pred_class = [0 if y < 0.5 else 1 for y in y_pred]
# Compute metrics (for auROC and auPR)
# This calculation was initially performed with *both* TensorFlow
# and scikit-learn to report both results. However, it seems that
# using TensorFlow for this calculation sometimes leads to a strange
# crash caused by the GPU running out of memory (possibly because
# there are multiple processes (jobs) for the hyperparameter search and
# TensorFlow tries to use all of the GPU's memory). The results between
# scikit-learn and TensorFlow were virtually identical for auROC, and
# were very close for auPR (reporting avg. precision along with auPR
# should alleviate calculation concerns).
auc_roc_sk = auc_roc_f(y_test[feats], y_pred)
auc_pr_sk = auc_pr_f(y_test[feats], y_pred)
avg_prec = sklearn.metrics.average_precision_score(y_test[feats],
y_pred)
accuracy = sklearn.metrics.accuracy_score(y_test[feats], y_pred_class)
# Print metrics
print('#'*20)
print("Classification with {}".format(model_desc))
if type(hyperparams) is list:
for p in hyperparams:
print(" best {} = {}".format(p, getattr(clf, p)))
else:
print(" best params = {}".format(hyperparams.best_params_))
print(" auROC (SK) = {}".format(auc_roc_sk))
print(" auPR (SK) = {}".format(auc_pr_sk))
print(" Avg. prec = {}".format(avg_prec))
print(" Accuracy = {}".format(accuracy))
print('#'*20)
return {'auc-roc': auc_roc_sk, 'auc-pr': auc_pr_sk,
'avg-prec': avg_prec, 'accuracy': accuracy,
'1_minus_auc-roc': 1.0-auc_roc_sk}
# Logistic regression (no regularization)
def logit(feats):
clf = sklearn.linear_model.LogisticRegression(penalty='none',
class_weight=class_weight, solver='lbfgs',
max_iter=100) # no CV because there are no hyperparameters
return fit_and_test_model(clf, 'logit', 'Logistic regression',
hyperparams=[], feats=feats,
inspect_feat_coeffs=True)
# L1 logistic regression
def l1_logit(feats):
clf = sklearn.linear_model.LogisticRegression(penalty='l1',
class_weight=class_weight, solver='saga',
max_iter=100, tol=0.0001)
clf_cv = random_search_cv('l1_logit', clf, cv(feats), scorer)
return fit_and_test_model(clf_cv, 'l1_logit', 'L1 logistic regression',
hyperparams=clf_cv, feats=feats,
inspect_feat_coeffs=True)
# L2 logistic regression
def l2_logit(feats):
clf = sklearn.linear_model.LogisticRegression(penalty='l2',
class_weight=class_weight, solver='lbfgs',
max_iter=100, tol=0.0001)
clf_cv = random_search_cv('l2_logit', clf, cv(feats), scorer)
return fit_and_test_model(clf_cv, 'l2_logit', 'L2 logistic regression',
hyperparams=clf_cv, feats=feats,
inspect_feat_coeffs=True)
# Elastic net (L1+L2 logistic regression)
def l1l2_logit(feats):
clf = sklearn.linear_model.LogisticRegression(penalty='elasticnet',
class_weight=class_weight, solver='saga',
max_iter=100, tol=0.0001)
clf_cv = random_search_cv('l1l2_logit', clf, cv(feats), scorer)
return fit_and_test_model(clf_cv, 'l1l2_logit', 'L1+L2 logistic regression',
hyperparams=clf_cv, feats=feats,
inspect_feat_coeffs=True)
# Gradient-boosted classification trees
def gbt(feats):
# It seems this does not support class_weight
clf = sklearn.ensemble.GradientBoostingClassifier(loss='deviance',
tol=0.001)
clf_cv = random_search_cv('gbt', clf, cv(feats), scorer)
return fit_and_test_model(clf_cv, 'gbt', 'Gradient boosting classification',
hyperparams=clf_cv, feats=feats)
# Random forest classification
def rf(feats):
clf = sklearn.ensemble.RandomForestClassifier(criterion='gini',
class_weight=class_weight)
clf_cv = random_search_cv('rf', clf, cv(feats), scorer)
return fit_and_test_model(clf_cv, 'rf', 'Random forest classification',
hyperparams=clf_cv, feats=feats)
# SVM
def svm(feats):
# sklearn's SVC has a fit time that is quadratic in the number of
# samples; to be faster, this uses LinearSVC (with the downside being
# that it does not support higher-dimensional kernels)
clf = sklearn.svm.LinearSVC(class_weight=class_weight, tol=0.0001)
clf_cv = random_search_cv('svm', clf, cv(feats), scorer)
return fit_and_test_model(clf_cv, 'svm', 'SVM',
hyperparams=clf_cv, feats=feats)
# MLP
def mlp(feats):
clf = fnn.MultilayerPerceptron(parsers[feats].context_nt,
regression=False, class_weight=class_weight)
clf_cv = random_search_cv('mlp', clf, cv(feats), scorer)
return fit_and_test_model(clf_cv, 'mlp', 'Multilayer perceptron',
hyperparams=clf_cv, feats=feats)
# LSTM
def lstm(feats):
clf = rnn.LSTM(parsers[feats].context_nt,
regression=False, class_weight=class_weight)
clf_cv = random_search_cv('lstm', clf, cv(feats), scorer)
return fit_and_test_model(clf_cv, 'lstm', 'LSTM',
hyperparams=clf_cv, feats=feats)
metrics_for_models = {}
for model_name in models_to_use:
metrics_for_models[model_name] = {}
for feats in input_feats[model_name]:
print(("Running and evaluating model '%s' with input feature '%s'") %
(model_name, feats))
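# Dispatch to the nested function defined above whose name matches
# model_name (e.g., 'l1_logit' -> l1_logit(feats))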
model_fn = locals()[model_name]
metrics_for_models[model_name][feats] = model_fn(feats)
return metrics_for_models
def regress(x_train, y_train, x_test, y_test,
parsers,
num_inner_splits=5,
scoring_method='rho',
models_to_use=None,
feat_coeffs_out_tsv_f=None):
"""Perform regression.
Test data is used for evaluating the model with the best choice of
hyperparameters, after refitting across *all* the train data.
Args:
x_{train,test}: input data for train/test
y_{train,test}: output labels for train/test
num_inner_splits: number of splits for cross-validation
parsers: parse_data parsers to use for splitting data
scoring_method: method to use for scoring test results; 'mse' (mean
squared error) or 'rho' (Spearman's rank correlation)
models_to_use: list of models to test; if None, test all
feat_coeffs_out_tsv_f: if set, file handler to which to write
coefficients for each feature (linear models only; only for
the best estimator after hyperparameter search)
Returns:
dict {model: {input feats: metrics on test data for best choice of
hyperparameters for model}}
"""
# Check models_to_use
all_models = ['lr', 'l1_lr', 'l2_lr', 'l1l2_lr', 'gbt', 'rf', 'mlp', 'lstm']
if models_to_use is None:
models_to_use = all_models
assert set(models_to_use).issubset(all_models)
# Set the input feats to use for different models
# Use the same choice for all models *except* lstm, which should be in a
# series form where each time step corresponds to a position
input_feats = {}
for m in all_models:
if m == 'lstm':
input_feats[m] = ['onehot']
else:
input_feats[m] = ['onehot-flat', 'onehot-simple', 'handcrafted',
'combined']
# With models, perform cross-validation to determine hyperparameters
# Most of the built-in cross-validators find the best choice based on
# R^2; some of them do not support a custom scoring function via a
# `scoring=...` argument. So instead wrap the regression with a
# RandomizedSearchCV object, which does support a custom scoring metric. Use
# Spearman rank correlation (or MSE, per scoring_method) for this.
def cv(feats):
return parsers[feats].split(x_train[feats], y_train[feats],
num_inner_splits, stratify_by_pos=True, yield_indices=True)
def rho_f(y, y_pred):
rho, _ = scipy.stats.spearmanr(y, y_pred)
return rho
rho_scorer = sklearn.metrics.make_scorer(rho_f,
greater_is_better=True)
mse_scorer = sklearn.metrics.make_scorer(
sklearn.metrics.mean_squared_error,
greater_is_better=False)
if scoring_method == 'mse':
scorer = mse_scorer
elif scoring_method == 'rho':
scorer = rho_scorer
else:
raise ValueError("Unknown scoring method %s" % scoring_method)
def fit_and_test_model(reg, model_name, model_desc, hyperparams, feats,
inspect_feat_coeffs=False):
"""Fit and test model.
Args:
reg: regression model object
model_name: short string naming model
model_desc: string describing model
hyperparams: list [p] of hyperparameters where each p is a string
and reg.p gives the value chosen by the hyperparameter search
feats: input features type
inspect_feat_coeffs: if set, save the feature coefficients with
their descriptions and print the top ones (by absolute value)
Returns:
dict giving metrics for the best choice of model hyperparameters
"""
# Fit model
reg.fit(x_train[feats], y_train[feats])
if inspect_feat_coeffs:
# Combine coefficients with their descriptions
if hasattr(reg, 'coef_'):
coeffs = reg.coef_
else:
# reg is likely a CV (e.g., RandomizedSearchCV) object
coeffs = reg.best_estimator_.coef_
if len(coeffs) == 1:
coeffs = coeffs[0]
coef_descriptions = parsers[feats].baseline_descriptions
assert len(coeffs) == len(coef_descriptions)
cd = zip(coeffs, coef_descriptions)
# Sort (reverse) by absolute value of coefficient
cd_sorted = sorted(cd, key=lambda x: abs(x[0]), reverse=True)
# Print top 10 coefficients with descriptions
print('Top feature coefficients:')
for coeff, description in cd_sorted[:10]:
print(' ', description, ':', coeff)
# Save all feature coefficients to a file, if set
if feat_coeffs_out_tsv_f is not None:
for coeff, description in cd_sorted:
row = [model_name, feats, description, coeff]
line = '\t'.join(str(c) for c in row)
feat_coeffs_out_tsv_f.write(line + '\n')
# Test model
y_pred = reg.predict(x_test[feats])
# Compute metrics
mse = sklearn.metrics.mean_squared_error(y_test[feats], y_pred)
mae = sklearn.metrics.mean_absolute_error(y_test[feats], y_pred)
R2 = sklearn.metrics.r2_score(y_test[feats], y_pred)
r, _ = scipy.stats.pearsonr(y_test[feats], y_pred)
rho, _ = scipy.stats.spearmanr(y_test[feats], y_pred)
# Note that R2 does not necessarily equal r^2 here. The value R2
# is computed by definition of R^2 (1 minus (residual sum of
# squares)/(total sum of squares)) from the true vs. predicted
# values. This is why R2 can be negative: it can do an even worse
# job with prediction than just predicting the mean. r is computed
# by simple least squares regression between y_test and y_pred, and
# finding the Pearson's r of this curve (since this is simple
# linear regression, r^2 should be nonnegative). The value R2 measures
# the goodness-of-fit of the specific linear correlation
# y_pred = y_test, whereas r measures the correlation from the
# regression (y_pred = m*y_test + b).
# Print metrics
print('#'*20)
print("Regression with {}".format(model_desc))
if type(hyperparams) is list:
for p in hyperparams:
print(" best {} = {}".format(p, getattr(reg, p)))
else:
print(" best params = {}".format(hyperparams.best_params_))
print(" MSE = {}".format(mse))
print(" MAE = {}".format(mae))
print(" R^2 = {}".format(R2))
print(" r = {}".format(r))
print(" rho = {}".format(rho))
print('#'*20)
return {'mse': mse, 'mae': mae, 'r2': R2, 'r': r, 'rho': rho,
'1_minus_rho': 1.0-rho}
# Linear regression (no regularization)
def lr(feats):
reg = sklearn.linear_model.LinearRegression(copy_X=True) # no CV because there are no hyperparameters
return fit_and_test_model(reg, 'lr', 'Linear regression',
hyperparams=[], feats=feats,
inspect_feat_coeffs=True)
# Note:
# For below models, increasing `max_iter` or increasing `tol` can reduce
# the warning 'ConvergenceWarning: Objective did not converge.'
# L1 linear regression
def l1_lr(feats):
reg = sklearn.linear_model.Lasso(max_iter=1000, tol=0.0001, copy_X=True)
reg_cv = random_search_cv('l1_lr', reg, cv(feats), scorer)
return fit_and_test_model(reg_cv, 'l1_lr', 'L1 linear regression',
hyperparams=reg_cv, feats=feats,
inspect_feat_coeffs=True)
# L2 linear regression
def l2_lr(feats):
reg = sklearn.linear_model.Ridge(max_iter=1000, tol=0.0001, copy_X=True)
reg_cv = random_search_cv('l2_lr', reg, cv(feats), scorer)
return fit_and_test_model(reg_cv, 'l2_lr', 'L2 linear regression',
hyperparams=reg_cv, feats=feats,
inspect_feat_coeffs=True)
# Elastic net (L1+L2 linear regression)
# Recommendation for l1_ratio is to place more values close to 1 (lasso)
# and fewer closer to 0 (ridge)
# A note to explain some potential confusion in the choice of
# l1_ratio: Ridge might be better than Lasso according to rho, but
# l1_ratio could still be chosen to be high (close to Lasso)
# especially if Lasso/Ridge are close; in part, this could be because
# fit_and_test_model() prints values on a hold-out set, but chooses
# hyperparameters on splits of the train set
def l1l2_lr(feats):
reg = sklearn.linear_model.ElasticNet(max_iter=1000, tol=0.0001, copy_X=True)
reg_cv = random_search_cv('l1l2_lr', reg, cv(feats), scorer)
return fit_and_test_model(reg_cv, 'l1l2_lr', 'L1+L2 linear regression',
hyperparams=reg_cv, feats=feats,
inspect_feat_coeffs=True)
# Gradient-boosted regression trees
def gbt(feats):
reg = sklearn.ensemble.GradientBoostingRegressor(loss='ls', tol=0.0001)
reg_cv = random_search_cv('gbt', reg, cv(feats), scorer)
return fit_and_test_model(reg_cv, 'gbt', 'Gradient Boosting regression',
hyperparams=reg_cv, feats=feats)
# Random forest regression
def rf(feats):
reg = sklearn.ensemble.RandomForestRegressor(criterion='mse')
reg_cv = random_search_cv('rf', reg, cv(feats), scorer)
return fit_and_test_model(reg_cv, 'rf', 'Random forest regression',
hyperparams=reg_cv, feats=feats)
# MLP
def mlp(feats):
reg = fnn.MultilayerPerceptron(parsers[feats].context_nt,
regression=True)
reg_cv = random_search_cv('mlp', reg, cv(feats), scorer)
return fit_and_test_model(reg_cv, 'mlp', 'Multilayer perceptron',
hyperparams=reg_cv, feats=feats)
# LSTM
def lstm(feats):
reg = rnn.LSTM(parsers[feats].context_nt,
regression=True)
reg_cv = random_search_cv('lstm', reg, cv(feats), scorer)
return fit_and_test_model(reg_cv, 'lstm', 'LSTM',
hyperparams=reg_cv, feats=feats)
metrics_for_models = {}
for model_name in models_to_use:
metrics_for_models[model_name] = {}
for feats in input_feats[model_name]:
print(("Running and evaluating model '%s' with input feature '%s'") %
(model_name, feats))
model_fn = locals()[model_name]
metrics_for_models[model_name][feats] = model_fn(feats)
return metrics_for_models
def nested_cross_validate(x, y, num_outer_splits,
regression, parsers, regression_scoring_method=None,
models_to_use=None, feat_coeffs_out_tsv_f=None,
outer_splits_to_run=None):
"""Perform nested cross-validation to validate model and search.
Args:
x: input data
y: labels
num_outer_splits: number of folds in the outer cross-validation
procedure
regression: True if performing regression; False if classification
parsers: parse_data parsers to use for splitting data
regression_scoring_method: if regression, method to use for
evaluating a model ('mse' or 'rho')
models_to_use: list of models to test; if None, test all
feat_coeffs_out_tsv_f: if set, file handler to which to write
coefficients for each feature (linear models only; only for
the best estimator after hyperparameter search)
outer_splits_to_run: if set, a list of outer splits to run (0-based);
if not set, run all
Returns:
list x where each x[i] is an output of regress() or classify() on
an outer fold
"""
fold_results = []
i = 0
outer_split_iters = []
outer_split_iters_feats = []
for k in parsers.keys():
outer_split_iters += [parsers[k].split(x[k], y[k],
num_splits=num_outer_splits, stratify_by_pos=True)]
outer_split_iters_feats += [k]
for xy in zip(*outer_split_iters):
print('STARTING OUTER FOLD {} of {}'.format(i+1, num_outer_splits))
if outer_splits_to_run is not None:
if i not in outer_splits_to_run:
print(' Skipping this outer split')
fold_results += [None]
i += 1
# Advance random number generator
random.random()
np.random.random()
tf.random.uniform([1])
continue
x_train = {}
y_train = {}
x_validate = {}
y_validate = {}
for k, xy_k in zip(outer_split_iters_feats, xy):
x_train[k], y_train[k], x_validate[k], y_validate[k] = xy_k
assert len(x_train[k]) == len(y_train[k])
assert len(x_validate[k]) == len(y_validate[k])
print(('Input feats {}: There are n={} train points and n={} '
'validation points').format(
k, len(x_train[k]), len(x_validate[k])))
# Search for hyperparameters on this outer fold of the data
if regression:
metrics_for_models = regress(x_train, y_train,
x_validate, y_validate,
parsers,
scoring_method=regression_scoring_method,
models_to_use=models_to_use,
feat_coeffs_out_tsv_f=feat_coeffs_out_tsv_f)
else:
metrics_for_models = classify(x_train, y_train,
x_validate, y_validate, parsers,
models_to_use=models_to_use,
feat_coeffs_out_tsv_f=feat_coeffs_out_tsv_f)
fold_results += [metrics_for_models]
# Print metrics on this fold
print('Results on fold {}'.format(i+1))
print(' Metrics on validation data')
for model in metrics_for_models.keys():
print(' Model: {}'.format(model))
for feats in metrics_for_models[model].keys():
print(' Input feats: {}'.format(feats))
for metric in metrics_for_models[model][feats].keys():
print(' {} = {}'.format(metric,
metrics_for_models[model][feats][metric]))
print(('FINISHED OUTER FOLD {} of {}').format(i+1, num_outer_splits))
i += 1
return fold_results
def main():
# Read arguments
args = parse_args()
# Set seed
set_seed(args.seed)
# Read data multiple times, each with different types of features
x_train = {}
y_train = {}
x_validate = {}
y_validate = {}
x_test = {}
y_test = {}
parsers = {}
split_frac = (1.0 - args.test_split_frac, 0.0, args.test_split_frac)
for feats in ['onehot', 'onehot-flat', 'onehot-simple', 'handcrafted',
'combined']:
if feats == 'onehot':
# For parse_data, treat this as not constructing features for
# baseline
make_feats_for_baseline = None
else:
make_feats_for_baseline = feats
data_parser = predictor.read_data(args,
split_frac=split_frac,
make_feats_for_baseline=make_feats_for_baseline)
x_train[feats], y_train[feats] = data_parser.train_set()
x_validate[feats], y_validate[feats] = data_parser.validate_set()
x_test[feats], y_test[feats] = data_parser.test_set()
parsers[feats] = data_parser
# Convert column to 1D array
y_train[feats] = y_train[feats].ravel()
y_validate[feats] = y_validate[feats].ravel()
y_test[feats] = y_test[feats].ravel()
# Determine, based on the dataset, whether to do regression or
# classification
if args.dataset == 'cas13':
if args.cas13_classify:
regression = False
else:
regression = True
if args.nested_cross_val:
# Perform nested cross-validation
if args.test_split_frac > 0:
print(('WARNING: Performing nested cross-validation but there is '
'unused test data; it may make sense to set '
'--test-split-frac to 0'))
if args.nested_cross_val_feat_coeffs_out_tsv:
feat_coeffs_out_tsv_f = open(
args.nested_cross_val_feat_coeffs_out_tsv,
'w')
header = ['model', 'feats_type', 'feat_description', 'coeff']
feat_coeffs_out_tsv_f.write('\t'.join(header) + '\n')
else:
feat_coeffs_out_tsv_f = None
fold_results = nested_cross_validate(x_train, y_train,
args.nested_cross_val_outer_num_splits,
regression,
parsers,
regression_scoring_method=args.regression_scoring_method,
models_to_use=args.models_to_use,
feat_coeffs_out_tsv_f=feat_coeffs_out_tsv_f,
outer_splits_to_run=args.nested_cross_val_run_for)
if feat_coeffs_out_tsv_f is not None:
feat_coeffs_out_tsv_f.close()
if args.nested_cross_val_out_tsv:
# Write fold results to a tsv file
if regression:
metrics = ['mse', 'mae', 'r', 'r2', 'rho']
else:
metrics = ['auc-roc', 'auc-pr', 'avg-prec', 'accuracy']
with open(args.nested_cross_val_out_tsv, 'w') as fw:
header = ['fold', 'model', 'feats_type'] + metrics
fw.write('\t'.join(header) + '\n')
for fold in range(len(fold_results)):
metrics_for_models = fold_results[fold]
if metrics_for_models is None:
continue
for model in metrics_for_models.keys():
for feats in metrics_for_models[model].keys():
row = [fold, model, feats]
for metric in metrics:
row += [metrics_for_models[model][feats][metric]]
fw.write('\t'.join(str(r) for r in row) + '\n')
else:
# Simply perform a hyperparameter search for each model
if regression:
regress(x_train, y_train, x_test, y_test,
parsers,
scoring_method=args.regression_scoring_method,
models_to_use=args.models_to_use)
else:
classify(x_train, y_train, x_test, y_test, parsers,
models_to_use=args.models_to_use)
if __name__ == "__main__":
main()
```
#### File: jingyi7777/adapt-seq-design/predictor.py
```python
import argparse
from collections import defaultdict
import gzip
import os
import pickle
import fnn
import parse_data
import numpy as np
import scipy
import sklearn
import sklearn.metrics
import tensorflow as tf
__author__ = '<NAME> <<EMAIL>>'
def parse_args():
"""Parse arguments.
Returns:
argument namespace
"""
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--load-model',
help=("Path from which to load parameters and model weights "
"for model found by hyperparameter search; if set, "
"any other arguments provided about the model "
"architecture or hyperparameters will be overridden and "
"this will skip training and only test the model"))
parser.add_argument('--load-model-as-tf-savedmodel',
help=("Path to directory containing a model in TensorFlow's "
"SavedModel architecture; this cannot be set along "
"with --load-model"))
parser.add_argument('--dataset',
choices=['cas13'],
default='cas13',
help=("Dataset to use."))
parser.add_argument('--cas13-subset',
choices=['exp', 'pos', 'neg', 'exp-and-pos'],
help=("Use a subset of the Cas13 data. See parse_data module "
"for descriptions of the subsets. To use all data, do not "
"set."))
parser.add_argument('--cas13-classify',
action='store_true',
help=("If set, only classify Cas13 activity into inactive/active"))
parser.add_argument('--cas13-regress-on-all',
action='store_true',
help=("If set, perform regression for Cas13 data on all data "
"(this can be reduced using --cas13-subset)"))
parser.add_argument('--cas13-regress-only-on-active',
action='store_true',
help=("If set, perform regression for Cas13 data only on the "
"active class"))
parser.add_argument('--cas13-normalize-crrna-activity',
action='store_true',
help=("If set, normalize the activity of each crRNA (guide) "
"across its targets to have mean 0 and stdev 1; this means "
"prediction is performed based on target differences (e.g., "
"mismatches) rather than inherent sequence of the crRNA"))
parser.add_argument('--cas13-use-difference-from-wildtype-activity',
action='store_true',
help=("If set, use the activity value of a guide g and target t "
"pair to be the difference between the measured activity of "
"g-t and the mean activity between g and all wildtype "
"(matching) targets of g; this means prediction is "
"performed based on targeted differences (e.g., mismatches) "
"rather than inherent sequence of the crRNA"))
parser.add_argument('--use-median-measurement',
action='store_true',
help=("If set, use the median measurment across replicates "
"(instead, resample)"))
parser.add_argument('--context-nt',
type=int,
default=10,
help=("nt of target sequence context to include alongside each "
"guide"))
parser.add_argument('--conv-filter-width',
type=int,
nargs='+',
help=("Width of the convolutional filter (nt) (or multiple widths "
"to perform parallel convolutions). If not set, do not "
"use convolutional layers (or the batch norm or pooling "
"that follow it)."))
parser.add_argument('--conv-num-filters',
type=int,
default=20,
help=("Number of convolutional filters (i.e., output channels) "
"in the first layer"))
parser.add_argument('--pool-window-width',
type=int,
default=2,
help=("Width of the pooling window"))
parser.add_argument('--fully-connected-dim',
type=int,
nargs='+',
default=[20],
help=("Dimension of each fully connected layer (i.e., of its "
"output space); specify multiple dimensions for multiple "
"fully connected layers"))
parser.add_argument('--pool-strategy',
choices=['max', 'avg', 'max-and-avg'],
default='max',
help=("For pooling, 'max' only does max pooling; 'avg' only does "
"average pooling; 'max-and-avg' does both and concatenates."))
parser.add_argument('--locally-connected-width',
type=int,
nargs='+',
help=("If set, width (kernel size) of the locally connected layer. "
"Use multiple widths to have parallel locally connected layers "
"that get concatenated. If not set, do not use have a locally "
"connected layer."))
parser.add_argument('--locally-connected-dim',
type=int,
default=1,
help=("Dimension of each locally connected layer (i.e., of its "
"output space)"))
parser.add_argument('--skip-batch-norm',
action='store_true',
help=("If set, skip batch normalization layer"))
parser.add_argument('--add-gc-content',
action='store_true',
help=("If set, add GC content of a guide explicitly into the "
"first fully connected layer of the predictor"))
parser.add_argument('--activation-fn',
choices=['relu', 'elu'],
default='relu',
help=("Activation function to use on hidden layers"))
parser.add_argument('--dropout-rate',
type=float,
default=0.25,
help=("Rate of dropout in between the 2 fully connected layers"))
parser.add_argument('--l2-factor',
type=float,
default=0,
help=("L2 regularization factor. This is applied to weights "
"(kernal_regularizer). Note that this does not regularize "
"bias of activity."))
parser.add_argument('--sample-weight-scaling-factor',
type=float,
default=0,
help=("Hyperparameter p where sample weight is (1 + p*["
"difference in activity from mean wildtype activity]); "
"p must be >= 0. Note that p=0 means that all samples are "
"weighted the same; higher p means that guide-target pairs "
"whose activity deviates from the wildtype from the guide "
"are treated as more important. This is only used for "
"regression."))
parser.add_argument('--batch-size',
type=int,
default=32,
help=("Batch size"))
parser.add_argument('--learning-rate',
type=float,
default=0.00001,
help=("Learning rate for Adam optimizer"))
parser.add_argument('--max-num-epochs',
type=int,
default=1000,
help=("Maximum number of training epochs (this employs early "
"stopping)"))
parser.add_argument('--test-split-frac',
type=float,
default=0.3,
help=("Fraction of the dataset to use for testing the final "
"model"))
parser.add_argument('--seed',
type=int,
default=1,
help=("Random seed"))
parser.add_argument('--serialize-model-with-tf-savedmodel',
help=("Serialize the model with TensorFlow's SavedModel format. "
"This should be a directory in which to serialize the "
"model; this saves the entire model (architecture, "
"weights, training configuration"))
parser.add_argument('--plot-roc-curve',
help=("If set, path to PDF at which to save plot of ROC curve"))
parser.add_argument('--plot-predictions',
help=("If set, path to PDF at which to save plot of predictions "
"vs. true values"))
parser.add_argument('--write-test-tsv',
help=("If set, path to .tsv.gz at which to write test results, "
"including sequences in the test set and predictions "
"(one row per test data point)"))
parser.add_argument('--determine-classifier-threshold-for-precision',
type=float,
default=0.975,
help=("If set, determine thresholds (across folds) that "
"achieve this precision; does not use test data"))
parser.add_argument('--filter-test-data-by-classification-score',
nargs=2,
help=("If set, only test on data that is classified as active. "
"This consists of 2 arguments: (1) path to TSV file "
"written by test functions with classification scores; "
"(2) score to use as threshold for classifying (>= "
"threshold is active). This is useful when evaluating "
"regression models trained on active data points; we "
"want to test only on data that has been classified as "
"active."))
args = parser.parse_args()
# Print the arguments provided
print(args)
return args
def set_seed(seed):
"""Set tensorflow and numpy seed.
Args:
seed: random seed
"""
tf.random.set_seed(seed)
np.random.seed(seed)
def read_data(args, split_frac=None, make_feats_for_baseline=None):
"""Read input/output data.
Args:
args: argument namespace
split_frac: if set, (train, validate, test) fractions (must sum
to 1); if None, use 0.3 for the test set, 0.7*(2/3) for the
train set, and 0.7*(1/3) for the validate set
make_feats_for_baseline: if set, make feature vector for baseline
models; see parse_data module for description of values
Returns:
data parser object from parse_data
"""
if make_feats_for_baseline is not None and args.dataset != 'cas13':
raise Exception("make_feats_for_baseline only works with Cas13 data")
# Read data
if args.dataset == 'cas13':
parser_class = parse_data.Cas13ActivityParser
subset = args.cas13_subset
if args.cas13_classify:
regression = False
else:
regression = True
if split_frac is None:
test_frac = 0.3
train_frac = (1.0 - test_frac) * (2.0/3.0)
validation_frac = (1.0 - test_frac) * (1.0/3.0)
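# e.g., with the default test_frac=0.3, this gives train_frac ~ 0.467
# and validation_frac ~ 0.233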
else:
train_frac, validation_frac, test_frac = split_frac
data_parser = parser_class(
subset=subset,
context_nt=args.context_nt,
split=(train_frac, validation_frac, test_frac),
shuffle_seed=args.seed,
stratify_by_pos=True,
use_median_measurement=args.use_median_measurement)
if args.dataset == 'cas13':
classify_activity = args.cas13_classify
regress_on_all = args.cas13_regress_on_all
regress_only_on_active = args.cas13_regress_only_on_active
data_parser.set_activity_mode(
classify_activity, regress_on_all, regress_only_on_active)
if make_feats_for_baseline is not None:
data_parser.set_make_feats_for_baseline(make_feats_for_baseline)
if args.cas13_normalize_crrna_activity:
data_parser.set_normalize_crrna_activity()
if args.cas13_use_difference_from_wildtype_activity:
data_parser.set_use_difference_from_wildtype_activity()
data_parser.read()
x_train, y_train = data_parser.train_set()
x_validate, y_validate = data_parser.validate_set()
x_test, y_test = data_parser.test_set()
# Print the size of each data set
data_sizes = 'DATA SIZES - Train: {}, Validate: {}, Test: {}'
print(data_sizes.format(len(x_train), len(x_validate), len(x_test)))
if regression:
# Print the mean outputs and its variance
print('Mean train output: {}'.format(np.mean(y_train)))
print('Variance of train output: {}'.format(np.var(y_train)))
else:
# Print the fraction of the training data points that are in each class
classes = set(tuple(y) for y in y_train)
for c in classes:
num_c = sum(1 for y in y_train if tuple(y) == c)
frac_c = float(num_c) / len(y_train)
frac_c_msg = 'Fraction of train data in class {}: {}'
print(frac_c_msg.format(c, frac_c))
if len(x_validate) == 0:
print('No validation data')
else:
for c in classes:
num_c = sum(1 for y in y_validate if tuple(y) == c)
frac_c = float(num_c) / len(y_validate)
frac_c_msg = 'Fraction of validate data in class {}: {}'
print(frac_c_msg.format(c, frac_c))
for c in classes:
num_c = sum(1 for y in y_test if tuple(y) == c)
frac_c = float(num_c) / len(y_test)
frac_c_msg = 'Fraction of test data in class {}: {}'
print(frac_c_msg.format(c, frac_c))
if args.dataset == 'cas13' and args.cas13_classify:
print('Note that inactive=1 and active=0')
return data_parser
def make_dataset_and_batch(x, y, batch_size=32):
"""Make tensorflow dataset and batch.
Args:
x: input data
y: outputs (labels if classification)
batch_size: batch size
Returns:
batched tf.data.Dataset object
"""
return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
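# Minimal usage sketch (x_tr and y_tr are hypothetical train arrays):
#   train_ds = make_dataset_and_batch(x_tr, y_tr, batch_size=32)
#   for seqs, outputs in train_ds:
#       pass  # e.g., call train_step(seqs, outputs) on each batch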
def load_model(load_path, params, x_train, y_train):
"""Construct model and load weights according to hyperparameter search.
Args:
load_path: path containing model weights
params: dict of parameters
x_train, y_train: train data (only needed for data shape and class
weights)
Returns:
fnn.CasCNNWithParallelFilters object
"""
# First construct the model
model = construct_model(params, x_train.shape,
regression=params['regression'],
y_train=y_train,
compile_for_keras=True)
# Note: Previously, this had to train the model on one data point (for 1
# epoch) before calling `load_weights`. The reason was to initialize
# variables used by the optimizers and any stateful metric variables;
# without running the model on some data first, there are no variables in
# the model and nothing gets loaded. This is no longer needed with Keras.
# See https://www.tensorflow.org/beta/guide/keras/saving_and_serializing
# for details on loading a serialized subclassed model
def copy_weights(model):
# Copy weights, so we can verify that they changed after loading
return [tf.Variable(w) for w in model.weights]
def weights_are_eq(weights1, weights2):
# Determine whether weights1 == weights2
for w1, w2 in zip(weights1, weights2):
# 'w1' and 'w2' are each collections of weights (e.g., the kernel
# for some layer); they are tf.Variable objects (effectively,
# tensors)
# Make a tensor containing element-wise boolean comparisons (it
# is a 1D tensor with True/False)
elwise_eq = tf.equal(w1, w2)
# Check if all elements in 'elwise_eq' are True (this will make a
# Tensor with one element, True or False)
all_are_eq_tensor = tf.reduce_all(elwise_eq)
# Convert the tensor 'all_are_eq_tensor' to a boolean
all_are_eq = all_are_eq_tensor.numpy()
if not all_are_eq:
return False
return True
def load_weights(model, fn):
# Load weights
# There are some concerns about whether weights are actually being
# loaded (e.g., https://github.com/tensorflow/tensorflow/issues/27937),
# so check that they have changed after calling `load_weights`
# Use expect_partial() to silence warnings because this will not
# load optimizer parameters, which are loaded in construct_model()
w_before = copy_weights(model)
w_before2 = copy_weights(model)
model.load_weights(os.path.join(load_path, fn)).expect_partial()
w_after = copy_weights(model)
w_after2 = copy_weights(model)
assert (weights_are_eq(w_before, w_before2) is True)
assert (weights_are_eq(w_before, w_after) is False)
assert (weights_are_eq(w_after, w_after2) is True)
load_weights(model, 'model.weights')
return model
def construct_model(params, shape, regression=False, compile_for_keras=True,
y_train=None, parallelize_over_gpus=False):
"""Construct model.
This uses the fnn module.
This can also compile the model for Keras, to use multiple GPUs if
available.
Args:
params: dict of hyperparameters
shape: shape of input data; only used for printing model summary
regression: if True, perform regression; if False, classification
compile_for_keras: if set, compile for keras
y_train: training data to use for computing class weights; only needed
if compile_for_keras is True and regression is False
parallelize_over_gpus: if True, parallelize over all available GPUs
Returns:
fnn.CasCNNWithParallelFilters object
"""
if not compile_for_keras:
# Just return a model
return fnn.construct_model(params, shape, regression=regression)
def make():
model = fnn.construct_model(params, shape, regression=regression)
# Define an optimizer, loss, metrics, etc.
if model.regression:
# When doing regression, sometimes the output would always be the
# same value regardless of input; decreasing the learning rate fixed this
optimizer = tf.keras.optimizers.Adam(lr=model.learning_rate)
loss = 'mse'
# Note that using other custom metrics like R^2, Pearson, etc. (as
# implemented below) seems to raise errors; they are really only
# needed during testing
metrics = ['mse', 'mae']
model.class_weight = None
else:
optimizer = tf.keras.optimizers.Adam(lr=model.learning_rate)
loss = 'binary_crossentropy' # using class_weight should weight
# Note that using other custom metrics like auROC (as implemented
# below) seems to raise errors; they are really only needed during
# testing
metrics = ['bce', 'accuracy']
assert y_train is not None
y_train_labels = [y_train[i][0] for i in range(len(y_train))]
class_weight = sklearn.utils.class_weight.compute_class_weight(
'balanced', sorted(np.unique(y_train_labels)), y_train_labels)
model.class_weight = {i: weight for i, weight in enumerate(class_weight)}
# Compile the model
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
return model
if parallelize_over_gpus:
# Use a MirroredStrategy to take advantage of multiple GPUs, if there are
# multiple
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = make()
else:
model = make()
return model
def pred_from_nt(model, pairs):
"""Predict activity from nucleotide sequence.
Args:
model: model object with call() function
pairs: list of tuples (target with context, guide)
Returns:
output of model.call()
"""
FASTA_CODES = {'A': set(('A')),
'T': set(('T')),
'C': set(('C')),
'G': set(('G')),
'K': set(('G', 'T')),
'M': set(('A', 'C')),
'R': set(('A', 'G')),
'Y': set(('C', 'T')),
'S': set(('C', 'G')),
'W': set(('A', 'T')),
'B': set(('C', 'G', 'T')),
'V': set(('A', 'C', 'G')),
'H': set(('A', 'C', 'T')),
'D': set(('A', 'G', 'T')),
'N': set(('A', 'T', 'C', 'G'))}
onehot_idx = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
def onehot(b):
# One-hot encoding of base b
real_bases = FASTA_CODES[b]
v = [0, 0, 0, 0]
for b_real in real_bases:
assert b_real in onehot_idx.keys()
v[onehot_idx[b_real]] = 1.0 / len(real_bases)
return v
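# e.g., onehot('A') == [1, 0, 0, 0]; for an ambiguous base such as 'R'
# (A or G), the mass is split evenly: onehot('R') == [0.5, 0, 0.5, 0]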
context_nt = model.context_nt
l = 2*context_nt + len(pairs[0][1])
x = np.empty((len(pairs), l, 8), dtype='f')
for i, (target_with_context, guide) in enumerate(pairs):
assert len(target_with_context) == 2*context_nt + len(guide)
# Determine one-hot encodings -- i.e., an input vector
input_vec = []
for pos in range(context_nt):
v_target = onehot(target_with_context[pos])
v_guide = [0, 0, 0, 0]
input_vec += [v_target + v_guide]
for pos in range(len(guide)):
v_target = onehot(target_with_context[context_nt + pos])
v_guide = onehot(guide[pos])
input_vec += [v_target + v_guide]
for pos in range(context_nt):
v_target = onehot(target_with_context[context_nt + len(guide) + pos])
v_guide = [0, 0, 0, 0]
input_vec += [v_target + v_guide]
input_vec = np.array(input_vec, dtype='f')
x[i] = input_vec
pred_activity = model.call(x, training=False)
pred_activity = [p[0] for p in pred_activity.numpy()]
return pred_activity
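# Illustrative call (hypothetical sequences, assuming model.context_nt == 10
# and a 28 nt guide, so each target must be 2*10 + 28 = 48 nt):
#   pairs = [('ACGT' * 12, 'ACGT' * 7)]
#   activities = pred_from_nt(model, pairs)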
def load_model_for_cas13_regression_on_active(load_path):
"""Construct model and load parameters and weights.
This wraps load_model(), without the need to specify x_train, etc. for
initializing variables.
Args:
load_path: path containing model weights
Returns:
fnn.CasCNNWithParallelFilters object
"""
# Load parameters
load_path_params = os.path.join(load_path,
'model.params.pkl')
with open(load_path_params, 'rb') as f:
saved_params = pickle.load(f)
params = {'dataset': 'cas13', 'cas13_subset': 'exp-and-pos',
'cas13_regress_only_on_active': True}
for k, v in saved_params.items():
params[k] = v
# Load data; we only need 1 data point, which is used to initialize
# variables
parser_class = parse_data.Cas13ActivityParser
subset = 'exp-and-pos'
regression = True
test_frac = 0.3
train_frac = (1.0 - test_frac) * (2.0/3.0)
validation_frac = (1.0 - test_frac) * (1.0/3.0)
context_nt = params['context_nt']
data_parser = parser_class(
subset=subset,
context_nt=context_nt,
split=(train_frac, validation_frac, test_frac),
shuffle_seed=1,
stratify_by_pos=True)
data_parser.set_activity_mode(False, False, True)
data_parser.read()
x_train, y_train = data_parser.train_set()
# Load the model
return load_model(load_path, params, x_train, y_train)
def determine_classifier_threshold_for_precision(params, x, y,
num_splits, data_parser, precision_threshold):
"""Find a threshold, via cross-valiation, to achieve a desired precision.
This focuses on precision because it is an important metric for
deploying assays.
It finds the smallest threshold that achieves a desired precision.
It does this across multiple splits of the training data.
Args:
params: model parameters (model should *not* be pre-trained)
x, y: data to perform cross-validation with
num_splits: number of folds to compute threshold
data_parser: object to parse data from parse_data
precision_threshold: desired threshold on precision
Returns:
list of thresholds, one per split
"""
# Construct a function that the test function will callback
best_thresholds = []
def find_threshold(y_true, y_pred):
# Compute threshold
pr_curve = sklearn.metrics.precision_recall_curve(y_true, y_pred)
precision, recall, thresholds = pr_curve
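# Note: precision and recall each have one more element than thresholds
# (the final precision is 1.0 with no corresponding threshold), so this
# assumes the desired precision is reached at some real threshold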
# Find the smallest threshold (lowest i) where precision is
# >= precision_threshold
for i, prec in enumerate(precision):
if prec >= precision_threshold:
thres = float(thresholds[i])
break
best_thresholds.append(thres)
import predictor_hyperparam_search as phs
phs.cross_validate(params, x, y, num_splits, False,
callback=find_threshold, dp=data_parser)
return best_thresholds
def filter_test_data_by_classification_score(x_test, y_test,
data_parser, classification_test_tsv, score_threshold):
"""Select test data points that are classified as positive.
This is useful if we wish to evaluate regression that was trained on active
data points. We would first classify the test data, and then only
evaluate regression using the data points that are classified
as active.
Args:
x_test, y_test: test data
data_parser: object to parse data from parse_data
classification_test_tsv: the output TSV file ('write_test_tsv')
written by the test functions
score_threshold: classification score (between 0 and 1); deem
all test data with scores >= SCORE_THRESHOLD to be
active/positive
Returns:
list of tuples (x_test, y_test), filtered to only
contain active data points
"""
# Read all rows in classification_test_tsv
header_idx = {}
rows = []
with gzip.open(classification_test_tsv, 'rt') as f:
for i, line in enumerate(f):
ls = line.rstrip().split('\t')
if i == 0:
# Parse header
for j in range(len(ls)):
header_idx[ls[j]] = j
else:
rows += [ls]
rows_new = []
for row in rows:
row_dict = {k: row[header_idx[k]] for k in header_idx.keys()}
rows_new += [row_dict]
rows = rows_new
# Convert x_test, y_test into the same encoding that is
# used in the test TSV; keep just the target and guide
# as strings, and crRNA position, which is enough to
# uniquely identify data points (up to technical replicates)
# In particular, map a tuple of that to indices of the test
# data
encoding_idx = defaultdict(list)
for i in range(len(x_test)):
m = data_parser.seq_features_from_encoding(x_test[i])
crrna_pos = data_parser.pos_for_input(x_test[i])
enc = (m['target'], m['guide'], crrna_pos)
encoding_idx[enc].append(i)
# Keep all test data that is classified as active
# Note that there are replicates, but all the replicates for a single
# guide-target pair will have the same classification score because the
# classification is deterministic and depends only on the guide-target
# sequence (however their measured activity may differ). So if a
# guide-target pair is classified as active, keep all of its replicates;
# and if it is classified as inactive, discard all replicates
x_test_filtered, y_test_filtered = [], []
added_enc = set()
for row in rows:
enc = (row['target'], row['guide'], int(row['crrna_pos']))
if enc in added_enc:
# Already added data points for this
continue
if float(row['predicted_activity']) >= score_threshold:
# Classify this as active, and add all data points (replicates)
# for this guide-target pair
for i in encoding_idx[enc]:
x_test_filtered.append(x_test[i])
y_test_filtered.append(y_test[i])
added_enc.add(enc)
x_test_filtered = np.array(x_test_filtered)
y_test_filtered = np.array(y_test_filtered)
return x_test_filtered, y_test_filtered
#####################################################################
#####################################################################
# Custom functions for training and testing
#####################################################################
#####################################################################
# For classification, use cross-entropy as the loss function
# This expects sigmoids (values in [0,1]) as the output; it will
# transform back to logits (not bounded between 0 and 1) before
# calling tf.nn.sigmoid_cross_entropy_with_logits
bce_per_sample = tf.keras.losses.BinaryCrossentropy()
# For regression, use mean squared error as the loss function
mse_per_sample = tf.keras.losses.MeanSquaredError()
# When outputting loss, take the mean across the samples from each batch
train_loss_metric = tf.keras.metrics.Mean(name='train_loss')
validate_loss_metric = tf.keras.metrics.Mean(name='validate_loss')
test_loss_metric = tf.keras.metrics.Mean(name='test_loss')
# Define metrics for regression
# tf.keras.metrics does not have Pearson correlation or Spearman's correlation,
# so we have to define these; note that it becomes much easier to use these
# outside of the tf.function functions rather than inside of them (like the
# other metrics are used)
# This also defines a metric for R^2 (below, R2Score)
# Note that R2Score does not necessarily equal r^2 here, where r is
# pearson_corr. The value R2Score is computed by definition of R^2 (1 minus
# (residual sum of squares)/(total sum of squares)) from the true vs. predicted
# values. This is why R2Score can be negative: it can do an even worse job with
# prediction than just predicting the mean. r is computed by simple least
# squares regression between y_true and y_pred, and finding the Pearson's r of
# this curve (since this is simple linear regression, r^2 should be
# nonnegative). The value R2 measures the goodness-of-fit of the specific
# linear correlation y_pred = y_true, whereas r measures the correlation from
# the regression (y_pred = m*y_true + b).
def pearson_corr(y_true, y_pred):
if len(y_true) < 2:
# Avoid exception
r = np.nan
else:
r, _ = scipy.stats.pearsonr(y_true, y_pred)
return r
def spearman_corr(y_true, y_pred):
if len(y_true) < 2:
# Avoid exception
rho = np.nan
else:
rho, _ = scipy.stats.spearmanr(y_true, y_pred)
return rho
class CustomMetric:
def __init__(self, name):
self.__name__ = name
self.y_true = []
self.y_pred = []
def __call__(self, y_true, y_pred):
# Save y_true and y_pred (tensors) into a list
self.y_true += [y_true]
self.y_pred += [y_pred]
def to_np_array(self):
# Concat tensors and convert to numpy arrays
y_true_np = tf.reshape(tf.concat(self.y_true, 0), [-1]).numpy()
y_pred_np = tf.reshape(tf.concat(self.y_pred, 0), [-1]).numpy()
return y_true_np, y_pred_np
def result(self):
raise NotImplementedError("result() must be implemented in a subclass")
def reset_states(self):
self.y_true = []
self.y_pred = []
class Correlation(CustomMetric):
def __init__(self, corrtype, name='correlation'):
assert corrtype in ('pearson_corr', 'spearman_corr')
if corrtype == 'pearson_corr':
self.corr_fn = pearson_corr
if corrtype == 'spearman_corr':
self.corr_fn = spearman_corr
super().__init__(name)
def result(self):
y_true_np, y_pred_np = super(Correlation, self).to_np_array()
return self.corr_fn(y_true_np, y_pred_np)
class R2Score(CustomMetric):
def __init__(self, name='r2_score'):
super().__init__(name)
def result(self):
y_true_np, y_pred_np = super(R2Score, self).to_np_array()
return sklearn.metrics.r2_score(y_true_np, y_pred_np)
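# Usage sketch for the custom metrics above: they accumulate (y_true, y_pred)
# tensors batch-by-batch and compute the statistic over the whole epoch, e.g.
#   rho_metric = Correlation('spearman_corr')
#   rho_metric(y_true_batch, y_pred_batch)   # call once per batch
#   epoch_rho = rho_metric.result()
#   rho_metric.reset_states()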
train_mse_metric = tf.keras.metrics.MeanSquaredError(name='train_mse')
train_mse_weighted_metric = tf.keras.metrics.MeanSquaredError(name='train_mse_weighted')
train_mae_metric = tf.keras.metrics.MeanAbsoluteError(name='train_mae')
train_mape_metric = tf.keras.metrics.MeanAbsolutePercentageError(name='train_mape')
train_r2_score_metric = R2Score(name='train_r2_score')
train_pearson_corr_metric = Correlation('pearson_corr', name='train_pearson_corr')
train_spearman_corr_metric = Correlation('spearman_corr', name='train_spearman_corr')
validate_mse_metric = tf.keras.metrics.MeanSquaredError(name='validate_mse')
validate_mse_weighted_metric = tf.keras.metrics.MeanSquaredError(name='validate_mse_weighted')
validate_mae_metric = tf.keras.metrics.MeanAbsoluteError(name='validate_mae')
validate_mape_metric = tf.keras.metrics.MeanAbsolutePercentageError(name='validate_mape')
validate_r2_score_metric = R2Score(name='validate_r2_score')
validate_pearson_corr_metric = Correlation('pearson_corr', name='validate_pearson_corr')
validate_spearman_corr_metric = Correlation('spearman_corr', name='validate_spearman_corr')
test_mse_metric = tf.keras.metrics.MeanSquaredError(name='test_mse')
test_mse_weighted_metric = tf.keras.metrics.MeanSquaredError(name='test_mse_weighted')
test_mae_metric = tf.keras.metrics.MeanAbsoluteError(name='test_mae')
test_mape_metric = tf.keras.metrics.MeanAbsolutePercentageError(name='test_mape')
test_r2_score_metric = R2Score(name='test_r2_score')
test_pearson_corr_metric = Correlation('pearson_corr', name='test_pearson_corr')
test_spearman_corr_metric = Correlation('spearman_corr', name='test_spearman_corr')
# Define metrics for classification
# Report on the accuracy and AUC for each epoch (each metric is updated
# with data from each batch, and computed using data from all batches)
train_bce_metric = tf.keras.metrics.BinaryCrossentropy(name='train_bce')
train_bce_weighted_metric = tf.keras.metrics.BinaryCrossentropy(name='train_bce_weighted')
train_accuracy_metric = tf.keras.metrics.BinaryAccuracy(name='train_accuracy')
train_auc_roc_metric = tf.keras.metrics.AUC(
num_thresholds=200, curve='ROC', name='train_auc_roc')
train_auc_pr_metric = tf.keras.metrics.AUC(
num_thresholds=200, curve='PR', name='train_auc_pr')
validate_bce_metric = tf.keras.metrics.BinaryCrossentropy(name='validate_bce')
validate_bce_weighted_metric = tf.keras.metrics.BinaryCrossentropy(name='validate_bce_weighted')
validate_accuracy_metric = tf.keras.metrics.BinaryAccuracy(name='validate_accuracy')
validate_auc_roc_metric = tf.keras.metrics.AUC(
num_thresholds=200, curve='ROC', name='validate_auc_roc')
validate_auc_pr_metric = tf.keras.metrics.AUC(
num_thresholds=200, curve='PR', name='validate_auc_pr')
test_bce_metric = tf.keras.metrics.BinaryCrossentropy(name='test_bce')
test_bce_weighted_metric = tf.keras.metrics.BinaryCrossentropy(name='test_bce_weighted')
test_accuracy_metric = tf.keras.metrics.BinaryAccuracy(name='test_accuracy')
test_auc_roc_metric = tf.keras.metrics.AUC(
num_thresholds=200, curve='ROC', name='test_auc_roc')
test_auc_pr_metric = tf.keras.metrics.AUC(
num_thresholds=200, curve='PR', name='test_auc_pr')
# Store the model and optimizer as global (module-wide) variables
# If passing them directly to train_step(), validate_step(), and test_step(),
# TensorFlow complains about having to do tf.function retracing, which is
# expensive and due to passing Python objects instead of tensors
_model = None
_optimizer = None
# Train the model using GradientTape; this is called on each batch
def train_step(seqs, outputs, sample_weight=None):
if _model.regression:
loss_fn = mse_per_sample
else:
loss_fn = bce_per_sample
with tf.GradientTape() as tape:
# Compute predictions and loss
# Pass along `training=True` so that this can be given to
# the batchnorm and dropout layers; an alternative to passing
# it along would be to use `tf.keras.backend.set_learning_phase(1)`
# to set the training phase
predictions = _model(seqs, training=True)
prediction_loss = loss_fn(outputs, predictions,
sample_weight=sample_weight)
# Add the regularization losses
regularization_loss = tf.add_n(_model.losses)
loss = prediction_loss + regularization_loss
    # Compute gradients and optimize parameters
gradients = tape.gradient(loss, _model.trainable_variables)
_optimizer.apply_gradients(zip(gradients, _model.trainable_variables))
# Record metrics
train_loss_metric(loss)
if _model.regression:
train_mse_metric(outputs, predictions)
train_mse_weighted_metric(outputs, predictions, sample_weight=sample_weight)
train_mae_metric(outputs, predictions)
train_mape_metric(outputs, predictions)
else:
train_bce_metric(outputs, predictions)
train_bce_weighted_metric(outputs, predictions, sample_weight=sample_weight)
train_accuracy_metric(outputs, predictions)
train_auc_roc_metric(outputs, predictions)
train_auc_pr_metric(outputs, predictions)
return outputs, predictions
# Define functions for computing validation and test metrics; these are
# called on each batch
def validate_step(seqs, outputs, sample_weight=None):
# Compute predictions and loss
predictions = _model(seqs, training=False)
if _model.regression:
loss_fn = mse_per_sample
else:
loss_fn = bce_per_sample
prediction_loss = loss_fn(outputs, predictions,
sample_weight=sample_weight)
regularization_loss = tf.add_n(_model.losses)
loss = prediction_loss + regularization_loss
# Record metrics
validate_loss_metric(loss)
if _model.regression:
validate_mse_metric(outputs, predictions)
validate_mse_weighted_metric(outputs, predictions, sample_weight=sample_weight)
validate_mae_metric(outputs, predictions)
validate_mape_metric(outputs, predictions)
else:
validate_bce_metric(outputs, predictions)
validate_bce_weighted_metric(outputs, predictions, sample_weight=sample_weight)
validate_accuracy_metric(outputs, predictions)
validate_auc_roc_metric(outputs, predictions)
validate_auc_pr_metric(outputs, predictions)
return outputs, predictions
def test_step(seqs, outputs, sample_weight=None):
# Compute predictions
predictions = _model(seqs, training=False)
if _model.regression:
loss_fn = mse_per_sample
else:
loss_fn = bce_per_sample
prediction_loss = loss_fn(outputs, predictions,
sample_weight=sample_weight)
regularization_loss = tf.add_n(_model.losses)
loss = prediction_loss + regularization_loss
# Record metrics
test_loss_metric(loss)
if _model.regression:
test_mse_metric(outputs, predictions)
test_mse_weighted_metric(outputs, predictions, sample_weight=sample_weight)
test_mae_metric(outputs, predictions)
test_mape_metric(outputs, predictions)
else:
test_bce_metric(outputs, predictions)
test_bce_weighted_metric(outputs, predictions, sample_weight=sample_weight)
test_accuracy_metric(outputs, predictions)
test_auc_roc_metric(outputs, predictions)
test_auc_pr_metric(outputs, predictions)
return outputs, predictions
# Here we will effectively implement tf.keras.callbacks.EarlyStopping() to
# decide when to stop training; because we are not using model.fit(..) we
# cannot use this callback out-of-the-box
# Set the number of epochs that must pass with no improvement in the
# validation loss, after which we will stop training
STOPPING_PATIENCE = 2
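# Hedged illustration (added for clarity; the numbers are made up): with
# STOPPING_PATIENCE = 2, a validation-loss sequence of 0.50, 0.45, 0.47, 0.48
# stops after the fourth epoch, because two consecutive epochs (0.47 and 0.48)
# fail to improve on the best loss seen so far (0.45).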
def train_and_validate(model, x_train, y_train, x_validate, y_validate,
max_num_epochs, data_parser):
"""Train the model and also validate on each epoch.
Args:
model: model object
x_train, y_train: training input and outputs (labels, if
classification)
x_validate, y_validate: validation input and outputs (labels, if
classification)
max_num_epochs: maximum number of epochs to train for
data_parser: data parser object from parse_data
Returns:
tuple (dict with training metrics at the end, dict with validation
metrics at the end); keys in the dicts are 'loss' and
('bce' or 'mse') and ('auc-roc' or 'r-spearman')
"""
# Define an optimizer
if model.regression:
# When doing regression, sometimes the output would always be the
# same value regardless of input; decreasing the learning rate fixed this
optimizer = tf.keras.optimizers.Adam(lr=model.learning_rate)
else:
optimizer = tf.keras.optimizers.Adam(lr=model.learning_rate)
# Set the global module-level variables _model and _optimizer, needed by
# train_step() and validate_step()
global _model
global _optimizer
_model = model
_optimizer = optimizer
# model may be new, and calling train_step on a new model will yield
# an error; tf.function was designed such that a new one is needed
# whenever there is a new model
# (see
# https://github.com/tensorflow/tensorflow/issues/27525#issuecomment-481025914)
tf_train_step = tf.function(train_step)
tf_validate_step = tf.function(validate_step)
train_ds = make_dataset_and_batch(x_train, y_train,
batch_size=model.batch_size)
validate_ds = make_dataset_and_batch(x_validate, y_validate,
batch_size=model.batch_size)
# For classification, determine class weights
if not model.regression:
y_train_labels = [y_train[i][0] for i in range(len(y_train))]
class_weights = sklearn.utils.class_weight.compute_class_weight(
'balanced', sorted(np.unique(y_train_labels)), y_train_labels)
class_weights = list(class_weights)
model.class_weights = class_weights
print('Using class weights: {}'.format(class_weights))
# For regression, determine mean sample weight so that we can
# normalize to have mean=1
if model.regression:
train_weights = []
for seqs, outputs in train_ds:
train_weights += [data_parser.sample_regression_weight(xi, yi,
p=model.sample_weight_scaling_factor)
for xi, yi in zip(seqs, outputs)]
train_weight_mean = np.mean(train_weights)
validate_weights = []
for seqs, outputs in validate_ds:
validate_weights += [data_parser.sample_regression_weight(xi, yi,
p=model.sample_weight_scaling_factor)
for xi, yi in zip(seqs, outputs)]
validate_weight_mean = np.mean(validate_weights)
else:
train_weight_mean = None
validate_weight_mean = None
def determine_sample_weights(seqs, outputs, norm_factor=None):
if not model.regression:
# Classification; weight by class
labels = [int(o.numpy()[0]) for o in outputs]
return [class_weights[label] for label in labels]
else:
# Regression; weight by variance
weights = [data_parser.sample_regression_weight(xi, yi,
p=model.sample_weight_scaling_factor)
for xi, yi in zip(seqs, outputs)]
if norm_factor is not None:
weights = [w / norm_factor for w in weights]
return weights
# Compute weights for all samples once, rather than having to do so in
# every epoch
train_ds_weights = []
for seqs, outputs in train_ds:
sample_weight = determine_sample_weights(seqs, outputs,
norm_factor=train_weight_mean)
train_ds_weights += [sample_weight]
validate_ds_weights = []
for seqs, outputs in validate_ds:
sample_weight = determine_sample_weights(seqs, outputs,
norm_factor=validate_weight_mean)
validate_ds_weights += [sample_weight]
best_val_loss = None
num_epochs_past_best_loss = 0
for epoch in range(max_num_epochs):
# Train on each batch
for i, (seqs, outputs) in enumerate(train_ds):
sample_weight = tf.constant(train_ds_weights[i])
y_true, y_pred = tf_train_step(seqs, outputs,
sample_weight=sample_weight)
if model.regression:
train_r2_score_metric(y_true, y_pred)
train_pearson_corr_metric(y_true, y_pred)
train_spearman_corr_metric(y_true, y_pred)
# Validate on each batch
# Note that we could run the validation data through the model all
# at once (not batched), but batching may help with memory usage by
# reducing how much data is run through the network at once
for i, (seqs, outputs) in enumerate(validate_ds):
sample_weight = tf.constant(validate_ds_weights[i])
y_true, y_pred = tf_validate_step(seqs, outputs,
sample_weight=sample_weight)
if model.regression:
validate_r2_score_metric(y_true, y_pred)
validate_pearson_corr_metric(y_true, y_pred)
validate_spearman_corr_metric(y_true, y_pred)
# A note on one easy source of confusion (written here for
# classification, but it may apply to weighted MSE with regression as
# well):
# We might think the prediction_loss as calculated by train_step() and
# validate_step() should be equal to the weighted BCE because the loss
# function is binary cross-entropy. However, it appears that the loss
        # function calculation multiplies the binary cross-entropy for each
# sample i by sample_weight[i], directly as it is given. On the other
# hand, it seems that {train,validate}_bce_weighted_metric normalize
# the input sample weights (sample_weight) before the multiplication.
# As a result, the prediction_loss (and thus the train or validate loss
# value) can be quite different than weighted BCE; this might be
# especially true for validation, as the class weights are computed
# over the train data so, for validation, the sample weights might not
# have a mean of 1. One way to see this is that, when multiplying the
# sample_weight input to {train,validate}_step() by a scalar, the loss
        # value is multiplied by that scalar but the weighted BCE is unchanged.
# As a result, the loss value on different validation/test sets might
# not be comparable, but the weighted BCE might be. The normalization
# by {train,validate}_bce_weighted_metric seems to happen *per batch*
# (i.e., on the call to update state), rather than on the calculation
# at the end -- likely because it computes the updated mean every time
# the state is updated. One way to see this is to change, above:
# ```
# y_true, y_pred = tf_train_step(seqs, outputs,
# sample_weight=sample_weight)
# ```
# to
# ```
# y_true, y_pred = tf_train_step(seqs, outputs,
# sample_weight=(1.0/np.mean(sample_weight))*sample_weight)
# ```
# so that a normalized sample_weight is passed for each batch; the loss
# function value (namely, predicted_loss) should adjust whereas the
# weighted BCE metric should not change, and the two will now equal
# each other. As a result of this per batch normalization, I would lean
# toward ignoring the weighted BCE metric (and likely weighted MSE
# too); variation across batches will likely make it an unreliable
# metric.
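        # A small numeric illustration of the point above (added for clarity;
        # the numbers are made up): with per-sample losses [1.0, 3.0] and
        # sample weights [2.0, 2.0], a loss that multiplies each sample's loss
        # by its weight and averages over the batch gives (2.0 + 6.0) / 2 = 4.0,
        # while a weighted-mean metric gives sum(loss * weight) / sum(weight)
        # = 8.0 / 4.0 = 2.0. Doubling every weight doubles the former but
        # leaves the latter unchanged, which is the discrepancy described above.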
# Log the metrics from this epoch
print('EPOCH {}'.format(epoch+1))
print(' Train metrics:')
print(' Loss: {}'.format(train_loss_metric.result()))
if model.regression:
print(' MSE: {}'.format(train_mse_metric.result()))
print(' Weighted MSE: {}'.format(train_mse_weighted_metric.result()))
print(' MAE: {}'.format(train_mae_metric.result()))
print(' MAPE: {}'.format(train_mape_metric.result()))
print(' R^2 score: {}'.format(train_r2_score_metric.result()))
print(' r-Pearson: {}'.format(train_pearson_corr_metric.result()))
print(' r-Spearman: {}'.format(train_spearman_corr_metric.result()))
else:
print(' BCE: {}'.format(train_bce_metric.result()))
print(' Weighted BCE: {}'.format(train_bce_weighted_metric.result()))
print(' Accuracy: {}'.format(train_accuracy_metric.result()))
print(' AUC-ROC: {}'.format(train_auc_roc_metric.result()))
print(' AUC-PR: {}'.format(train_auc_pr_metric.result()))
print(' Validate metrics:')
print(' Loss: {}'.format(validate_loss_metric.result()))
if model.regression:
print(' MSE: {}'.format(validate_mse_metric.result()))
print(' Weighted MSE: {}'.format(validate_mse_weighted_metric.result()))
print(' MAE: {}'.format(validate_mae_metric.result()))
print(' MAPE: {}'.format(validate_mape_metric.result()))
print(' R^2 score: {}'.format(validate_r2_score_metric.result()))
print(' r-Pearson: {}'.format(validate_pearson_corr_metric.result()))
print(' r-Spearman: {}'.format(validate_spearman_corr_metric.result()))
else:
print(' BCE: {}'.format(validate_bce_metric.result()))
print(' Weighted BCE: {}'.format(validate_bce_weighted_metric.result()))
print(' Accuracy: {}'.format(validate_accuracy_metric.result()))
print(' AUC-ROC: {}'.format(validate_auc_roc_metric.result()))
print(' AUC-PR: {}'.format(validate_auc_pr_metric.result()))
train_loss = train_loss_metric.result()
val_loss = validate_loss_metric.result()
if model.regression:
train_mse = train_mse_metric.result()
train_pearson_corr = train_pearson_corr_metric.result()
train_spearman_corr = train_spearman_corr_metric.result()
val_mse = validate_mse_metric.result()
val_pearson_corr = validate_pearson_corr_metric.result()
val_spearman_corr = validate_spearman_corr_metric.result()
else:
train_bce = train_bce_metric.result()
train_bce_weighted = train_bce_weighted_metric.result()
train_auc_roc = train_auc_roc_metric.result()
train_auc_pr = train_auc_pr_metric.result()
val_bce = validate_bce_metric.result()
val_bce_weighted = validate_bce_weighted_metric.result()
val_auc_roc = validate_auc_roc_metric.result()
val_auc_pr = validate_auc_pr_metric.result()
# Reset metric states so they are not cumulative over epochs
train_loss_metric.reset_states()
train_mse_metric.reset_states()
train_mse_weighted_metric.reset_states()
train_mae_metric.reset_states()
train_mape_metric.reset_states()
train_r2_score_metric.reset_states()
train_pearson_corr_metric.reset_states()
train_spearman_corr_metric.reset_states()
train_bce_metric.reset_states()
train_bce_weighted_metric.reset_states()
train_accuracy_metric.reset_states()
train_auc_roc_metric.reset_states()
train_auc_pr_metric.reset_states()
validate_loss_metric.reset_states()
validate_mse_metric.reset_states()
validate_mse_weighted_metric.reset_states()
validate_mae_metric.reset_states()
validate_mape_metric.reset_states()
validate_r2_score_metric.reset_states()
validate_pearson_corr_metric.reset_states()
validate_spearman_corr_metric.reset_states()
validate_bce_metric.reset_states()
validate_bce_weighted_metric.reset_states()
validate_accuracy_metric.reset_states()
validate_auc_roc_metric.reset_states()
validate_auc_pr_metric.reset_states()
# Decide whether to stop at this epoch
if best_val_loss is None or val_loss < best_val_loss:
# Update the best validation loss
best_val_loss = val_loss
num_epochs_past_best_loss = 0
else:
# This loss is worse than one seen before
num_epochs_past_best_loss += 1
if num_epochs_past_best_loss >= STOPPING_PATIENCE:
# Stop here
print(' Stopping at EPOCH {}'.format(epoch+1))
break
if model.regression:
train_metrics = {'loss': train_loss.numpy(), 'mse': train_mse.numpy(),
'r-pearson': train_pearson_corr,
'r-spearman': train_spearman_corr}
val_metrics = {'loss': val_loss.numpy(), 'mse': val_mse.numpy(),
'r-pearson': val_pearson_corr,
'r-spearman': val_spearman_corr}
else:
train_metrics = {'loss': train_loss.numpy(), 'bce': train_bce.numpy(),
'weighted-bce': train_bce_weighted.numpy(),
'auc-pr': train_auc_pr.numpy(),
'auc-roc': train_auc_roc.numpy()}
val_metrics = {'loss': val_loss.numpy(), 'bce': val_bce.numpy(),
'weighted-bce': val_bce_weighted.numpy(),
'auc-pr': val_auc_pr.numpy(),
'auc-roc': val_auc_roc.numpy()}
return train_metrics, val_metrics
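# Hedged usage sketch (added for illustration; not part of the original file):
# a typical call, assuming x_*/y_* come from the data parser used elsewhere in
# this module and `model` was built by construct_model():
#   train_metrics, val_metrics = train_and_validate(
#       model, x_train, y_train, x_validate, y_validate,
#       max_num_epochs=50, data_parser=data_parser)
#   print(val_metrics['loss'], val_metrics.get('r-spearman'))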
def test(model, x_test, y_test, data_parser, plot_roc_curve=None,
plot_predictions=None, write_test_tsv=None,
y_train=None):
"""Test a model.
This prints metrics.
Args:
model: model object
x_test, y_test: testing input and outputs (labels, if
classification)
data_parser: data parser object from parse_data
plot_roc_curve: if set, path to PDF at which to save plot of ROC curve
plot_predictions: if set, path to PDF at which to save plot of
predictions vs. true values
write_test_tsv: if set, path to TSV at which to write data on predictions
as well as the testing sequences (one row per data point)
y_train: (optional) if set, print metrics if the predictor simply
predicts the mean of the training data
Returns:
dict with test metrics at the end (keys are 'loss'
and ('bce' or 'mse') and ('auc-roc' or 'r-spearman'))
"""
    # Set the global module-level variable _model, needed by test_step()
global _model
_model = model
tf_test_step = tf.function(test_step)
test_ds = make_dataset_and_batch(x_test, y_test,
batch_size=model.batch_size)
# For regression, determine mean sample weight so that we can
# normalize to have mean=1
if model.regression:
test_weights = []
for seqs, outputs in test_ds:
test_weights += [data_parser.sample_regression_weight(xi, yi,
p=model.sample_weight_scaling_factor)
for xi, yi in zip(seqs, outputs)]
test_weight_mean = np.mean(test_weights)
else:
test_weight_mean = None
def determine_sample_weights(seqs, outputs, norm_factor=None):
if not model.regression:
# Classification; weight by class
labels = [int(o.numpy()[0]) for o in outputs]
return [model.class_weights[label] for label in labels]
else:
# Regression; weight by variance
weights = [data_parser.sample_regression_weight(xi, yi,
p=model.sample_weight_scaling_factor)
for xi, yi in zip(seqs, outputs)]
if norm_factor is not None:
weights = [w / norm_factor for w in weights]
return weights
all_true = []
all_predictions = []
for seqs, outputs in test_ds:
sample_weight = determine_sample_weights(seqs, outputs,
norm_factor=test_weight_mean)
sample_weight = tf.constant(sample_weight)
y_true, y_pred = tf_test_step(seqs, outputs,
sample_weight=sample_weight)
if model.regression:
test_r2_score_metric(y_true, y_pred)
test_pearson_corr_metric(y_true, y_pred)
test_spearman_corr_metric(y_true, y_pred)
all_true += list(tf.reshape(y_true, [-1]).numpy())
all_predictions += list(tf.reshape(y_pred, [-1]).numpy())
# Check the ordering: y_test should be the same as all_true
# (y_test is a 2d array: [[value], [value], ..., [value]] so it must
# be flattened prior to comparison)
assert np.allclose(y_test.flatten(), all_true)
# See note in train_and_validate() for discrepancy between loss and
# weighted metric values.
print('TEST DONE')
print(' Test metrics:')
print(' Loss: {}'.format(test_loss_metric.result()))
if model.regression:
print(' MSE: {}'.format(test_mse_metric.result()))
print(' Weighted MSE: {}'.format(test_mse_weighted_metric.result()))
print(' MAE: {}'.format(test_mae_metric.result()))
print(' MAPE: {}'.format(test_mape_metric.result()))
print(' R^2 score: {}'.format(test_r2_score_metric.result()))
print(' r-Pearson: {}'.format(test_pearson_corr_metric.result()))
print(' r-Spearman: {}'.format(test_spearman_corr_metric.result()))
else:
print(' BCE: {}'.format(test_bce_metric.result()))
print(' Weighted BCE: {}'.format(test_bce_weighted_metric.result()))
print(' Accuracy: {}'.format(test_accuracy_metric.result()))
print(' AUC-ROC: {}'.format(test_auc_roc_metric.result()))
print(' AUC-PR: {}'.format(test_auc_pr_metric.result()))
test_loss = test_loss_metric.result()
if model.regression:
test_mse = test_mse_metric.result()
test_pearson_corr = test_pearson_corr_metric.result()
test_spearman_corr = test_spearman_corr_metric.result()
else:
test_bce = test_bce_metric.result()
test_bce_weighted = test_bce_weighted_metric.result()
test_auc_roc = test_auc_roc_metric.result()
test_auc_pr = test_auc_pr_metric.result()
test_loss_metric.reset_states()
test_mse_metric.reset_states()
test_mse_weighted_metric.reset_states()
test_mae_metric.reset_states()
test_mape_metric.reset_states()
test_r2_score_metric.reset_states()
test_pearson_corr_metric.reset_states()
test_spearman_corr_metric.reset_states()
test_bce_metric.reset_states()
test_bce_weighted_metric.reset_states()
test_accuracy_metric.reset_states()
test_auc_roc_metric.reset_states()
test_auc_pr_metric.reset_states()
if model.regression and y_train is not None:
# Print what the MSE would be if only predicting the mean of
# the training data
print(' MSE on test data if predicting mean of train data:',
np.mean(np.square(np.mean(y_train) - np.array(all_true))))
x_test_pos = [data_parser.pos_for_input(xi) for xi in x_test]
if write_test_tsv:
# Determine features for all input sequences
seq_feats = []
for i in range(len(x_test)):
seq_feats += [data_parser.seq_features_from_encoding(x_test[i])]
cols = ['target', 'target_without_context', 'guide',
'hamming_dist', 'cas13a_pfs', 'crrna_pos', 'true_activity',
'predicted_activity']
with gzip.open(write_test_tsv, 'wt') as fw:
def write_row(row):
fw.write('\t'.join(str(x) for x in row) + '\n')
# Write header
write_row(cols)
# Write row for each data point
for i in range(len(x_test)):
def val(k):
if k == 'true_activity':
return all_true[i]
elif k == 'predicted_activity':
return all_predictions[i]
elif k == 'crrna_pos':
if x_test_pos is None:
# Use -1 if position is unknown
return -1
else:
# x_test_pos[i] gives position of x_test[i]
return x_test_pos[i]
else:
return seq_feats[i][k]
write_row([val(k) for k in cols])
if plot_roc_curve:
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
fpr, tpr, thresholds = roc_curve(all_true, all_predictions)
plt.figure(1)
plt.plot(fpr, tpr)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.show()
plt.savefig(plot_roc_curve)
if plot_predictions:
import matplotlib.pyplot as plt
plt.figure(1)
plt.scatter(all_true, all_predictions, c=x_test_pos)
plt.xlabel('True value')
plt.ylabel('Predicted value')
plt.title('True vs. predicted values')
plt.show()
plt.savefig(plot_predictions)
if model.regression:
test_metrics = {'loss': test_loss.numpy(), 'mse': test_mse.numpy(),
'r-pearson': test_pearson_corr,
'r-spearman': test_spearman_corr}
else:
test_metrics = {'loss': test_loss.numpy(), 'bce': test_bce.numpy(),
'weighted-bce': test_bce_weighted.numpy(),
'auc-pr': test_auc_pr.numpy(),
'auc-roc': test_auc_roc.numpy()}
return test_metrics
#####################################################################
#####################################################################
#####################################################################
#####################################################################
#####################################################################
#####################################################################
# Functions to train and test using Keras
#
# Compared to the custom functions above, this provides less
# flexibility but makes it simpler and possible to train across
# multiple GPUs.
#####################################################################
#####################################################################
def train_with_keras(model, x_train, y_train, x_validate, y_validate,
max_num_epochs=50):
"""Fit a model using Keras.
The model must have already been compiled (e.g., with construct_model()
above).
Args:
model: compiled model, e.g., output by construct_model()
x_train/y_train: training data
x_validate/y_validate: validation data; also used for early stopping
        max_num_epochs: maximum number of epochs to train for; the actual
            number of epochs trained may be fewer due to early stopping
"""
# Setup early stopping
# The validation data is only used for early stopping
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
mode='min', patience=10, restore_best_weights=True)
# Fit the model
model.fit(x_train, y_train, validation_data=(x_validate, y_validate),
batch_size=model.batch_size, callbacks=[es],
class_weight=model.class_weight,
epochs=max_num_epochs,
verbose=2)
def test_with_keras(model, x_test, y_test, data_parser, write_test_tsv=None,
callback=None, regression=False):
"""Test a model.
This prints metrics.
Args:
model: model object
x_test, y_test: testing input and outputs (labels, if
classification)
data_parser: data parser object from parse_data
write_test_tsv: if set, path to TSV at which to write data on predictions
as well as the testing sequences (one row per data point)
callback: if set, a function to call that accepts the true and
            predicted test values -- called like callback(y_true, y_pred)
regression: True iff this is testing a model for regression;
this is only used if model.regression is not available
Returns:
dict with test metrics at the end (keys are 'loss'
and ('bce' or 'mse') and ('auc-roc' or 'r-spearman'))
"""
# model may not have batch_size if it is loaded from a SavedModel
# serialization
# But the batch_size should not matter for testing, so just use 32
if hasattr(model, 'batch_size'):
batch_size = model.batch_size
else:
batch_size = 32
# Likewise, model may not have regression attribute if it is loaded
# from a SavedModel serialization
# Override the argument given to this function if it does; otherwise
# just use the argument
if hasattr(model, 'regression'):
regression = model.regression
# Evaluate on test data
test_metrics = model.evaluate(x_test, y_test,
batch_size=batch_size)
# Turn test_metrics from list into dict
test_metrics = dict(zip(model.metrics_names, test_metrics))
y_true = y_test
y_pred = model.predict(x_test, batch_size=batch_size)
if write_test_tsv:
x_test_pos = [data_parser.pos_for_input(xi) for xi in x_test]
# Determine features for all input sequences
seq_feats = []
for i in range(len(x_test)):
seq_feats += [data_parser.seq_features_from_encoding(x_test[i])]
cols = ['target', 'target_without_context', 'guide',
'hamming_dist', 'cas13a_pfs', 'crrna_pos', 'true_activity',
'predicted_activity']
with gzip.open(write_test_tsv, 'wt') as fw:
def write_row(row):
fw.write('\t'.join(str(x) for x in row) + '\n')
# Write header
write_row(cols)
# Write row for each data point
for i in range(len(x_test)):
def val(k):
if k == 'true_activity':
yt = y_true[i]
assert len(yt) == 1
return yt[0]
elif k == 'predicted_activity':
yp = y_pred[i]
assert len(yp) == 1
return yp[0]
elif k == 'crrna_pos':
if x_test_pos is None:
# Use -1 if position is unknown
return -1
else:
# x_test_pos[i] gives position of x_test[i]
return x_test_pos[i]
else:
return seq_feats[i][k]
write_row([val(k) for k in cols])
if regression:
mse_metric = tf.keras.metrics.MeanSquaredError()
mse_metric(y_true, y_pred)
mse = mse_metric.result().numpy()
pearson_corr_metric = Correlation('pearson_corr')
pearson_corr_metric(y_true, y_pred)
pearson_corr = pearson_corr_metric.result()
spearman_corr_metric = Correlation('spearman_corr')
spearman_corr_metric(y_true, y_pred)
spearman_corr = spearman_corr_metric.result()
test_metrics = {'loss': test_metrics['loss'],
'mse': mse,
'r-pearson': pearson_corr,
'r-spearman': spearman_corr}
else:
bce_metric = tf.keras.metrics.BinaryCrossentropy()
bce_metric(y_true, y_pred)
bce = bce_metric.result().numpy()
auc_pr_metric = tf.keras.metrics.AUC(num_thresholds=500, curve='PR')
auc_pr_metric(y_true, y_pred)
auc_pr = auc_pr_metric.result().numpy()
auc_roc_metric = tf.keras.metrics.AUC(num_thresholds=500, curve='ROC')
auc_roc_metric(y_true, y_pred)
auc_roc = auc_roc_metric.result().numpy()
test_metrics = {'loss': test_metrics['loss'],
'bce': bce,
'auc-pr': auc_pr,
'auc-roc': auc_roc}
print('TEST METRICS:', test_metrics)
if callback is not None:
callback(y_true, y_pred)
return test_metrics
#####################################################################
#####################################################################
#####################################################################
#####################################################################
def main():
# Read arguments and data
args = parse_args()
if args.load_model:
# Read saved parameters and load them into the args namespace
print('Loading parameters for model..')
load_path_params = os.path.join(args.load_model,
'model.params.pkl')
with open(load_path_params, 'rb') as f:
saved_params = pickle.load(f)
params = vars(args)
for k, v in saved_params.items():
print("Setting argument '{}'={}".format(k, v))
params[k] = v
if args.test_split_frac:
train_and_validate_split_frac = 1.0 - args.test_split_frac
if not (args.load_model or args.load_model_as_tf_savedmodel):
# Since this will be training a model, reserve validation
            # data (20% of the train+validate split) for early stopping
validate_frac = 0.2*train_and_validate_split_frac
train_frac = train_and_validate_split_frac - validate_frac
else:
train_frac = train_and_validate_split_frac
validate_frac = 0.0
split_frac = (train_frac, validate_frac, args.test_split_frac)
else:
split_frac = None
# Set seed and read data
set_seed(args.seed)
data_parser = read_data(args, split_frac=split_frac)
x_train, y_train = data_parser.train_set()
x_validate, y_validate = data_parser.validate_set()
x_test, y_test = data_parser.test_set()
# Determine, based on the dataset, whether to do regression or
# classification
if args.dataset == 'cas13':
if args.cas13_classify:
regression = False
else:
regression = True
if regression and args.plot_roc_curve:
raise Exception(("Can only use --plot-roc-curve when doing "
"classification"))
if not regression and args.plot_predictions:
raise Exception(("Can only use --plot-predictions when doing "
"regression"))
if args.load_model and args.load_model_as_tf_savedmodel:
raise Exception(("Cannot set both --load-model and "
"--load-model-as-tf-savedmodel"))
if args.load_model:
# Load the model weights; the model architecture is specified
# by params
print('Loading model weights..')
model = load_model(args.load_model, params, x_train, y_train)
print('Done loading model.')
elif args.load_model_as_tf_savedmodel:
# Load a model saved with TensorFlow's SavedModel format
# This contains both model architecture and weights
model = tf.keras.models.load_model(
args.load_model_as_tf_savedmodel)
else:
# Construct model
params = vars(args)
model = construct_model(params, x_train.shape, regression,
compile_for_keras=True, y_train=y_train)
# Train the model, with validation
train_with_keras(model, x_train, y_train, x_validate, y_validate)
if args.filter_test_data_by_classification_score:
if not regression:
raise Exception(("Can only use --filter-test-data-by-classification-"
"score when testing regression"))
classification_test_tsv, score_threshold = args.filter_test_data_by_classification_score
score_threshold = float(score_threshold)
x_test, y_test = filter_test_data_by_classification_score(
x_test, y_test, data_parser,
classification_test_tsv, score_threshold)
# Test the model
test_with_keras(model, x_test, y_test, data_parser,
write_test_tsv=args.write_test_tsv,
regression=regression)
if (not regression) and (not args.load_model_as_tf_savedmodel):
# Determine threshold for classifier
# Note that this should only use params; it should *not* use
# a pre-trained model with loaded weights
# This does not use test data
print('Determining classifier threshold via cross-validation')
num_splits = 5
thresholds = determine_classifier_threshold_for_precision(
params, x_train, y_train, num_splits, data_parser,
args.determine_classifier_threshold_for_precision)
print(('Mean threshold across folds to achieve precision of %f = %f') %
(args.determine_classifier_threshold_for_precision,
np.mean(thresholds)))
print(('Median threshold across folds to achieve precision of %f = %f') %
(args.determine_classifier_threshold_for_precision,
np.median(thresholds)))
print(' Thresholds are:', thresholds)
if args.serialize_model_with_tf_savedmodel:
# Serialize the model using TensorFlow's SavedModel format
# This saves model architecture, weights, and training configuration
model.save(args.serialize_model_with_tf_savedmodel)
if __name__ == "__main__":
main()
``` |
{
"source": "jingyi7777/CasRx_guide_efficiency",
"score": 3
} |
#### File: Deep-learning/dataset/all_guide_cd_validation_dataset.py
```python
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
import random
from dataset.dataset_utils import *
def all_guide_cd_validation_dataset(args):
dataframe = pd.read_csv('../../data/integrated_guide_feature_filtered_f24_mismatch3_all_features.csv')
num_examples = len(dataframe['gene'].values)
#lin_seq_dict, lin_result_dict = parse_guide_linearfold_fasta_into_dict_contrafold()
encoded_guides = [one_hot_encode_sequence(guide) for guide in dataframe['guide'].values]
#encoded_linearfold = [one_hot_encode_linearfold(lin_seq_dict[guide], remove_universal_start=True) for guide in
# dataframe['guide'].values]
other_single_value_inputs = np.empty((9, num_examples))
other_single_value_inputs[0, :] = dataframe['linearfold_vals'].values
other_single_value_inputs[1, :] = dataframe['is_5UTR'].values
other_single_value_inputs[2, :] = dataframe['is_CDS'].values
other_single_value_inputs[3, :] = dataframe['is_3UTR'].values
other_single_value_inputs[4, :] = dataframe['refseq_target_transcript_percent'].values
other_single_value_inputs[5, :] = dataframe['target unfold energy']
other_single_value_inputs[6, :] = dataframe['UTR5_position'].values
other_single_value_inputs[7, :] = dataframe['CDS_position'].values
other_single_value_inputs[8, :] = dataframe['UTR3_position'].values
#classes = 1- dataframe['old_binary_relative_ratio_gene20'].values
classes = dataframe['binary_relative_ratio_075f'].values
outputs = dataframe['relative_ratio'].values if args.regression else classes.astype(np.float32)
#outputs = outputs.tolist()
all_cols = [encoded_guides, # will be N x 4 from guide encoding
normalize(other_single_value_inputs.T),
# classes,
outputs
]
#tr = all_cols
if args.kfold == None:
tr, val, te_train = create_gene_splits(dataframe['gene'].values, all_cols)
else:
#tr, val = create_gene_splits_no_test_kfold(dataframe['gene'].values, all_cols, args.kfold, args.split)
#tr, val, te_train = create_gene_splits_kfold(dataframe['gene'].values, all_cols, args.kfold, args.split)
#tr, val, te_train = create_gene_splits_filter1_kfold(dataframe['gene'].values, all_cols, args.kfold, args.split)
#tr, val = create_gene_splits_filter1_no_test_kfold(dataframe['gene'].values, all_cols, args.kfold, args.split)
tr, val = create_gene_splits_filter1_test_asval_kfold(dataframe['gene'].values, all_cols, args.kfold, args.split)
# test set data, cd validation
tedf = pd.read_csv('dataset/cdscreen_filtered_t1_new_features_ratios.csv')
encoded_guides_te = [one_hot_encode_sequence(guide) for guide in tedf['guide'].values]
num_examples_te = len(tedf['guide'].values)
outputs_te = tedf['t1_bin1_to_sum_bin14_rank_withr3'].values if args.regression else tedf['t1_binary_relative_ratio_withrep3'].values
#outputs_te =outputs_te.tolist()
other_single_value_inputs_te = np.empty((9, num_examples_te))
other_single_value_inputs_te[0, :] = tedf['linf_contrafold_val'].values/max(dataframe['linearfold_vals'].values) #normalize as the training data
other_single_value_inputs_te[1, :] = tedf['is_5UTR'].values
other_single_value_inputs_te[2, :] = tedf['is_CDS'].values
other_single_value_inputs_te[3, :] = tedf['is_3UTR'].values
other_single_value_inputs_te[4, :] = tedf['refseq_target_transcript_percent'].values
other_single_value_inputs_te[5, :] = tedf['target_flank_ddg']/max(dataframe['target unfold energy'].values)
other_single_value_inputs_te[6, :] = tedf['UTR5_position']/max(dataframe['UTR5_position'].values)
other_single_value_inputs_te[7, :] = tedf['CDS_position']/max(dataframe['CDS_position'].values)
other_single_value_inputs_te[8, :] = tedf['UTR3_position']/max(dataframe['UTR3_position'].values)
all_cols_te = [encoded_guides_te, # will be N x 4 from guide encoding
other_single_value_inputs_te.T,
outputs_te
]
te = all_cols_te
tr_out = tr[-1]
tr = tuple(tr[:-1])
val_out = val[-1]
val = tuple(val[:-1])
te_out = te[-1]
te = tuple(te[:-1])
train_dataset = tf.data.Dataset.from_tensor_slices((tr, tr_out))
val_dataset = tf.data.Dataset.from_tensor_slices((val, val_out))
test_dataset = tf.data.Dataset.from_tensor_slices((te, te_out))
# shuffle and batch
train_dataset = prep_dataset(train_dataset, batch_size=128)
val_dataset = prep_dataset(val_dataset, batch_size=128)
test_dataset = prep_dataset(test_dataset, batch_size=128)
return train_dataset, val_dataset, test_dataset
```
#### File: Deep-learning/dataset/guide_all_features_9f_dataset.py
```python
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
import random
#from dataset.dataset_filtered_utils import *
from dataset.dataset_utils import *
#from dataset.dataset_utils import normalize
def guide_all_features_9f_dataset(args):
dataframe = pd.read_csv('../../data/integrated_guide_feature_filtered_f24_mismatch3_all_features.csv')
num_examples = len(dataframe['gene'].values)
#lin_seq_dict, lin_result_dict = parse_guide_linearfold_fasta_into_dict()
#lin_seq_dict, lin_result_dict = parse_guide_linearfold_fasta_into_dict_contrafold()
encoded_guides = [one_hot_encode_sequence(guide) for guide in dataframe['guide'].values]
#encoded_linearfold = [one_hot_encode_linearfold(lin_seq_dict[guide], remove_universal_start=True) for guide in
# dataframe['guide'].values]
# encoded_guides = [reverse_complement_encoding(guide) for guide in dataframe['guide'].values]
#target with nearby seq, dg of native and unfolded
# flank_l = int(args.flanklength)
# lin_seq_flanks_dict, lin_result_flanks_dict = parse_target_flanks_linearfold_fasta_into_dict_contrafold(flank_len = flank_l)
# linearfold_vals_target = [lin_result_flanks_dict[target_flanks] for target_flanks in dataframe['nearby_seq_all_'+str(flank_l)].values] #native energy
# #lin_seq_flanks = [lin_seq_flanks_dict[target_flanks] for target_flanks in dataframe['nearby_seq_all_100'].values]
# unfold_lin_seq_flanks_dict, unfold_lin_result_flanks_dict = parse_target_flanks_constraints_linearfold_fasta_into_dict_contrafold(flank_len = flank_l)
# unfold_linearfold_vals_target = [unfold_lin_result_flanks_dict[target_flanks] for target_flanks in dataframe['nearby_seq_all_'+str(flank_l)].values] #unfolded target energy
# ddg = [] #energy required to unfold the guide binding region
# for jj in range(num_examples):
# ddg.append((linearfold_vals_target[jj]-unfold_linearfold_vals_target[jj]))
classes = dataframe['binary_relative_ratio_075f'].values
outputs = dataframe['relative_ratio'].values if args.regression else classes.astype(np.float32)
other_single_value_inputs = np.empty((9, num_examples))
other_single_value_inputs[0, :] = dataframe['linearfold_vals'].values
other_single_value_inputs[1, :] = dataframe['is_5UTR'].values
other_single_value_inputs[2, :] = dataframe['is_CDS'].values
other_single_value_inputs[3, :] = dataframe['is_3UTR'].values
other_single_value_inputs[4, :] = dataframe['refseq_target_transcript_percent'].values
other_single_value_inputs[5, :] = dataframe['target unfold energy'].values
other_single_value_inputs[6, :] = dataframe['UTR5_position'].values
other_single_value_inputs[7, :] = dataframe['CDS_position'].values
other_single_value_inputs[8, :] = dataframe['UTR3_position'].values
#other_single_value_inputs[9, :] = dataframe['linearfold_dr_flag'].values
#other_single_value_inputs[10, :] = dataframe['GC_content'].values
all_cols = [encoded_guides,
normalize(other_single_value_inputs.T),
# classes,
outputs
]
if args.kfold == None:
tr, val, te = create_gene_splits(dataframe['gene'].values, all_cols)
else:
#tr, val, te = create_gene_splits_kfold(dataframe['gene'].values, all_cols, args.kfold, args.split)
tr, val, te = create_gene_splits_filter1_kfold(dataframe['gene'].values, all_cols, args.kfold, args.split)
tr_out = tr[-1]
tr = tuple(tr[:-1])
val_out = val[-1]
val = tuple(val[:-1])
te_out = te[-1]
te = tuple(te[:-1])
train_dataset = tf.data.Dataset.from_tensor_slices((tr, tr_out))
val_dataset = tf.data.Dataset.from_tensor_slices((val, val_out))
test_dataset = tf.data.Dataset.from_tensor_slices((te, te_out))
# shuffle and batch
train_dataset = prep_dataset(train_dataset, batch_size=128)
val_dataset = prep_dataset(val_dataset, batch_size=128)
test_dataset = prep_dataset(test_dataset, batch_size=128)
return train_dataset, val_dataset, test_dataset
```
#### File: Deep-learning/models/__init__.py
```python
import importlib
def find_model_using_name(model_name):
"""Import the module "models/[model_name]_model.py".
In the file, the class called DatasetNameModel() will
be instantiated. It has to be a subclass of BaseModel,
and it is case-insensitive.
"""
model_filename = "models." + model_name + "_model"
modellib = importlib.import_module(model_filename)
model = None
target_model_name_if_func = model_name + '_model'
target_model_name_if_class = model_name.replace('_', '') + 'model'
for name, potential_model_creator in modellib.__dict__.items():
if name.lower() == target_model_name_if_func.lower() or name.lower() == target_model_name_if_class.lower():
model = potential_model_creator
if model is None:
print("In %s.py, there should be a function that matches %s or class that matches %s in lowercase." % (
model_filename, target_model_name_if_func, target_model_name_if_class))
exit(0)
return model
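# Hedged illustration (added; the concrete model name is hypothetical): for
# find_model_using_name('guide_cnn'), the lookup imports models/guide_cnn_model.py
# and returns whichever attribute matches, case-insensitively, either
#     def guide_cnn_model(args): ...      # function form ('guide_cnn_model')
# or
#     class GuideCnnModel: ...            # class form ('guidecnnmodel')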
def find_hp_model_using_name(model_name):
"""Import the module "models/[model_name]_model.py".
In the file, the class called DatasetNameModel() will
be instantiated. It has to be a subclass of BaseModel,
and it is case-insensitive.
"""
model_filename = "models." + model_name + "_model"
modellib = importlib.import_module(model_filename)
model = None
target_model_name_if_func = model_name + '_model_hp'
target_model_name_if_class = model_name.replace('_', '') + 'model_hp'
for name, potential_model_creator in modellib.__dict__.items():
if name.lower() == target_model_name_if_func.lower() or name.lower() == target_model_name_if_class.lower():
model = potential_model_creator
if model is None:
print("In %s.py, there should be a function that matches %s or class that matches %s in lowercase." % (
model_filename, target_model_name_if_func, target_model_name_if_class))
exit(0)
return model
if __name__ == '__main__':
model = find_model_using_name('no_gene_seq_lstm')()
print(model)
```
#### File: models/Deep-learning/predict_ensemble.py
```python
import tensorflow as tf
from tensorflow import keras
import tensorflow_addons as tfa
import datetime
import pdb
import numpy as np
from dataset import find_dataset_generator_using_name
from models import find_model_using_name
from options.options import get_arguments
from matplotlib import pyplot as plt
from utils import *
tf.random.set_seed(0)
#random.seed(0)
np.random.seed(0)
def logits_mean_absolute_error(y_true, y_pred):
y_pred = tf.sigmoid(y_pred)
return keras.metrics.mean_absolute_error(y_true, y_pred)
def logits_mean_squared_error(y_true, y_pred):
y_pred = tf.sigmoid(y_pred)
return keras.metrics.mean_squared_error(y_true, y_pred)
def wbce(y_true, y_pred, weight1=1, weight0=1):
    # Weighted binary cross-entropy. Use the Keras backend explicitly for
    # clip/log/mean (the original referenced tf.keras.clip and an undefined K).
    K = tf.keras.backend
    y_true = K.clip(y_true, K.epsilon(), 1 - K.epsilon())
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    logloss = -(y_true * K.log(y_pred) * weight1 + (1 - y_true) * K.log(1 - y_pred) * weight0)
    return K.mean(logloss, axis=-1)
def predict_ensemble(args):
model_generator = find_model_using_name(args.model)
dataset_generator = find_dataset_generator_using_name(args.dataset)
model = model_generator(args)
model: keras.Model
train_dataset, val_dataset, test_dataset = dataset_generator(args)
unshuffled_train = train_dataset
train_dataset = train_dataset.shuffle(len(train_dataset), reshuffle_each_iteration=True)
if args.saved != None:
predict_allf = []
for s in range(9):
unique_train_signature = '%s/%s/%s/%s' % (
dataset_generator.__name__,
model_generator.__name__,
'regression' if args.regression else 'classification',
('fold_'+ str(s)))
#model_path = 'saved_model/'+unique_train_signature
model_path = args.saved+('/fold_'+ str(s))
if args.regression:
model = keras.models.load_model(model_path,custom_objects={'logits_mean_absolute_error':logits_mean_absolute_error,'logits_mean_squared_error':logits_mean_squared_error})
else:
model = keras.models.load_model(model_path)
output = np.array(model.predict(test_dataset).flat)
predict_allf.append(output)
predict_allf = np.array(predict_allf)
# mean across ensemble members
predict_mean = np.mean(predict_allf, axis=0)
#sigmoid
outputs = np.array(list(tf.sigmoid(predict_mean).numpy().flat))
#labels = [label for (inputs, label) in test_dataset.unbatch()]
labels = np.array([label for (input, label) in test_dataset.unbatch().as_numpy_iterator()])
test_inputs = [inputs for (inputs, label) in test_dataset.unbatch()]
if len(test_inputs[0]) == 2:
test_sequences =[np.array(sequences) for (sequences, features) in test_inputs]
test_features = [features for (sequences, features) in test_inputs]
else:
test_sequences = [np.array(sequences[0]) for sequences in test_inputs]
def encoded_nuc_to_str(encoded_seq):
indices = np.argmax(encoded_seq, axis=1)
return ''.join([base_positions[i] for i in indices])
test_predic =[]
for i in range(len(outputs)):
nuc_sequence = encoded_nuc_to_str(test_sequences[i][:,0:4])
test_predic.append([nuc_sequence,outputs[i],labels[i]])
#test_predic.append([nuc_sequence,outputs[i]])
test_df = pd.DataFrame(test_predic, columns = ['spacer sequence', 'predicted_value_sigmoid','true label'])
#test_df['output rank'] = test_df['predicted_value_sigmoid'].rank(ascending=False)
dataset_folder = 'results/' + args.dataset + '/'
#model_folder = dataset_folder + model_name + '/'
model_folder = dataset_folder + args.model + '_classification/'
if not os.path.exists(dataset_folder):
os.mkdir(dataset_folder)
if not os.path.exists(model_folder):
os.mkdir(model_folder)
test_df.to_csv('%s%s_%s_ensemble.csv' % (model_folder, "test_prediction", 'gl-'+str(args.guidelength)))
#evaluate
score = roc_auc_score(labels, outputs)
#fpr, tpr, _ = roc_curve(labels, outputs)
print('AUROC: '+str(score))
average_precision = average_precision_score(labels, outputs)
#precision, recall, thres_prc = precision_recall_curve(labels, outputs)
print('AUPRC: '+str(average_precision))
# #stats for different thresholds
# thres_list = [0.8, 0.9,0.95]
# for thres in thres_list:
# print(thres)
# df_pre_good = test_df[test_df['predicted_value_sigmoid']>thres]
# true_bin_ratio = df_pre_good['true label'].values
# num_real_gg = np.count_nonzero(true_bin_ratio)
# if len(true_bin_ratio)>0:
# gg_ratio = num_real_gg/len(true_bin_ratio)
# print('true good guide percent '+str(gg_ratio))
else:
if args.focal:
loss = lambda y_true, y_pred: tfa.losses.sigmoid_focal_crossentropy(y_true, y_pred, from_logits=True)
else:
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(lr=args.lr)
if args.regression:
metrics = [logits_mean_absolute_error, logits_mean_squared_error]
model.compile(optimizer=optimizer, loss=tf.keras.losses.MeanSquaredError(), metrics=metrics)
else:
# metrics = ['accuracy', keras.metrics.Precision(), keras.metrics.Recall()] #doesn't work with logits
metrics = ['accuracy']
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
unique_train_signature = '%s/%s/%s/%s' % (
dataset_generator.__name__,
model_generator.__name__,
'regression' if args.regression else 'classification',
('fold_'+ str(args.split))
#datetime.datetime.now().isoformat()
)
callbacks = [
keras.callbacks.EarlyStopping(patience=16, restore_best_weights=True),
keras.callbacks.TensorBoard(log_dir='logs/%s' % unique_train_signature,
# update_freq=100
),
keras.callbacks.ModelCheckpoint('checkpoints/%s' % unique_train_signature, save_best_only=True)
]
if args.weighted:
#weight_zeros = 1.0 / (0.8 * 2.0) # 1 / (% of class * number of classes)
#weight_ones = 1.0 / (0.2 * 2.0)
weight_zeros = 1.0
weight_ones = 2.0
class_weights = {0: weight_zeros, 1: weight_ones}
history = model.fit(train_dataset, epochs=200,
validation_data=val_dataset,
verbose=1,
callbacks=callbacks,
class_weight=class_weights
)
else:
history = model.fit(train_dataset, epochs=200,
validation_data=val_dataset,
verbose=1,
callbacks=callbacks
)
model.save('saved_model/%s' % unique_train_signature)
#if args.regression:
# get_regression_metrics_cd(model, test_dataset, args.regression, args.kfold, args.split, model_name=args.model, dataset_name=args.dataset)
# get_pseudo_roc_for_regression(model, test_dataset, args.regression, args.kfold, args.split, model_name=args.model, dataset_name=args.dataset)
#else:
# fig, (ax1, ax2) = plt.subplots(1, 2)
# get_classification_metrics(model, unshuffled_train, fig, ax1, ax2, args.regression, args.kfold, args.split, args.guidelength, model_name=args.model + ' on train',
# dataset_name=args.dataset, save=False)
# get_classification_metrics(model, val_dataset, fig, ax1, ax2, args.regression, args.kfold, args.split, args.guidelength, model_name=args.model + ' on val', dataset_name=args.dataset,
# save=False)
# classification_analysis_new(args.testset_path, model, test_dataset, args.regression, args.kfold, args.split, args.guidelength, model_name=args.model, dataset_name=args.dataset)
if args.gradients:
integrated_gradients(model, test_dataset, args.regression, args.kfold, args.split, model_name=args.model, dataset_name=args.dataset)
print("done!")
if __name__ == '__main__':
# Enable the following 3 lines if using a graphics card and you get CUDNN_STATUS_INTERNAL_ERROR
args = get_arguments()
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
gpus_to_use = args.gpu_ids.split(",")
for i in range(len(gpus_to_use)):
gpu_id = int(gpus_to_use[i])
tf.config.experimental.set_memory_growth(gpus[gpu_id], True)
predict_ensemble(args)
``` |
{
"source": "jingyibo123/FCGF",
"score": 3
} |
#### File: FCGF/util/file.py
```python
import os
import re
from os import listdir
from os.path import isfile, isdir, join, splitext
def read_txt(path):
"""Read txt file into lines.
"""
with open(path) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
return lines
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path, mode=0o755)
def sorted_alphanum(file_list_ordered):
def convert(text):
return int(text) if text.isdigit() else text
def alphanum_key(key):
return [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(file_list_ordered, key=alphanum_key)
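# Hedged example (added for illustration; the filenames are made up): natural
# sort keeps numeric parts in numeric order,
#   sorted_alphanum(['scan_10.ply', 'scan_2.ply']) -> ['scan_2.ply', 'scan_10.ply']
# whereas plain sorted() would place 'scan_10.ply' before 'scan_2.ply'.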
def get_file_list(path, extension=None):
if extension is None:
file_list = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
else:
file_list = [
join(path, f)
for f in listdir(path)
if isfile(join(path, f)) and splitext(f)[1] == extension
]
file_list = sorted_alphanum(file_list)
return file_list
def get_file_list_specific(path, color_depth, extension=None):
if extension is None:
file_list = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
else:
file_list = [
join(path, f)
for f in listdir(path)
if isfile(join(path, f)) and color_depth in f and splitext(f)[1] == extension
]
file_list = sorted_alphanum(file_list)
return file_list
def get_folder_list(path):
folder_list = [join(path, f) for f in listdir(path) if isdir(join(path, f))]
folder_list = sorted_alphanum(folder_list)
return folder_list
``` |
{
"source": "jingyibo123/MinkowskiEngine",
"score": 2
} |
#### File: MinkowskiEngine/utils/quantization.py
```python
import torch
import numpy as np
from collections.abc import Sequence  # collections.Sequence was removed in Python 3.10
import MinkowskiEngineBackend as MEB
def fnv_hash_vec(arr):
"""
FNV64-1A
"""
assert arr.ndim == 2
# Floor first for negative coordinates
arr = arr.copy()
arr = arr.astype(np.uint64, copy=False)
hashed_arr = np.uint64(14695981039346656037) * \
np.ones(arr.shape[0], dtype=np.uint64)
for j in range(arr.shape[1]):
hashed_arr *= np.uint64(1099511628211)
hashed_arr = np.bitwise_xor(hashed_arr, arr[:, j])
return hashed_arr
def ravel_hash_vec(arr):
"""
Ravel the coordinates after subtracting the min coordinates.
"""
assert arr.ndim == 2
arr = arr.copy()
arr -= arr.min(0)
arr = arr.astype(np.uint64, copy=False)
arr_max = arr.max(0).astype(np.uint64) + 1
keys = np.zeros(arr.shape[0], dtype=np.uint64)
# Fortran style indexing
for j in range(arr.shape[1] - 1):
keys += arr[:, j]
keys *= arr_max[j + 1]
keys += arr[:, -1]
return keys
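# Hedged example (added for illustration; the array values are made up): both
# hash helpers map identical integer rows to identical keys, which is what the
# downstream voxel deduplication relies on.
#   a = np.array([[0, 1, 2], [0, 1, 2], [3, 4, 5]])
#   keys = ravel_hash_vec(a)   # keys[0] == keys[1]; keys[2] differs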
def quantize(coords):
r"""Returns a unique index map and an inverse index map.
Args:
:attr:`coords` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
matrix of size :math:`N \times D` where :math:`N` is the number of
points in the :math:`D` dimensional space.
Returns:
:attr:`unique_map` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
list of indices that defines unique coordinates.
:attr:`coords[unique_map]` is the unique coordinates.
:attr:`inverse_map` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
list of indices that defines the inverse map that recovers the original
coordinates. :attr:`coords[unique_map[inverse_map]] == coords`
Example::
>>> unique_map, inverse_map = quantize(coords)
>>> unique_coords = coords[unique_map]
>>> print(unique_coords[inverse_map] == coords) # True, ..., True
>>> print(coords[unique_map[inverse_map]] == coords) # True, ..., True
"""
assert isinstance(coords, np.ndarray) or isinstance(coords, torch.Tensor), \
"Invalid coords type"
if isinstance(coords, np.ndarray):
assert coords.dtype == np.int32, f"Invalid coords type {coords.dtype} != np.int32"
return MEB.quantize_np(coords.astype(np.int32))
else:
# Type check done inside
return MEB.quantize_th(coords.int())
def quantize_label(coords, labels, ignore_label):
assert isinstance(coords, np.ndarray) or isinstance(coords, torch.Tensor), \
"Invalid coords type"
if isinstance(coords, np.ndarray):
assert isinstance(labels, np.ndarray)
assert coords.dtype == np.int32, f"Invalid coords type {coords.dtype} != np.int32"
assert labels.dtype == np.int32, f"Invalid label type {labels.dtype} != np.int32"
return MEB.quantize_label_np(coords, labels, ignore_label)
else:
assert isinstance(labels, torch.Tensor)
# Type check done inside
return MEB.quantize_label_th(coords, labels.int(), ignore_label)
def sparse_quantize(coords,
feats=None,
labels=None,
ignore_label=-100,
return_index=False,
return_inverse=False,
quantization_size=None):
r"""Given coordinates, and features (optionally labels), the function
generates quantized (voxelized) coordinates.
Args:
:attr:`coords` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
matrix of size :math:`N \times D` where :math:`N` is the number of
points in the :math:`D` dimensional space.
:attr:`feats` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`, optional): a
matrix of size :math:`N \times D_F` where :math:`N` is the number of
points and :math:`D_F` is the dimension of the features. Must have the
same container as `coords` (i.e. if `coords` is a torch.Tensor, `feats`
must also be a torch.Tensor).
:attr:`labels` (:attr:`numpy.ndarray` or :attr:`torch.IntTensor`,
        optional): integer labels associated to each coordinate. Must have the
same container as `coords` (i.e. if `coords` is a torch.Tensor,
`labels` must also be a torch.Tensor). For classification where a set
of points are mapped to one label, do not feed the labels.
:attr:`ignore_label` (:attr:`int`, optional): the int value of the
IGNORE LABEL.
:attr:`torch.nn.CrossEntropyLoss(ignore_index=ignore_label)`
:attr:`return_index` (:attr:`bool`, optional): set True if you want the
indices of the quantized coordinates. False by default.
:attr:`return_inverse` (:attr:`bool`, optional): set True if you want
the indices that can recover the discretized original coordinates.
        False by default. `return_index` must be True when `return_inverse` is True.
Example::
>>> unique_map, inverse_map = sparse_quantize(discrete_coords, return_index=True, return_inverse=True)
>>> unique_coords = discrete_coords[unique_map]
>>> print(unique_coords[inverse_map] == discrete_coords) # True
:attr:`quantization_size` (:attr:`float`, :attr:`list`, or
        :attr:`numpy.ndarray`, optional): the length of each side of the
        hyperrectangle of the grid cell.
Example::
>>> # Segmentation
>>> criterion = torch.nn.CrossEntropyLoss(ignore_index=-100)
>>> coords, feats, labels = MinkowskiEngine.utils.sparse_quantize(
>>> coords, feats, labels, ignore_label=-100, quantization_size=0.1)
>>> output = net(MinkowskiEngine.SparseTensor(feats, coords))
>>> loss = criterion(output.F, labels.long())
>>>
>>> # Classification
>>> criterion = torch.nn.CrossEntropyLoss(ignore_index=-100)
>>> coords, feats = MinkowskiEngine.utils.sparse_quantize(coords, feats)
>>> output = net(MinkowskiEngine.SparseTensor(feats, coords))
>>> loss = criterion(output.F, labels.long())
"""
assert isinstance(coords, np.ndarray) or isinstance(coords, torch.Tensor), \
'Coords must be either np.array or torch.Tensor.'
use_label = labels is not None
use_feat = feats is not None
assert coords.ndim == 2, \
"The coordinates must be a 2D matrix. The shape of the input is " + str(coords.shape)
if return_inverse:
        assert return_index, "return_index must be True when return_inverse is True"
if use_feat:
assert feats.ndim == 2
assert coords.shape[0] == feats.shape[0]
if use_label:
assert coords.shape[0] == len(labels)
dimension = coords.shape[1]
# Quantize the coordinates
if quantization_size is not None:
if isinstance(quantization_size, (Sequence, np.ndarray, torch.Tensor)):
assert len(
quantization_size
) == dimension, "Quantization size and coordinates size mismatch."
if isinstance(coords, np.ndarray):
quantization_size = np.array([i for i in quantization_size])
discrete_coords = np.floor(coords / quantization_size)
else:
quantization_size = torch.Tensor([i for i in quantization_size])
discrete_coords = (coords / quantization_size).floor()
elif np.isscalar(quantization_size): # Assume that it is a scalar
if quantization_size == 1:
discrete_coords = coords
else:
discrete_coords = np.floor(coords / quantization_size)
else:
raise ValueError('Not supported type for quantization_size.')
else:
discrete_coords = coords
discrete_coords = np.floor(discrete_coords)
if isinstance(coords, np.ndarray):
discrete_coords = discrete_coords.astype(np.int32)
else:
discrete_coords = discrete_coords.int()
# Return values accordingly
if use_label:
mapping, colabels = quantize_label(discrete_coords, labels,
ignore_label)
if return_index:
return mapping, colabels
else:
if use_feat:
return discrete_coords[mapping], feats[mapping], colabels
else:
return discrete_coords[mapping], colabels
else:
unique_map, inverse_map = quantize(discrete_coords)
if return_index:
if return_inverse:
return unique_map, inverse_map
else:
return unique_map
else:
if use_feat:
return discrete_coords[unique_map], feats[unique_map]
else:
return discrete_coords[unique_map]
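# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged sketch of what scalar quantization does,
# added for illustration only (not part of the original module). The helper
# name `_example_scalar_quantization` and the sample values are made up;
# np.unique is used purely to illustrate deduplication -- the row order
# returned by the MEB backend above may differ.
def _example_scalar_quantization():
    coords = np.array([[0.05, 0.11], [0.07, 0.14], [0.52, 0.33]])
    # flooring by the voxel size maps the first two points into the same cell
    voxels = np.floor(coords / 0.1).astype(np.int32)   # [[0,1],[0,1],[5,3]]
    unique_voxels = np.unique(voxels, axis=0)           # two occupied cells
    # sparse_quantize(coords, quantization_size=0.1) returns the same set of
    # voxel coordinates (possibly in a different order)
    return unique_voxels
# ---------------------------------------------------------------------------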
``` |
{
"source": "jingyibo123/sudoku_detector_opencv",
"score": 2
} |
#### File: sudoku_detector_opencv/py/eval_detect.py
```python
import cv2
import numpy as np
from timer import Timer
from sudoku_detector import parse_sudoku, find_grid, extract_digits, locate_digit_in_cell
from solve import solver
from svm import train_svm, SZ
import os
import json
import xlsxwriter
SZ = 9
timer = Timer(True)
image_path = '../../resource/images/all/'
generate_digit_path = '../../resource/extractedDigits/'
generate_cell_path = '../../resource/extractedCells/'
def search_testfiles(path = image_path):
filenames = []
for filename in os.listdir(path):
if filename.endswith(".jpg"):
filenames.append(filename.rstrip(".jpg"))
if filename.endswith(".png"):
filenames.append(filename.rstrip(".png"))
return filenames
def extract_digit_samples_from_puzzles():
filenames = search_testfiles()
for name in filenames:
expected = load_dat(name)
img = cv2.imread(image_path + name + '.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
grid = find_grid(gray)
if grid is None:
continue
digit_images = extract_digits(grid)
for pos, digit in digit_images.items():
cv2.imwrite(generate_digit_path+'all/from'+name+'_digit'+str(expected[pos])+'_pos'+str(pos)+'.png', digit)
cv2.imwrite(generate_digit_path+str(expected[pos])+'/'+'from'+name+'_digit'+str(expected[pos])+'_pos'+str(pos)+'.png', digit)
def extract_cells_from_puzzles():
filenames = search_testfiles(image_path)
# read from file
with open('../../resource/images_grid_pts.txt', 'r') as f:
img_grid_pts = json.load(f)
for name in filenames:
expected = load_dat(name)
img = cv2.imread(image_path + name + '.jpg')
square = np.asarray(img_grid_pts[name], np.float32)
grid_rows = int(square[3][1] - square[0][1] + square[2][1] - square[1][1]) // 2
grid_cols = int(square[1][0] - square[0][0] + square[2][0] - square[3][0]) // 2
h = np.array([ [0,0],[grid_cols - 1,0],[grid_cols - 1,grid_rows - 1],[0,grid_rows - 1] ],np.float32)
# perspective transformation
retval = cv2.getPerspectiveTransform(square,h)
grid = cv2.warpPerspective(img,retval,(grid_cols, grid_rows))
v_lines = [int(i * grid.shape[1] / SZ) for i in range(0, SZ + 1)]
h_lines = [int(i * grid.shape[0] / SZ) for i in range(0, SZ + 1)]
for n in range(81):
cell = grid[h_lines[n//SZ]:h_lines[n//SZ + 1], v_lines[n%SZ]:v_lines[n%SZ + 1]]
            cell = cv2.resize(cell, (128, 128))
cv2.imwrite(generate_cell_path+'all/from'+name+'_digit'+str(expected[n])+'_pos'+str(n)+'.png', cell)
cv2.imwrite(generate_cell_path+str(expected[n])+'/'+'from'+name+'_digit'+str(expected[n])+'_pos'+str(n)+'.png', cell)
pass
def load_dat(filename):
digits = []
with open(image_path + filename +".dat") as f:
txt_lines = f.readlines()[2:]
for l in txt_lines:
digits.extend([int(s) for s in l.strip().split(' ')])
return(digits)
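# Editor's note (assumption, not documented in this excerpt): judging from the
# slicing above, each *.dat file appears to hold two header lines followed by
# rows of space-separated digits (one digit per cell, 0 for an empty cell),
# flattened here into a list of 81 integers in row-major order.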
def eval_digits(result, expected):
re = {}
result = np.int8(result).ravel()
expected = np.int8(expected).ravel()
total = 0
for ex in expected:
if ex != 0:
total = total + 1
re['total'] = total
# re['total'] = int(np.count_nonzero(expected))
re['missed'] = 0
if np.count_nonzero(result) < np.count_nonzero(expected):
re['missed'] = int(np.count_nonzero(expected) - np.count_nonzero(result))
re['wrong'] = int(np.sum(result != expected) - re['missed'])
return re
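# Editor's note: a small worked example of the metric above (not in the
# original source). With result = [0, 5, 3] and expected = [4, 5, 3]:
# total = 3 (non-zero expected cells), missed = 3 - 2 = 1 (one detection lost),
# wrong = sum(result != expected) - missed = 1 - 1 = 0, so
# eval_digits([0, 5, 3], [4, 5, 3]) == {'total': 3, 'missed': 1, 'wrong': 0}.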
def eval_all():
imgs = search_testfiles(image_path)
incorrect = {}
eval_result = {}
timer.timeit()
notGrid = 0
for name in imgs:
expected = load_dat(name)
digits = parse_sudoku(image_path + name + '.jpg')
if digits is None:
print("grid not found", name)
notGrid = notGrid + 1
continue
re = eval_digits(digits, expected)
eval_result[name] = re
if sum(digits) != 0:
for i in range(81):
if expected[i] != digits[i]:
inc = {'pos': i, 'expected': expected[i], 'result': digits[i]}
incorrect[name] = inc
print("grid not found : " + str(notGrid))
timer.timeit('all images processed')
print(incorrect)
total = [r['total'] for k, r in eval_result.items()]
miss = [r['missed'] for k, r in eval_result.items()]
wrong = [r['wrong'] for k, r in eval_result.items()]
print('total :', sum(total), 'correct :', sum(total)-sum(miss)-sum(wrong), 'missed :', sum(miss), 'wrong :', sum(wrong), 'correct ratio :', 1-sum(wrong)/(sum(total)-sum(miss)))
return eval_result
def generate_digit_box():
"""
    use opencv to locate digits in grid images, output the bounding box coordinates and draw the box on the grid image.
"""
imgs = search_testfiles('../../resource/output/grid/all/')
# read from file
img_grid_pts = dict()
for name in imgs:
expected = load_dat(name)
boxes = dict()
grid = cv2.imread('../../resource/output/grid/all/' + name + '.png')
grid_gray = cv2.cvtColor(grid,cv2.COLOR_BGR2GRAY)
grid_blur = cv2.GaussianBlur(grid_gray,(7, 7),0)
grid_thresh = cv2.adaptiveThreshold(grid_blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 2)
# Use morphology to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
grid_thresh = cv2.morphologyEx(grid_thresh, cv2.MORPH_OPEN, kernel)
v_lines = [int(i * grid.shape[1] / SZ) for i in range(0, SZ + 1)]
h_lines = [int(i * grid.shape[0] / SZ) for i in range(0, SZ + 1)]
for n in range(81):
if expected[n] == 0:
continue
cell_raw = grid_thresh[h_lines[n//SZ]:h_lines[n//SZ + 1], v_lines[n%SZ]:v_lines[n%SZ + 1]]
digit = locate_digit_in_cell(n, cell_raw)
if digit is None or len(digit) != 4 :
print("digit not located", name, n)
continue
for d in digit:
d[0] = d[0] + v_lines[n%SZ]
d[1] = d[1] + h_lines[n//SZ]
grid = cv2.line(grid, tuple(digit[0]), tuple(digit[1]), (255,0,0),1)
grid = cv2.line(grid, tuple(digit[1]), tuple(digit[2]), (255,0,0),1)
grid = cv2.line(grid, tuple(digit[2]), tuple(digit[3]), (255,0,0),1)
grid = cv2.line(grid, tuple(digit[3]), tuple(digit[0]), (255,0,0),1)
boxes[n] = digit
cv2.imwrite('../../resource/output/grid/all_boxes/' + name + '.png', grid)
img_grid_pts[name] = boxes
# write to file
with open('../../resource/grid_digit_boxes.txt', 'w') as f:
json.dump(img_grid_pts, f)
def draw_digit_box():
path = '../../resource/output/grid/all/'
imgs = search_testfiles(path)
# read from file
with open('../../resource/grid_digit_boxes.txt', 'r') as f:
img_grid_pts = json.load(f)
for name in imgs:
expected = load_dat(name)
grid = cv2.imread('../../resource/output/grid/all/' + name + '.png')
for n in range(81):
if expected[n] == 0:
continue
if str(n) not in img_grid_pts[name]:
print("box not defined for ", name, " cell no.", n)
continue
digit = img_grid_pts[name][str(n)]
grid = cv2.line(grid, tuple(digit[0]), tuple(digit[1]), (255,0,0),1)
grid = cv2.line(grid, tuple(digit[1]), tuple(digit[2]), (255,0,0),1)
grid = cv2.line(grid, tuple(digit[2]), tuple(digit[3]), (255,0,0),1)
grid = cv2.line(grid, tuple(digit[3]), tuple(digit[0]), (255,0,0),1)
cv2.imwrite('../../resource/output/grid/all_boxes/' + name + '.png', grid)
def class_int_to_text(row_label):
if row_label == 1:
return 'one'
if row_label == 2:
return 'two'
if row_label == 3:
return 'three'
if row_label == 4:
return 'four'
if row_label == 5:
return 'five'
if row_label == 6:
return 'six'
if row_label == 7:
return 'seven'
if row_label == 8:
return 'eight'
if row_label == 9:
return 'nine'
else:
return None
def generate_grid_digits_label_csv():
path = '../../resource/output/grid/all/'
imgs = search_testfiles(path)
# read from file
with open('../../resource/grid_digit_boxes.txt', 'r') as f:
img_grid_pts = json.load(f)
sep = ";"
with open('../../resource/grid_digit_labels.csv', 'w') as f:
f.write("filename"+sep+"width"+sep+"height"+sep+"class"+sep+"xmin"+sep+"ymin"+sep+"xmax"+sep+"ymax\n")
for name in imgs:
expected = load_dat(name)
grid = cv2.imread('../../resource/output/grid/all/' + name + '.png')
for n in range(81):
if expected[n] == 0:
continue
if str(n) not in img_grid_pts[name]:
print("box not defined for ", name, " cell no.", n)
continue
f.write(name + ".png" + sep + str(grid.shape[1]) + sep + str(grid.shape[0]) + sep + class_int_to_text(expected[n]) + sep
+ str(img_grid_pts[name][str(n)][0][0]) + sep + str(img_grid_pts[name][str(n)][0][1]) + sep
+ str(img_grid_pts[name][str(n)][2][0]) + sep + str(img_grid_pts[name][str(n)][2][1]) + "\n")
def generate_puzzle_grid_label_csv():
path = '../../resource/images/all/'
imgs = search_testfiles(path)
# read from file
with open('../../resource/images_grid_pts.txt', 'r') as f:
img_grid_pts = json.load(f)
sep = ";"
with open('../../resource/images_grid_pts.csv', 'w') as f:
f.write("filename"+sep+"width"+sep+"height"+sep+"class"+sep+"xmin"+sep+"ymin"+sep+"xmax"+sep+"ymax\n")
for name in imgs:
img = cv2.imread('../../resource/images/all/' + name + '.jpg')
max = np.asarray(img_grid_pts[name], dtype=np.int32).max(axis=0)
min = np.asarray(img_grid_pts[name], dtype=np.int32).min(axis=0)
f.write(name + ".jpg" + sep + str(img.shape[1]) + sep + str(img.shape[0]) + sep + "sudoku" + sep
+ str(min[0]) + sep + str(min[1]) + sep
+ str(max[0]) + sep + str(max[1]) + "\n")
def eval_one(name):
expected = load_dat(name)
digits = parse_sudoku(image_path + name + '.jpg')
if digits is None:
print("grid not found")
return
# TODO loop result to find wrong ones
incorrect = []
for i in range(81):
if expected[i] != digits[i]:
inc = {'pos': i, 'expected': expected[i], 'result': digits[i]}
incorrect.append(inc)
print("incorrect : ", incorrect)
re = eval_digits(digits, expected)
print(re)
def xlsx(re):
# write to xlsx
with xlsxwriter.Workbook('eval_all.xlsx') as book:
# Raw data
sheet = book.add_worksheet('raw')
# fetch data
# Fill worksheet
# write column names
sheet.write(0, 1, "Total")
sheet.write(0, 2, "missed")
sheet.write(0, 3, "wrong")
j = 1
for k, r in re.items():
sheet.write(j, 0, k)
sheet.write(j, 1, r['total'])
sheet.write(j, 2, r['missed'])
sheet.write(j, 3, r['wrong'])
j = j + 1
if __name__ == '__main__':
# train_svm()
# eval_one("image1024")# high resolution, clear, mid curl
# eval_one("image211") # middle resolution, clear, mid curl
# eval_one("image25") # low resolution, blurry, mid curl
# eval_one("image17") # low resolution, blurry
# parse_sudoku("../../resource/cascade.png")
# eval_one("image153") # easy
# eval_one("image34") # blurry
# eval_one("image1087")
# re = eval_all()
# extract_cells_from_puzzles()
# extract_digit_samples_from_puzzles()
# xlsx(re)
```
#### File: sudoku_detector_opencv/py/timer.py
```python
import time
class Timer():
def __init__(self, active):
self.time = time.time()
self.start_time = time.time()
self.active = active
def timeit(self, msg = None):
if msg and self.active:
print('--------------------Time : ', '{0:.2f}'.format(time.time()*1000.0 - self.time*1000.0) , 'ms used for ____', msg )
self.time = time.time()
def total_time(self, msg = ''):
if self.active:
print('--------------------Total time used '+msg+' : ', '{0:.2f}'.format(time.time()*1000.0 - self.start_time*1000.0), ' ms')
``` |
{
"source": "Jingyi-hu/reportdetails",
"score": 3
} |
#### File: reportdetails/src/gnss_process.py
```python
import gnssmapper as gm
import warnings
warnings.filterwarnings("ignore")
import copy
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import geopandas as gpd
def create_geodata(x):
"""
    create one geodataframe from an input list where each element is a geopandas dataframe of gnss receiverpoints
Parameters
----------
x : list
        list of multiple gnss geopandas dataframes.
Returns
-------
geodataframe:
combine all elements in the list of gnss receiverpoints including:
time
Cn0DbHz
svid
receiver position (as point geometry)
"""
list_len = len(x)
pilot_log = pd.concat(x[i][['time','Cn0DbHz','svid','geometry']] for i in range(list_len))
return pilot_log
def time_form(gdf):
"""
change the time form in geodataframe from microseconds to seconds
Parameters
----------
gdf : geodataframe
geodataframe of gnss data which has column of time
Returns
-------
geodataframe:
        new time form in %Year-%month-%dayT%Hour:%Minute:%Second
"""
gdf['time'] = gdf['time'].dt.strftime("%Y-%m-%dT%H:%M:%S")
return gdf
def valid_svid(gdf):
"""
    remove invalid satellites from the geodataframe
Parameters
----------
gdf : geodataframe
geodataframe of gnss data which has column of svid
Returns
-------
geodataframe:
        geodataframe of gnss data restricted to gps, glonass, beidou and galileo satellites
"""
#define all valid satellites
svid = ('G01', 'G02', 'G03', 'G04', 'G05', 'G06', 'G07', 'G08', 'G09', 'G10', 'G11', 'G12', 'G13', 'G14', 'G15', 'G16', 'G17', 'G18', 'G19', 'G20', 'G21', 'G22', 'G23', 'G24', 'G25', 'G26', 'G27', 'G28', 'G29', 'G30', 'G31', 'G32',
'R01', 'R02', 'R03', 'R04', 'R05', 'R06', 'R07', 'R08', 'R09', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R24',
'C01', 'C02', 'C03', 'C04', 'C05', 'C06', 'C07', 'C08', 'C09', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26', 'C27', 'C28', 'C29', 'C30', 'C31', 'C32', 'C33', 'C34', 'C35', 'C36', 'C37',
'E01', 'E02', 'E03', 'E04', 'E05', 'E06', 'E07', 'E08', 'E09', 'E10', 'E11', 'E12', 'E13', 'E14', 'E15', 'E16', 'E17', 'E18', 'E19', 'E20', 'E21', 'E22', 'E23', 'E24', 'E25', 'E26', 'E27', 'E28', 'E29', 'E30', 'E31', 'E32', 'E33', 'E34', 'E35', 'E36')
gdf = gdf.loc[gdf['svid'].isin(svid),['svid','time','Cn0DbHz','geometry']]
return gdf
def data_format(gdf):
"""
Adjust data format
Parameters
----------
gdf : geodataframe
geodataframe of gnss data
Returns
-------
geodataframe:
geodataframe of gnss data with new format and sea level
"""
#Adjust data format
gdf.time = gdf.time.astype('datetime64')
gdf.svid = gdf.svid.astype('string')
#re-define ground sea level of altitude
gdf.geometry=gpd.points_from_xy(gdf.geometry.x,gdf.geometry.y,float(381))
return gdf
def data_sort(gdf,str):
"""
sort the geodataframe by special string
Parameters
----------
gdf : geodataframe
geodataframe of gnss data
str: sort based on this string
Returns
-------
geodataframe:
geodataframe of gnss data after sorting
"""
gdf = gdf.sort_values(by = [str])
return gdf
def day_night(gdf):
"""
divide the geodataframe by day and night
Parameters
----------
gdf : geodataframe
geodataframe of gnss data
Returns
-------
geodataframe:
        two geodataframes of gnss data: one with
        data collected during the day, the other with
        data collected during the night
"""
    #add a day-of-month column to distinguish the different collection days
gdf['day'] = gdf['time'].apply(lambda r:r.day)
gdf_day = gdf[gdf['day']==1]
gdf_night = gdf[gdf['day']==3]
return gdf_day,gdf_night
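# ---------------------------------------------------------------------------
# Editor's note: a hedged sketch of one plausible composition of the helpers
# above (not part of the original file). `gdf_list` is assumed to be a list of
# gnssmapper receiverpoint GeoDataFrames loaded elsewhere.
# >>> pilot_log = create_geodata(gdf_list)
# >>> pilot_log = valid_svid(pilot_log)
# >>> pilot_log = data_format(pilot_log)
# >>> pilot_log = data_sort(pilot_log, 'time')
# >>> log_day, log_night = day_night(pilot_log)
# ---------------------------------------------------------------------------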
```
#### File: reportdetails/tests/test_distance.py
```python
import unittest
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import geopandas as gpd
import math
from math import sin,cos
#The algorithm is only for the data of east longitude and north latitude
def distance(longitudeA,latitudeA,longitudeB,latitudeB):
#define radius of earth
R = 6371.004
MLonA = longitudeA
MLonB = longitudeB
#since points are at north latitude
MLatA = 90 - latitudeA
MLatB = 90 - latitudeB
C = sin(MLatA)*sin(MLatB)*cos(MLonA-MLonB)+cos(MLatA)*cos(MLatB)
    #here the factor of 1000 is ignored since we want to test in kilometers
distance = round(R*np.arccos(C)*math.pi/180,0)
return distance
class TestDistance(unittest.TestCase):
def test_distance(self):
#Approximately 111 kilometers at a longitude
self.assertEqual(distance(190,35,191,35), 111)
#Approximately 111 kilometers at a latitude
self.assertEqual(distance(190,35,190,36), 111)
unittest.main()
```
#### File: reportdetails/tests/test_random_frac.py
```python
import unittest
import pandas as pd
def random_frac(gdf,x):
df = pd.DataFrame()
sample = gdf.sample(frac=(x+1)/10,axis=0)
df = df.append(sample)
return df
class TestRandomFrac(unittest.TestCase):
def test_random_frac(self):
data = {
'person':['a','b','c','d','e','f','g','h','i','j'],
'age':[24,23,30,4,21,53,20,45,48,60]}
df = pd.DataFrame(data)
self.assertEqual(len(random_frac(df,3)), 4)
unittest.main()
```
#### File: reportdetails/tests/test_rms.py
```python
import unittest
import math
def rmse(x):
a = sum(((x[j] -405) **2) for j in range(len(x)))
b = a/len(x)
c = math.sqrt(b)
return c
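# Editor's note: worked example for the fixed reference value of 405 used above
# (not in the original source). For [400, 410, 420, 430, 440, 450] the squared
# deviations from 405 are 25, 25, 225, 625, 1225, 2025; their sum is 4150,
# 4150 / 6 = 691.67, and sqrt(691.67) = 26.2996, which is what the test below checks.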
class TestRMSE(unittest.TestCase):
def test_rmse(self):
numbers = [400,410,420,430,440,450]
self.assertAlmostEqual(int(rmse(numbers)), 26)
self.assertEqual(round(rmse(numbers),7), 26.2995564)
unittest.main()
``` |
{
"source": "jingyi-ju/algorithm",
"score": 4
} |
#### File: jingyi-ju/algorithm/selectionSort.py
```python
from helper import swap
def selection_sort(list):
''' best and worst case runtime: Θ(n^2) '''
    for i in range(0,len(list)-1): # after n-1 iterations, the n-1 smallest elements sit in the sorted left subarray
min = i
        for j in range(i+1,len(list)): # elements are compared in both the best and the worst case
if list[j] < list[min]:
min = j
swap(list,min,i)
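# Editor's note: a short worked trace (not in the original file), assuming
# helper.swap(lst, a, b) exchanges the elements at indices a and b in place.
# selection_sort([5, 2, 4, 1]):
# i=0: minimum of [5,2,4,1] is at index 3 -> swap -> [1, 2, 4, 5]
# i=1: minimum of the remainder [2,4,5] is already at index 1 -> no change
# i=2: minimum of [4,5] is already at index 2 -> no change; result [1, 2, 4, 5]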
``` |
{
"source": "jingyonghou/STD_Q2014",
"score": 3
} |
#### File: STD_Q2014/script/getPhone2StateMap.py
```python
import sys
SIL_SET={"int", "pau", "spk"}
def append_dim(num, dim_count, dim_list):
for i in range(num):
dim_list.append(dim_count)
dim_count += 1
if __name__=="__main__":
if len(sys.argv) < 3:
print("USAGE: " + sys.argv[0] + " phone_list phone_2_state_map\n")
exit(1)
phone_list = open(sys.argv[1]).readlines()
fid = open(sys.argv[2], "w")
dim_count=0
phone_2_state_map = [[]]
for phone in phone_list:
if(phone.strip() in SIL_SET):
append_dim(3, dim_count, phone_2_state_map[0])
dim_count += 3
else:
phone_2_state_map.append([])
append_dim(3, dim_count, phone_2_state_map[-1])
dim_count += 3
for i in range(len(phone_2_state_map)):
for j in range(len(phone_2_state_map[i])):
fid.write("%d %d\n" % (i, phone_2_state_map[i][j]))
fid.close()
```
#### File: STD_Q2014/script/getPhoneBoundary.py
```python
import sys
from dataIn import HTKFeat_read
import numpy as np
import distance_matrix as DM
THRESHOLD=0.5
DELAY=1
def is_boundary(SIL_probability_mean, threshold=THRESHOLD):
return SIL_probability_mean < threshold
def time_delay(data, step):
dim = data.shape[1]
if(step == 0):
return data
elif(step>=0):
new_data = data[step:, :]
pad_data = np.tile((np.zeros([1, dim])+1)/dim, [step, 1])
return np.concatenate([new_data, pad_data])
else:
new_data = data[0:step, :]
        pad_data = np.tile((np.zeros([1, dim])+1)/dim, (-step, 1))  # pad with uniform rows, mirroring the positive-step branch
return np.concatenate([new_data, pad_data])
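# Editor's note: small shape example for time_delay (not in the original file).
# For a posteriorgram `data` of shape (4, 3) and step=1, the first row is
# dropped and one uniform row [1/3, 1/3, 1/3] is appended, so the output keeps
# shape (4, 3) and can be compared frame-by-frame with the original data.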
def write_list(fid, list_):
for i in range(len(list_)-1):
fid.writelines(str(list_[i])+" ")
fid.writelines(str(list_[-1])+ "\n")
if __name__=="__main__":
if len(sys.argv) < 5:
print("USAGE: " + sys.argv[0] + " data_dir data_list output_data_type input_data_types\n")
exit(1)
data_dir = sys.argv[1]
data_lists = open(sys.argv[2]).readlines()
output_data_type = sys.argv[3]
input_data_types = []
for i in range(4, len(sys.argv)):
input_data_types.append(sys.argv[i])
input_data_type_len = len(input_data_types)
for i in range(len(data_lists)):
utterance_id = data_lists[i].strip()
fid = open(data_dir + utterance_id + "." + output_data_type, "w")
differences = []
for input_data_type in input_data_types:
input_file = data_dir + utterance_id + "." + input_data_type
data = HTKFeat_read(input_file).getall()
delay_data = time_delay(data, DELAY)
differences.append(DM.innetProduct_dot(data, delay_data))
write_list(fid, np.mean(differences, axis=0))
fid.close()
```
#### File: STD_Q2014/script/PhonePosteriorVAD.py
```python
import sys
from dataIn import HTKFeat_read
import numpy as np
THRESHOLD=0.5
def VAD(SIL_probability_mean, threshold=THRESHOLD):
return SIL_probability_mean < threshold
def write_list(fid, list_):
for i in range(len(list_)-1):
fid.writelines(str(list_[i]))
fid.writelines(str(list_[-1]))
if __name__=="__main__":
if len(sys.argv) < 5:
print("USAGE: " + sys.argv[0] + " data_dir data_list output_data_type input_data_types\n")
exit(1)
data_dir = sys.argv[1]
data_lists = open(sys.argv[2]).readlines()
output_data_type = sys.argv[3]
input_data_types = []
for i in range(4, len(sys.argv)):
input_data_types.append(sys.argv[i])
input_data_type_len = len(input_data_types)
for i in range(len(data_lists)):
utterance_id = data_lists[i].strip()
SIL_probability = []
for input_data_type in input_data_types:
input_file = data_dir + utterance_id + "." + input_data_type
data = HTKFeat_read(input_file).getall()
SIL_probability.append(data[:, 0])
SIL_probability_mean = np.mean(SIL_probability, axis=0)
VAD_result = VAD(SIL_probability_mean)
        VAD_result_int = [int(element) for element in VAD_result]
        output_file = data_dir + utterance_id + "." + output_data_type
        fid = open(output_file, "w")
        write_list(fid, VAD_result_int)
fid.close()
``` |
{
"source": "jingyonghou/TIMIT_STD",
"score": 3
} |
#### File: TIMIT_STD/DownSampling_STD/Distance.py
```python
import sys
import numpy as np
def KLdivergence(matrix_a, matrix_b, symmetrized=1):
height = matrix_a.shape[0]
width = matrix_b.shape[0]
matrix_a_log = np.log(matrix_a)
matrix_b_log = np.log(matrix_b)
vector_a_log_a = np.zeros([height, 1]);
vector_b_log_b = np.zeros([width, 1]);
for i in range(height):
vector_a_log_a[i, 0] = np.dot(np.array([matrix_a[i, :]]), np.array([matrix_a_log[i, :]]).T)[0][0]
for i in range(width):
vector_b_log_b[i, 0] = np.dot(np.array([matrix_b[i, :]]), np.array([matrix_b_log[i, :]]).T)[0][0]
matrix_a_log_b = np.dot(matrix_a, matrix_b_log.T)
matrix_b_log_a = np.dot(matrix_b, matrix_a_log.T)
if symmetrized==1:
distance_matrix = 0.5 * (vector_a_log_a - matrix_a_log_b) + 0.5 * (vector_b_log_b.T - matrix_b_log_a.T)
else:
distance_matrix = vector_a_log_a - matrix_a_log_b
return distance_matrix
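# Editor's note: illustration only (not part of the original file). For rows
# that are probability distributions, the block above computes
# D(a||b) = sum_i a_i*log(a_i/b_i), and with symmetrized=1 each entry is
# 0.5*D(a||b) + 0.5*D(b||a). The helper below is a hypothetical sanity check.
def _example_symmetrized_kl():
    a = np.array([[0.7, 0.3]])
    b = np.array([[0.5, 0.5]])
    direct = 0.5 * np.sum(a * np.log(a / b)) + 0.5 * np.sum(b * np.log(b / a))
    return KLdivergence(a, b)[0, 0], direct  # the two values agree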
def innerProduct(matrix_a, matrix_b):
distance_matrix = np.dot(matrix_a, matrix_b.T)
return 1 - distance_matrix
distance_function={"KL-divergence":KLdivergence, "cosine":innerProduct, "inner-product":innerProduct}
def distance(matrix_a, matrix_b, distance_type, sub_num=40):
height = matrix_a.shape[0]
width = matrix_b.shape[0]
dim = matrix_a.shape[1]
distance_matrix = np.zeros([height, width])
if dim%sub_num != 0:
print("Error: dimmension error!\n")
exit(1)
else:
encode_num = dim//sub_num
for i in range(encode_num):
mydistance=distance_function[distance_type]
distance_matrix = distance_matrix + mydistance(matrix_a[:, i*sub_num:(i+1)*sub_num], matrix_b[:, i*sub_num:(i+1)*sub_num])
return distance_matrix/encode_num
```
#### File: TIMIT_STD/DownSampling_STD/DTW.py
```python
import numpy as np
def subsequenceDTW(dist,debug=False):
    '''subsequence DTW (length-normalised); returns (cost, path)'''
if debug:
cost,path = _python_subseq_dtw(dist)
else:
cost,path = _subseq_dtw(dist)
return cost,path
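# Editor's note: hedged usage sketch (not part of the original file). `dist` is
# assumed to be a query-by-document frame distance matrix, e.g. produced by the
# Distance module elsewhere in this repository; rows index query frames and
# columns index document frames.
# >>> dist = np.random.rand(5, 50)
# >>> cost, path = subsequenceDTW(dist, debug=True)  # pure-python fallback
# >>> # cost is the length-normalised matching cost, path a sequence of (i, j) pairs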
def _python_subseq_dtw(dist):
'''Pure python, slow version of DTW'''
nx,ny = dist.shape
cost = np.zeros(dist.shape)
trace = np.zeros(dist.shape,dtype=np.int)
length = np.zeros(dist.shape,dtype=np.int)
length[0,:] = 1
length[:,0] = range(1,nx+1)
cost[0,:] = dist[0,:]
cost[:,0] = np.cumsum(dist[:,0])
trace[0,:] = 1
trace[:,0] = 0
for i in xrange(1,nx):
for j in xrange(1,ny):
accum_cost = dist[i,j] + np.array((cost[i-1,j], cost[i,j-1], cost[i-1,j-1]))
accum_len = np.array((length[i-1,j],length[i,j-1],length[i-1,j-1]+1))+1
avg_cost = accum_cost/accum_len
trace[i,j] = avg_cost.argmin()
length[i,j] = accum_len[trace[i,j]]
cost[i,j] = accum_cost[trace[i,j]]
cost[nx-1,:] /= length[nx-1,:]
dtwCost = cost[nx-1,:].min()
endPoint = cost[nx-1,:].argmin()
path = [(nx-1,endPoint)]
    i, j = nx-1, endPoint  # start the back-trace from the best end point in the last row
while not (i == 0):
s = trace[i,j]
if s == 0:
i -= 1
elif s == 1:
j -= 1
else:
i -= 1
j -= 1
path.append((i,j))
return dtwCost,np.array(path)[::-1]
# Shenanigans for running the fast C version of DTW,
# but falling back to pure python if needed
try:
from scipy.weave import inline
from scipy.weave.converters import blitz
except ImportError:
_subseq_dtw = _python_subseq_dtw
else:
def _subseq_dtw(dist):
'''Fast DTW, with inlined C'''
nx,ny = dist.shape
rv = [0.0,0] #dtwcost,p
path = np.zeros((nx+ny,2),dtype=np.int)
code = '''
int i,j;
double* cost = new double[ny];
for (j=0; j<ny; ++j) cost[j] = dist(0,j);
char** trace = new char*[nx];
int** length = new int*[nx];
for (i=0; i<nx; ++i) {
trace[i] = new char[ny];
trace[i][0] = 0;
length[i] = new int[ny];
length[i][0] = i+1;
}
for (j=0; j<ny; ++j) {
trace[0][j] = 1;
length[0][j] =1;
}
double diag,c;
for (i=1; i<nx; ++i){
diag = cost[0];
cost[0] += dist(i,0);
for (j=1; j<ny; ++j){
// c <- min(cost[j],cost[j-1],diag), trace <- argmin
////////////////////////////////////////////////////
double avg_cost1 = (cost[j]+dist(i,j))/(length[i-1][j]+1);
double avg_cost2 = (cost[j-1]+dist(i,j))/(length[i][j-1]+1);
double avg_diag = (diag+dist(i,j))/(length[i-1][j-1]+2);
if (avg_diag < avg_cost1){
if (avg_diag < avg_cost2){
c = diag;
trace[i][j] = 2;
length[i][j] = length[i-1][j-1]+2;
} else {
c = cost[j-1];
trace[i][j] = 1;
length[i][j] = length[i][j-1]+1;
}
} else if (avg_cost1 < avg_cost2){
c = cost[j];
trace[i][j] = 0;
length[i][j] = length[i-1][j]+1;
} else {
c = cost[j-1];
trace[i][j] = 1;
length[i][j] = length[i][j-1]+1;
}
diag = cost[j];
cost[j] = dist(i,j) + c;
}
}
rv[0] = cost[0]/length[nx-1][0];
for(i=1;i<ny;i++)
{
float avg_cost = cost[i]/length[nx-1][i];
if(rv[0]>avg_cost)
{
rv[0] = avg_cost;
j = i;
}
}
delete[] cost;
i = nx-1;
int p = nx+ny-1;
for (;p>=0; --p){
path(p,0) = i;
path(p,1) = j;
if (i==0) break;
switch (trace[i][j]){
case 0: --i; break;
case 1: --j; break;
default: --i; --j;
}
}
for (i=0; i<nx; ++i) delete[] trace[i];
delete[] trace;
for (i=0; i<nx; ++i) delete[] length[i];
delete[] length;
rv[1] = p;
'''
inline(code,('nx','ny','rv','dist','path'),type_converters=blitz)
return rv[0],path[rv[1]:]
```
#### File: TIMIT_STD/Partial_STD/split.py
```python
import sys
import numpy as np
import os
import shutil
def split_file_by_row(filepath,newfilenum):
file_object=open(filepath,'r')
s=filepath.rfind('/')
fileID=filepath.strip()[s+1:]
try:
i=0
all_the_text=[]
for line in file_object:
i=i+1
all_the_text.append(line)
#print line
n=i
        print(n)
index=n//newfilenum
        print(index)
for i in range(1,newfilenum):
file_object_write=open(fileID+'%d' % i,'w')
for line in all_the_text[int((i-1)*index):int(i*index)]:
file_object_write.write(line)
file_object_write.close()
file_object_write=open(fileID+'%d' % newfilenum,'w')
for line in all_the_text[int((newfilenum-1)*index):]:
file_object_write.write(line)
file_object_write.close()
finally:
file_object.close()
if __name__ == '__main__':
if len(sys.argv)<3:
print("USAGE: fileList splitNum")
fileList=sys.argv[1]
splitNum=int(sys.argv[2])
split_file_by_row(fileList,splitNum)
```
#### File: TIMIT_STD/script/evaluation.py
```python
import numpy as np
import sys
def longest_common_substring(s1, s2): # dynamic programming over a table of common-suffix lengths
m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
longest, x_longest = 0, 0
for x in xrange(1, 1 + len(s1)):
for y in xrange(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest: x_longest]
def relevant(querylistitem,doc):
ind_s = querylistitem.rfind('/')
ind_e = querylistitem.rfind('_')
query = querylistitem.strip()[ind_s+1:ind_e]
fid = open(doc.strip()+'.WRD')
lines = fid.readlines()
fid.close()
for line in lines:
word = line.split()[2]
lcs = longest_common_substring(word,query)
#print 'linelength:'+str(len(lcs)) +'\t' + 'query ' + '"' + query + '"' + ' length: ' + str(len(query))
#print 'ratio: ' + str(float(len(lcs))/len(query))
if float(len(lcs))/len(query)>=0.8 :
#if word != query:
# print("Warnning: " + word + ", " + query + "\n" )
return True
return False
def evaluate(costlist, test_dir, doclist, keyword_dir, querylist):
'''
costlist = [query_1_list,...,query_n_list]
doclist: the path list for the every doc
querylist: the path list for the querys
'''
PatNset = []
APset = []
Pat10set = []
for i in range(0,len(querylist)):
ranklist = np.array(costlist[i]).argsort()
# percision[i] = num_rele/(i+1),num_rele is the number of the hits in the first i
Precision = []
num_rele = 0.0
sum_precision = 0.0
#print 'query' + str(i) + ':' + str(len(ranklist));
for j in range(0,len(ranklist)):
doc = doclist[ranklist[j]]
isRele = False
if relevant(keyword_dir+querylist[i], test_dir+doc):
#print 'true'
num_rele = num_rele+1
isRele = True
Precision.append(num_rele/(j+1))
if isRele == True:
sum_precision += Precision[-1]
# print j+1,Precision[-1]
#ind_s = querylist[i].rfind('/')
#ind_e = querylist[i].rfind('_')
#word = querylist[i].strip()[ind_s+1:ind_e]
#fid = open("std_out.log","a")
#fid.write("The hit numbers in the first 500 utterances for keyword " + word + " :\t " + str(Precision[499]*500) + "/" + str(num_rele)+"\t"+ str(Precision[499]*500/num_rele) +"\n")
#fid.close()
Pat10set.append(Precision[9])
N = int(num_rele)
#print querylist[i].strip()[31:-2],N
PatNset.append(Precision[N-1])
APset.append(sum_precision/N)
#print(str(Pat10set[-1]) + " " + str(PatNset[-1]) + " " + str(APset[-1]))
print(str(APset[-1]) + "\t" + str(PatNset[-1]) + "\t" + str(Pat10set[-1]))
num_of_querys = len(querylist)
MAP = sum(APset)/num_of_querys
PatN = sum(PatNset)/num_of_querys
Pat10 = sum(Pat10set)/num_of_querys
# print PatNset
# print Pat10set
return MAP,PatN,Pat10
if __name__=='__main__':
if len(sys.argv) < 5:
        print('USAGE: result_dir keywordlist test_dir testlist')
exit(1)
result_dir = sys.argv[1]
fid = open(sys.argv[2])
keywordlist = fid.readlines()
fid.close()
test_dir = sys.argv[3]
fid = open(sys.argv[4])
testlist = fid.readlines()
fid.close()
costlist = []
for keyword in keywordlist:
result_fid = open(result_dir + keyword.strip() + ".RESULT")
resList = result_fid.readlines()
        result_fid.close()
        scoreList = []
        for res in resList:
            #testID,tbeg,dur,score = res.strip().split();
            score = res.strip()
            scoreList.append(float(score))
        costlist.append(scoreList)
MAP,PatN,Pat10 = evaluate(costlist, test_dir, testlist, result_dir, keywordlist)
print('MAP=%.3f PatN=%.3f Pat10=%.3f'%(MAP,PatN,Pat10))
``` |
{
"source": "jingyonghou/XY_QByE_STD",
"score": 3
} |
#### File: XY_QByE_STD/script/get_keyword_audio.py
```python
import sys
import numpy as np
from XiaoYingWave import XiaoYingWave
import json
import log
def build_wav_dict(text_file):
wav_dict = {}
text_list = open(text_file).readlines()
for line in text_list:
fields = line.strip().split()
text_id = fields[0]
text = " ".join(fields[1:])
wav_dict[text_id] = XiaoYingWave(text_id, text)
return wav_dict
def build_score_dict(jsonlistfile):
score_dict = {}
json_list = open(jsonlistfile).readlines()
for line in json_list:
fields = line.strip().split("/")
text_id = "_".join(fields[-4:-2])
time_stamp = fields[-2][-8:]
if(not score_dict.has_key(text_id)):
score_dict[text_id]={}
json_items = open(line.strip()).readlines()
for json_item in json_items:
json_entity = json.loads(json_item.strip())
if json_entity.has_key("AudioName"):
audio_id = str(json_entity["AudioName"])
elif json_entity.has_key("audioName"):
audio_id = str(json_entity["audioName"])
else:
log.Error("bad json file:%s"%json_item)
if json_entity.has_key("Score"):
score = float(json_entity["Score"])
elif json_entity.has_key("score"):
score = float(json_entity["score"])
else:
log.Error("bad json file:%s"%json_item)
score_id = time_stamp + "_" + audio_id
if score_dict[text_id].has_key(score_id):
log.Error("repeated score_id: %s for text_id: %s"%(score_id, text_id))
score_dict[text_id][score_id] = score
return score_dict
def build_exclude_list(exclude_list_file):
exclude_dict = {}
exclude_list=open(exclude_list_file).readlines()
for line in exclude_list:
fields = line.strip().split("/")
text_id = "_".join(fields[-4:-2])
time_stamp = fields[-2][-8:]
audio_id = fields[-1]
if not exclude_dict.has_key(text_id):
exclude_dict[text_id]=[]
exclude_dict[text_id].append(time_stamp + "_" + audio_id)
return exclude_dict
def set_wav(wav_dict, score_dict, exclude_dict):
for text_id in wav_dict.keys():
if (not score_dict.has_key(text_id)) or (len(exclude_dict[text_id])==len(score_dict[text_id])):
log.Error("we don't have audio for the text_id:%s"%text_id)
for wav_id, score in score_dict[text_id].items():
if not wav_id in exclude_dict[text_id]:
wav_dict[text_id].setWavList(wav_id, score)
def select_wav(wav_dict, score_low, score_high, num, output_dir):
uttscpfid = open(output_dir + "/utter.list", "w")
wavscpfid = open(output_dir + "/wav.scp", "w")
textfid = open(output_dir + "/text", "w")
for text_id in wav_dict:
selected_wav_list = wav_dict[text_id].getWav(score_low, score_high, num)
for i in range(len(selected_wav_list)):
utt_id = text_id + "_" + str(i).zfill(2)
text = wav_dict[text_id].getText()
text_dir = "/".join(text_id.split("_"))
utt_file = "SystemLogInfo" + "/".join(selected_wav_list[i].split("_"))
wav_file = "SystemLogInfo" + "/".join(selected_wav_list[i].split("_")) + ".wav"
uttscpfid.writelines(text_dir + "/" + utt_file + "\n")
wavscpfid.writelines(utt_id + " " + text_dir + "/" + wav_file + "\n")
textfid.writelines(utt_id + " " + text + "\n")
if __name__=="__main__":
if(len(sys.argv) < 8):
print("USAGE: python " + sys.argv[0] + " text jsonfile.list exclude_list score_low score_high num output_dir")
exit(1)
wav_dict = build_wav_dict(sys.argv[1])
score_dict = build_score_dict(sys.argv[2])
exclude_dict = build_exclude_list(sys.argv[3])
set_wav(wav_dict, score_dict, exclude_dict)
score_low = float(sys.argv[4])
score_high = float(sys.argv[5])
number = int(sys.argv[6])
output_dir = sys.argv[7]
select_wav(wav_dict, score_low, score_high, number, output_dir)
```
#### File: XY_QByE_STD/script/prepare_template.py
```python
import sys
import os
import random
def sampling(source_list_dict, select_num):
selected_list_dict={}
for keyword in source_list_dict.keys():
actual_num = min(len(source_list_dict[keyword]), select_num)
selected_list = random.sample(source_list_dict[keyword], actual_num)
selected_list_dict[keyword] = selected_list
return selected_list_dict
def mkdir(path):
is_exists = os.path.exists(path)
if not is_exists:
os.makedirs(path)
def get_file(path, suffix):
file_list = []
items = os.listdir(path)
for x in items:
if os.path.isfile(path + "/" + x) and x.endswith(suffix):
file_list.append(path + "/" + x)
#print(path + "/" + x)
elif os.path.isdir(path + "/" + x):
file_list += get_file(path + "/" + x, suffix)
return file_list
if __name__=="__main__":
    if(len(sys.argv)<5):
        print("USAGE: python "+ sys.argv[0]+ " keyword_all_list_file max_template_num random_num keyword_selected_list_file")
exit(1)
source_list_file = sys.argv[1]
select_num = int(sys.argv[2])
random_num = int(sys.argv[3])
selected_list_file_fake = sys.argv[4]
source_list_dict = {}
for item in open(source_list_file).readlines():
keyword = item.strip().split("_")[0]
if not source_list_dict.has_key(keyword):
source_list_dict[keyword] = []
source_list_dict[keyword].append(item)
#for keyword in source_list_dict.keys():
# print("%s %d\n"%(keyword, len(source_list_dict[keyword])))
for i in range(1, random_num+1):
selected_list_dict = sampling(source_list_dict, select_num)
selected_list_file_real = selected_list_file_fake.replace("XXX", str(i))
fid = open(selected_list_file_real, "w")
for keyword in selected_list_dict.keys():
for item in selected_list_dict[keyword]:
fid.writelines(item)
fid.close()
```
#### File: XY_QByE_STD/script/score_fusion.py
```python
import sys
import numpy as np
def m_norm(scorelist):
hist, bin_edges = np.histogram(scorelist,40)
index = hist.argmax();
peak = (bin_edges[index] + bin_edges[index+1])/2
slist_peak = np.array([x for x in scorelist if x >= peak])
scorelist = (scorelist - peak)/slist_peak.std()
return scorelist
def z_norm(scorelist):
#scorelist = (np.array(scorelist)-min(scorelist))/(max(scorelist)-min(scorelist))
mean = np.mean(scorelist)
std = np.std(scorelist)
    print(std)
scorelist = (np.array(scorelist)-mean)/std
return scorelist
def min_max_norm(scorelist):
min_v = min(scorelist)
max_v = max(scorelist)
if (max_v-min_v)>0.00001:
norm_scorelist = (np.array(scorelist)-min_v)/(max_v-min_v)
    else:
        norm_scorelist = np.array(scorelist)
return norm_scorelist
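# Editor's note: worked example (not in the original source):
# min_max_norm([2, 4, 6]) -> array([0., 0.5, 1.]), since min=2, max=6 and the
# spread 4 exceeds the 1e-5 guard; a near-constant list is returned unscaled.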
def get_score_all_list(result_dir, keyword_list):
score_all_list = []
for keyword in keyword_list:
result_fid = open(result_dir + keyword.strip() + ".RESULT")
result_list = result_fid.readlines()
result_fid.close()
score_list = []
for res in result_list:
score = float(res.strip().split()[0])
score_list.append(score)
score_all_list.append(score_list)
return score_all_list
def fusion(score_all_list1, score_all_list2, factor):
if len(score_all_list1) != len(score_all_list2):
print("Error: bad length of score list")
fusion_score_all_list = []
for i in range(len(score_all_list1)):
#norm_score_list1 = m_norm(score_all_list1[i])
#norm_score_list2 = m_norm(score_all_list2[i])
norm_score_list1 = min_max_norm(score_all_list1[i])
norm_score_list2 = min_max_norm(score_all_list2[i])
fusion_score_list = []
for j in range(len(norm_score_list1)):
#if norm_score_list2[j] > 1:
# fusion_score_list.append(norm_score_list1[j])
#else:
# fusion_score_list.append(factor*norm_score_list1[j]+(1-factor)*norm_score_list2[j])
fusion_score_list.append(factor*norm_score_list1[j]+(1-factor)*norm_score_list2[j])
fusion_score_all_list.append(fusion_score_list)
return fusion_score_all_list
if __name__=="__main__":
if len(sys.argv) < 6:
print("UDAGE: python "+ sys.argv[0]+ "keyword_list_file result_dir1 result_dir2 fusion_dir")
exit(1)
keyword_list = open(sys.argv[1]).readlines()
result_dir1 = sys.argv[2]
result_dir2 = sys.argv[3]
factor = float(sys.argv[4])
fusion_dir = sys.argv[5]
score_all_list1 = get_score_all_list(result_dir1, keyword_list)
score_all_list2 = get_score_all_list(result_dir2, keyword_list)
fusion_score_all_list = fusion(score_all_list1, score_all_list2, factor)
for i in range(len(keyword_list)):
fid = open(fusion_dir + keyword_list[i].strip() + ".RESULT","w")
for x in fusion_score_all_list[i]:
fid.writelines("%f\n"%x)
fid.close()
``` |
{
"source": "Jingyu6/forl_2021",
"score": 2
} |
#### File: forl_2021/algos/cqgp.py
```python
from typing import Any, Dict, Optional, Sequence
from d3rlpy.constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from d3rlpy.dataset import TransitionMiniBatch
from d3rlpy.algos.base import AlgoBase
from .sklearn_impl.cqgp_impl import CQGPImpl
class CQGP(AlgoBase):
_impl: Optional[CQGPImpl]
_gamma: float
_max_buffer_size: int
_q_std_multiplier: int
def __init__(
self,
*,
batch_size: int = 32,
n_steps: int = 1,
gamma: float = 0.99,
impl: Optional[CQGPImpl] = None,
max_buffer_size: int = 1000,
q_std_multiplier: int = 20,
**kwargs: Any,
):
super().__init__(
batch_size=batch_size,
n_steps=n_steps,
gamma=gamma,
# use only frames = 1 for now
n_frames=1,
# disable scalar functionality for now due to lack of support for np.ndarray
scaler=None,
reward_scaler=None,
kwargs=kwargs,
)
self._impl = impl
self._gamma = gamma
self._max_buffer_size = max_buffer_size
self._q_std_multiplier = q_std_multiplier
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = CQGPImpl(
observation_shape=observation_shape,
action_size=action_size,
gamma=self._gamma,
max_buffer_size=self._max_buffer_size,
q_std_multiplier=self._q_std_multiplier
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
loss = self._impl.update(batch)
return { "loss": loss }
def get_action_type(self) -> ActionSpace:
return ActionSpace.DISCRETE
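# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch (not part of the original file), modelled
# on how run.py in this repository builds and trains the agent; `episodes`,
# `env` and `transition_len` are assumed to come from the dataset-loading code
# there:
# >>> agent = CQGP(max_buffer_size=transition_len, q_std_multiplier=20)
# >>> agent.fit(episodes, eval_episodes=[None], n_epochs=25,
# ...           scorers={'rewards': evaluate_on_environment(env)},
# ...           logdir='d3rlpy_logs/')
# ---------------------------------------------------------------------------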
```
#### File: algos/sklearn_impl/base.py
```python
import numpy as np
from typing import Optional, Sequence, Any, Union, List, Tuple
from d3rlpy.algos.base import AlgoImplBase
class SklearnImplBase(AlgoImplBase):
_observation_shape: Sequence[int]
_action_size: int
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
):
self._observation_shape = observation_shape
self._action_size = action_size
def predict_best_action(self, states: Union[np.ndarray, List[Any]]) -> np.ndarray:
if isinstance(states, np.ndarray):
assert states.ndim > 1, "Input must have batch dimension."
return self._predict_best_action(states)
else:
# later use a decorator for conversion
states = np.array(states)
return self._predict_best_action(states)
def _predict_best_action(self, states: np.ndarray) -> np.ndarray:
raise NotImplementedError
def sample_action(self, states: Union[np.ndarray, List[Any]]) -> np.ndarray:
if isinstance(states, np.ndarray):
assert states.ndim > 1, "Input must have batch dimension."
return self._sample_action(states)
else:
# later use a decorator for conversion
raise TypeError
def _sample_action(self, states: np.ndarray) -> np.ndarray:
raise NotImplementedError
def predict_value(
self,
states: Union[np.ndarray, List[Any]],
actions: Union[np.ndarray, List[Any]],
with_std: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
if isinstance(states, np.ndarray) and isinstance(actions, np.ndarray):
assert states.ndim > 1, "Input must have batch dimension."
return self._predict_value(states, actions, with_std)
else:
# later use a decorator for conversion
raise TypeError
def _predict_value(
self,
states: np.ndarray,
actions: np.ndarray,
with_std: bool,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
raise NotImplementedError
""" From ImplBase """
def save_model(self, fname: str) -> None:
pass
def load_model(self, fname: str) -> None:
pass
@property
def observation_shape(self) -> Sequence[int]:
return self._observation_shape
@property
def action_size(self) -> int:
return self._action_size
""" From AlgoImplBase """
def save_policy(self, fname: str, as_onnx: bool) -> None:
pass
def copy_policy_from(self, impl: AlgoImplBase) -> None:
pass
def copy_policy_optim_from(self, impl: AlgoImplBase) -> None:
pass
def copy_q_function_from(self, impl: AlgoImplBase) -> None:
pass
def copy_q_function_optim_from(self, impl: AlgoImplBase) -> None:
pass
def reset_optimizer_states(self) -> None:
pass
```
#### File: Jingyu6/forl_2021/run.py
```python
import os
import shutil
import random
import argparse
import d3rlpy
import matplotlib.pyplot as plt
import numpy as np
from d3rlpy.algos import DQN, DiscreteCQL, DiscreteBC, DiscreteSAC, DiscreteBCQ
from d3rlpy.metrics import evaluate_on_environment
from algos.cqgp import CQGP
from datasets import get_episodes
from visualization.data_plot import plot_records_in_dir
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--env", type=str, default='cartpole-random', choices=[
'cartpole-replay', 'cartpole-random', 'acrobot-replay'])
parser.add_argument("-t", "--testing", action="store_true")
parser.add_argument("-n", "--num_of_trials", type=int, default=5)
parser.add_argument("-x", "--num_of_episodes", type=int, default=20)
parser.add_argument("-i", "--num_of_epochs", type=int, default=25)
parser.add_argument("-m", "--q_std_multiplier", type=int, default=20)
args = parser.parse_args()
print("Start experiments: ", args)
if args.testing:
LOG_DIR = os.path.join('d3rlpy_logs/test', args.env)
if os.path.isdir(LOG_DIR):
shutil.rmtree(LOG_DIR)
else:
LOG_DIR = os.path.join('d3rlpy_logs/', args.env)
ALGOS = [CQGP, DQN, DiscreteCQL, DiscreteBC, DiscreteSAC, DiscreteBCQ]
def set_random_seed(s):
    d3rlpy.seed(s)
    random.seed(s)
    np.random.seed(s)
for t in range(args.num_of_trials):
set_random_seed(t + 227 + 1998)
# obtain dataset
episodes, env = get_episodes(args.env, args.num_of_episodes)
env.seed(t)
transition_len = sum([len(episode) for episode in episodes])
print('Total size: ', transition_len)
for algo in ALGOS:
# setup algorithm
agent = algo(max_buffer_size=transition_len, q_std_multiplier=args.q_std_multiplier)
agent.fit(
episodes,
eval_episodes=[None], # dummy
n_epochs=args.num_of_epochs,
scorers={
'rewards': evaluate_on_environment(env)
},
logdir=LOG_DIR
)
# plot the results
plot_records_in_dir(log_dir=LOG_DIR, env_name=args.env, value_description='rewards')
```
#### File: forl_2021/visualization/data_parser.py
```python
import pandas as pd # type: ignore
from pathlib import Path
from typing import List, Dict, Any, Optional, Union, Callable, Tuple
from abc import ABC, abstractmethod
class Records:
""" class representing data for plotting """
def __init__(
self,
epochs: List[int],
steps: List[int],
values: List[int],
algo_name: Optional[str],
trial_name: Optional[str],
value_description: str
):
self._algo_name = algo_name
self._trial_name = trial_name
self._value_description = value_description
self._epochs = epochs
self._steps = steps
self._values = values
def __repr__(self) -> str:
return 'Data records of {} for algorithm {}, experiment {}, total length {}.'.format(
self._value_description,
self._algo_name,
self._trial_name,
self.__len__()
)
def __len__(self) -> int:
return len(self._epochs)
    def __getitem__(self, idx) -> Dict[str, Any]:
if idx < self.__len__():
return dict(epoch=self._epochs[idx], step=self._steps[idx], value=self._values[idx])
return {}
def get_data(self, max_len: int = None) -> Dict[str, List[int]]:
max_len = max_len or self.__len__()
return dict(epochs=self._epochs[:max_len], steps=self._steps[:max_len], values=self._values[:max_len])
@property
def algo_name(self):
return self._algo_name
@property
def trial_name(self):
return self._trial_name
@property
def value_description(self):
return self._value_description
class DataParser(ABC):
""" base class dealing with parsing logics """
def __init__(self):
pass
@abstractmethod
def parse(self,
data_source: Any,
algo_name: str,
trial_name: str,
value_description: str
) -> Records:
pass
class CSVDataParser(DataParser):
""" Generic CSV parser into format Records """
def __init__(self):
super(CSVDataParser, self).__init__()
def parse(
self,
csv_path: str,
algo_name: str = None,
trial_name: str = None,
value_description: str = 'value'
) -> Records:
print(csv_path)
assert Path(csv_path).is_file(), 'Invalid csv file path.'
data = pd.read_csv(csv_path, names=['epoch', 'step', value_description])
parsed_data = Records(
data['epoch'].tolist(),
data['step'].tolist(),
data[value_description].tolist(),
algo_name,
trial_name,
value_description
)
return parsed_data
class D3rlpyCSVDataParser(CSVDataParser):
""" D3rlpy CSV parser that extracts algo name and experiment automatically """
def __init__(self, parse_experiment_name: Callable[[str], Tuple[str, str, str]] = None):
super(D3rlpyCSVDataParser, self).__init__()
self._parse_experiment_name = parse_experiment_name
def parse(
self,
log_dir: str,
algo_name: str = None,
trial_name: str = None,
value_description: str = 'loss'
) -> Records:
# the log dir path should have a folder name like ${ALGO}_${EXPERIMENT}
log_dir_path = Path(log_dir)
base_name = log_dir_path.name
assert log_dir_path.is_dir(), 'Invalid log directory.'
assert '_' in base_name, 'The folder should have ALGONAME_EXPERIMENTNAME format.'
delim_idx = base_name.find('_')
if self._parse_experiment_name:
algo_name, _, trial_name = self._parse_experiment_name(base_name)
else:
algo_name, trial_name = algo_name or base_name[:delim_idx], trial_name or base_name[delim_idx+1:]
csv_path = log_dir_path / (value_description + '.csv')
return super().parse(
str(csv_path),
algo_name,
trial_name,
value_description
)
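# Editor's note: hedged usage sketch (not part of the original file). A d3rlpy
# log folder is assumed to be named like "CQGP_20210101123456" and to contain a
# rewards.csv written during training:
# >>> parser = D3rlpyCSVDataParser()
# >>> records = parser.parse('d3rlpy_logs/cartpole-random/CQGP_20210101123456',
# ...                        value_description='rewards')
# >>> records.algo_name, len(records)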
```
#### File: forl_2021/visualization/data_plot.py
```python
import gym # type: ignore
import matplotlib # type: ignore
import matplotlib.pyplot as plt # type: ignore
import numpy as np
from pathlib import Path # type: ignore
from typing import List, Union, Literal, Dict, Any
from visualization.data_parser import Records, D3rlpyCSVDataParser
def plot_records_list(
axes: matplotlib.axes.Axes,
records_list: List[Records],
env_name: str,
value_description: str = 'loss',
horizon_name: Union[Literal['epochs', 'steps']] = 'epochs',
**kwargs: Any # arguments to the plot function
) -> None:
"""
Plot the graph of different algorithms,
each algorithm contains multiple experiments,
all experiments are from the same environment
"""
assert len(records_list) > 0, "Can not pass in empty records."
# group them together
algo_to_records: Dict[str, List[Records]] = {}
for records in records_list:
algo_name = records.algo_name
if algo_name not in algo_to_records:
algo_to_records[algo_name] = []
algo_to_records[algo_name].append(records)
# make sure all algorithms have the same number of experiments
experiment_counts = set([len(data) for data in algo_to_records.values()])
assert len(experiment_counts) == 1, \
"All algorithms should have the same number of experiments"
# truncate horizon (assuming monotonic increasing)
min_horizon = min([len(records.get_data()[horizon_name]) for records in records_list])
for algo_name in sorted(algo_to_records.keys()):
print(algo_name)
algo_records_list = algo_to_records[algo_name]
horizon = algo_records_list[0].get_data(min_horizon)[horizon_name]
values = np.array([records.get_data(min_horizon)['values'] for records in algo_records_list])
value_mean = np.mean(values, axis=0)
value_std = np.std(values, axis=0)
axes.plot(horizon, value_mean, **kwargs)
axes.fill_between(horizon, value_mean - value_std, value_mean + value_std, alpha=0.2, interpolate=True)
axes.set_title('{}: {} plots of {} over {} trials'.format(
env_name, value_description, horizon_name, next(iter(experiment_counts))))
axes.set_ylabel(value_description)
axes.set_xlabel(horizon_name)
axes.legend(sorted(list(algo_to_records.keys())))
def plot_records_in_dir(
log_dir: str,
env_name: str,
value_description: str = 'loss',
horizon_name: Union[Literal['epochs', 'steps']] = 'epochs',
**kwargs: Any
) -> None:
log_dir_path = Path(log_dir)
assert log_dir_path.is_dir(), "Invalid log dir."
parser = D3rlpyCSVDataParser()
records_list: List[Records] = []
for sub_dir in log_dir_path.iterdir():
records_list.append(parser.parse(str(sub_dir), value_description=value_description))
plot_records_list(plt.gca(), records_list, env_name, value_description, horizon_name, **kwargs)
plt.show()
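# Editor's note: run.py in this repository calls this as
# plot_records_in_dir(log_dir=LOG_DIR, env_name=args.env, value_description='rewards'),
# plotting per-algorithm mean curves with a one-standard-deviation band.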
``` |
{
"source": "jingyuanchan/HERO_Video_Feature_Extractor",
"score": 2
} |
#### File: HERO_Video_Feature_Extractor/mil-nce/model.py
```python
import torch as th
from s3dg import S3D
def build_model(args):
print(f'Loading S3D with checkpoint {args.s3d_ckpt}...')
model = S3D()
model = model.cuda()
model_data = th.load(args.s3d_ckpt)
model.load_state_dict(model_data, strict=False)
model.eval()
return model
```
#### File: slowfast/extract_feature/model.py
```python
import slowfast.utils.checkpoint as cu
from slowfast.models import model_builder
def build_model(cfg):
"""
Build slowfast model
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Build the video model and print model statistics.
model = model_builder.build_model(cfg)
# Load a checkpoint to test if applicable.
if cfg.TEST.CHECKPOINT_FILE_PATH != "":
cu.load_checkpoint(
cfg.TEST.CHECKPOINT_FILE_PATH,
model,
cfg.NUM_GPUS > 1,
None,
inflation=False,
convert_from_caffe2=cfg.TEST.CHECKPOINT_TYPE == "caffe2",
)
elif cu.has_checkpoint(cfg.OUTPUT_DIR):
last_checkpoint = cu.get_last_checkpoint(cfg.OUTPUT_DIR)
cu.load_checkpoint(last_checkpoint, model, cfg.NUM_GPUS > 1)
elif cfg.TRAIN.CHECKPOINT_FILE_PATH != "":
# If no checkpoint found in TEST.CHECKPOINT_FILE_PATH or in the current
# checkpoint folder, try to load checkpint from
# TRAIN.CHECKPOINT_FILE_PATH and test it.
cu.load_checkpoint(
cfg.TRAIN.CHECKPOINT_FILE_PATH,
model,
cfg.NUM_GPUS > 1,
None,
inflation=False,
convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == "caffe2",
)
else:
# raise NotImplementedError("Unknown way to load checkpoint.")
print("Testing with random initialization. Only for debugging.")
return model
```
#### File: slowfast/extract_feature/yuv_reader.py
```python
import torch
import numpy as np
import cv2
class YuvRgbConverter(object):
def __init__(self, device=torch.device('cuda')):
self.T = torch.tensor(
[[1.164, 1.164, 1.164],
[0, -0.392, 2.017],
[1.596, -0.813, 0]],
requires_grad=False
).to(device)
self.bias = torch.tensor([-16, -128, -128],
requires_grad=False).float().to(device)
@torch.no_grad()
def __call__(self, yuv):
rgb = (yuv+self.bias).matmul(self.T).detach().clamp_(0, 255)
return rgb
def read_y(w, h, binary):
x = np.frombuffer(binary, dtype=np.uint8).reshape((h, w))
return torch.from_numpy(x).float()
def read_uv(w, h, binary):
x = np.frombuffer(binary, dtype=np.uint8
).reshape((h//2, w//2))
    x = cv2.resize(x, (w, h), interpolation=cv2.INTER_NEAREST)
return torch.from_numpy(x).float()
def read_yuv420p(binary, w, h):
assert w % 2 == h % 2 == 0
tot_len = len(binary)
frame_length = w*h*6//4
n_frame = tot_len // frame_length
n_pix = w*h
uv_len = n_pix // 4
yuv = torch.Tensor(n_frame, h, w, 3)
y_starts = range(0, tot_len, frame_length)
for i, st in enumerate(y_starts):
yuv.data[i, ..., 0] = read_y(w, h, binary[st: st+n_pix]).data
u_starts = range(n_pix, tot_len, frame_length)
for i, st in enumerate(u_starts):
yuv.data[i, ..., 1] = read_uv(w, h, binary[st: st+uv_len]).data
v_starts = range(n_pix+uv_len, tot_len, frame_length)
for i, st in enumerate(v_starts):
yuv.data[i, ..., 2] = read_uv(w, h, binary[st: st+uv_len]).data
return yuv
``` |
{
"source": "jingyuanchan/Pose_Baseline",
"score": 2
} |
#### File: Pose_Baseline/misc/utils.py
```python
import cv2
import munkres
import numpy as np
import torch
# solution proposed in https://github.com/pytorch/pytorch/issues/229#issuecomment-299424875
def flip_tensor(tensor, dim=0):
"""
flip the tensor on the dimension dim
"""
inv_idx = torch.arange(tensor.shape[dim] - 1, -1, -1).to(tensor.device)
return tensor.index_select(dim, inv_idx)
#
# derived from https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
def flip_back(output_flipped, matched_parts):
assert len(output_flipped.shape) == 4, 'output_flipped has to be [batch_size, num_joints, height, width]'
output_flipped = flip_tensor(output_flipped, dim=-1)
for pair in matched_parts:
tmp = output_flipped[:, pair[0]].clone()
output_flipped[:, pair[0]] = output_flipped[:, pair[1]]
output_flipped[:, pair[1]] = tmp
return output_flipped
def fliplr_joints(joints, joints_vis, width, matched_parts):
# Flip horizontal
joints[:, 0] = width - joints[:, 0] - 1
# Change left-right parts
for pair in matched_parts:
joints[pair[0], :], joints[pair[1], :] = \
joints[pair[1], :], joints[pair[0], :].copy()
joints_vis[pair[0], :], joints_vis[pair[1], :] = \
joints_vis[pair[1], :], joints_vis[pair[0], :].copy()
return joints * joints_vis, joints_vis
def get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
print(scale)
scale = np.array([scale, scale])
scale_tmp = scale * 1.0 * 200.0 # It was scale_tmp = scale * 200.0
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def crop(img, center, scale, output_size, rot=0, interpolation=cv2.INTER_LINEAR):
trans = get_affine_transform(center, scale, rot, output_size)
dst_img = cv2.warpAffine(
img, trans, (int(output_size[0]), int(output_size[1])),
flags=interpolation
)
return dst_img
#
#
#
# derived from https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
def calc_dists(preds, target, normalize):
preds = preds.type(torch.float32)
target = target.type(torch.float32)
dists = torch.zeros((preds.shape[1], preds.shape[0])).to(preds.device)
for n in range(preds.shape[0]):
for c in range(preds.shape[1]):
if target[n, c, 0] > 1 and target[n, c, 1] > 1:
normed_preds = preds[n, c, :] / normalize[n]
normed_targets = target[n, c, :] / normalize[n]
# # dists[c, n] = np.linalg.norm(normed_preds - normed_targets)
dists[c, n] = torch.norm(normed_preds - normed_targets)
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
"""
Return percentage below threshold while ignoring values with a -1
"""
dist_cal = torch.ne(dists, -1)
num_dist_cal = dist_cal.sum()
if num_dist_cal > 0:
return torch.lt(dists[dist_cal], thr).float().sum() / num_dist_cal
else:
return -1
def evaluate_pck_accuracy(output, target, hm_type='gaussian', thr=0.5):
"""
    Calculate accuracy according to PCK,
    using the ground-truth heatmap rather than x,y locations.
    Returns the per-joint accuracies, followed by the average accuracy, the
    number of joints used in the average, and the predicted/target coordinates.
"""
idx = list(range(output.shape[1]))
if hm_type == 'gaussian':
pred, _ = get_max_preds(output)
target, _ = get_max_preds(target)
h = output.shape[2]
w = output.shape[3]
norm = torch.ones((pred.shape[0], 2)) * torch.tensor([h, w],
dtype=torch.float32) / 10 # Why they divide this by 10?
norm = norm.to(output.device)
else:
raise NotImplementedError
dists = calc_dists(pred, target, norm)
acc = torch.zeros(len(idx)).to(dists.device)
avg_acc = 0
cnt = 0
for i in range(len(idx)):
acc[i] = dist_acc(dists[idx[i]], thr=thr)
if acc[i] >= 0:
avg_acc = avg_acc + acc[i]
cnt += 1
avg_acc = avg_acc / cnt if cnt != 0 else 0
return acc, avg_acc, cnt, pred, target
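# Illustrative sketch (not part of the original file): running the PCK metric on
# random heatmaps for 17 joints; shapes follow the [batch, joints, height, width]
# convention assumed above.
#
#     heat_pred = torch.rand(2, 17, 64, 48)
#     heat_gt = torch.rand(2, 17, 64, 48)
#     acc, avg_acc, cnt, pred, gt = evaluate_pck_accuracy(heat_pred, heat_gt)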
#
#
#
# Operations on bounding boxes (rectangles)
def bbox_area(bbox):
"""
    Area of a bounding box (a rectangle).
Args:
bbox (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
Returns:
float: Bounding box area.
"""
x1, y1, x2, y2 = bbox
dx = x2 - x1
dy = y2 - y1
return dx * dy
def bbox_intersection(bbox_a, bbox_b):
"""
    Intersection between two bounding boxes (two rectangles).
Args:
bbox_a (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
bbox_b (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
Returns:
(:class:`np.ndarray`, float):
Intersection limits and area.
Format: (x_min, y_min, x_max, y_max), area
"""
x1 = np.max((bbox_a[0], bbox_b[0])) # Left
x2 = np.min((bbox_a[2], bbox_b[2])) # Right
y1 = np.max((bbox_a[1], bbox_b[1])) # Top
y2 = np.min((bbox_a[3], bbox_b[3])) # Bottom
if x2 < x1 or y2 < y1:
bbox_i = np.asarray([0, 0, 0, 0])
area_i = 0
else:
bbox_i = np.asarray([x1, y1, x2, y2], dtype=bbox_a.dtype)
area_i = bbox_area(bbox_i)
return bbox_i, area_i
def bbox_union(bbox_a, bbox_b):
"""
    Union between two bounding boxes (two rectangles).
Args:
bbox_a (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
bbox_b (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
Returns:
float: Union.
"""
area_a = bbox_area(bbox_a)
area_b = bbox_area(bbox_b)
bbox_i, area_i = bbox_intersection(bbox_a, bbox_b)
area_u = area_a + area_b - area_i
return area_u
def bbox_iou(bbox_a, bbox_b):
"""
    Intersection over Union (IoU) between two bounding boxes (two rectangles).
Args:
bbox_a (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
bbox_b (:class:`np.ndarray`): rectangle in the form (x_min, y_min, x_max, y_max)
Returns:
float: Intersection over Union (IoU).
"""
area_u = bbox_union(bbox_a, bbox_b)
bbox_i, area_i = bbox_intersection(bbox_a, bbox_b)
iou = area_i / area_u
return iou
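# Worked example (not in the original file): for a = (0, 0, 2, 2) and b = (1, 1, 3, 3)
# the intersection is the unit square (1, 1, 2, 2) with area 1 and the union area is
# 4 + 4 - 1 = 7, so the IoU is 1/7 ~= 0.143.
#
#     a = np.asarray([0, 0, 2, 2], dtype=np.float32)
#     b = np.asarray([1, 1, 3, 3], dtype=np.float32)
#     iou = bbox_iou(a, b)  # ~0.143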
#
#
#
# Bounding box/pose similarity and association
def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
if not isinstance(sigmas, np.ndarray):
sigmas = np.array(
[.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
vars = (sigmas * 2) ** 2
xg = g[:, 0]
yg = g[:, 1]
vg = g[:, 2]
ious = np.zeros((d.shape[0]))
for n_d in range(0, d.shape[0]):
xd = d[n_d, :, 0]
yd = d[n_d, :, 1]
vd = d[n_d, :, 2]
dx = xd - xg
dy = yd - yg
e = (dx ** 2 + dy ** 2) / vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2
if in_vis_thre is not None:
ind = list(vg > in_vis_thre) and list(vd > in_vis_thre)
e = e[ind]
ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0
return ious
def compute_similarity_matrices(bboxes_a, bboxes_b, poses_a, poses_b):
assert len(bboxes_a) == len(poses_a) and len(bboxes_b) == len(poses_b)
result_bbox = np.zeros((len(bboxes_a), len(bboxes_b)), dtype=np.float32)
result_pose = np.zeros((len(poses_a), len(poses_b)), dtype=np.float32)
for i, (bbox_a, pose_a) in enumerate(zip(bboxes_a, poses_a)):
area_bboxes_b = np.asarray([bbox_area(bbox_b) for bbox_b in bboxes_b])
result_pose[i, :] = oks_iou(pose_a, poses_b, bbox_area(bbox_a), area_bboxes_b)
for j, (bbox_b, pose_b) in enumerate(zip(bboxes_b, poses_b)):
result_bbox[i, j] = bbox_iou(bbox_a, bbox_b)
return result_bbox, result_pose
def find_person_id_associations(boxes, pts, prev_boxes, prev_pts, prev_person_ids, next_person_id=0,
pose_alpha=0.5, similarity_threshold=0.5, smoothing_alpha=0.):
"""
Find associations between previous and current skeletons and apply temporal smoothing.
It requires previous and current bounding boxes, skeletons, and previous person_ids.
Args:
boxes (:class:`np.ndarray`): current person bounding boxes
pts (:class:`np.ndarray`): current human joints
prev_boxes (:class:`np.ndarray`): previous person bounding boxes
prev_pts (:class:`np.ndarray`): previous human joints
prev_person_ids (:class:`np.ndarray`): previous person ids
next_person_id (int): the id that will be assigned to the next novel detected person
Default: 0
pose_alpha (float): parameter to weight between bounding box similarity and pose (oks) similarity.
pose_alpha * pose_similarity + (1 - pose_alpha) * bbox_similarity
Default: 0.5
similarity_threshold (float): lower similarity threshold to have a correct match between previous and
current detections.
Default: 0.5
smoothing_alpha (float): linear temporal smoothing filter. Set 0 to disable, 1 to keep the previous detection.
            Default: 0.0
Returns:
(:class:`np.ndarray`, :class:`np.ndarray`, :class:`np.ndarray`):
A list with (boxes, pts, person_ids) where boxes and pts are temporally smoothed.
"""
bbox_similarity_matrix, pose_similarity_matrix = compute_similarity_matrices(boxes, prev_boxes, pts, prev_pts)
similarity_matrix = pose_similarity_matrix * pose_alpha + bbox_similarity_matrix * (1 - pose_alpha)
m = munkres.Munkres()
assignments = np.asarray(m.compute((1 - similarity_matrix).tolist())) # Munkres require a cost => 1 - similarity
person_ids = np.ones(len(pts), dtype=np.int32) * -1
for assignment in assignments:
if similarity_matrix[assignment[0], assignment[1]] > similarity_threshold:
person_ids[assignment[0]] = prev_person_ids[assignment[1]]
if smoothing_alpha:
boxes[assignment[0]] = (1 - smoothing_alpha) * boxes[assignment[0]] + \
smoothing_alpha * prev_boxes[assignment[1]]
pts[assignment[0]] = (1 - smoothing_alpha) * pts[assignment[0]] + \
smoothing_alpha * prev_pts[assignment[1]]
person_ids[person_ids == -1] = np.arange(next_person_id, next_person_id + np.sum(person_ids == -1))
return boxes, pts, person_ids
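# Illustrative sketch (not part of the original file): associating detections between
# two consecutive frames. `cur_boxes`, `cur_pts`, `prev_boxes`, `prev_pts`, `prev_ids`
# and `next_id` are hypothetical per-frame arrays/values in the formats documented above.
#
#     cur_boxes, cur_pts, cur_ids = find_person_id_associations(
#         boxes=cur_boxes, pts=cur_pts,
#         prev_boxes=prev_boxes, prev_pts=prev_pts, prev_person_ids=prev_ids,
#         next_person_id=next_id, smoothing_alpha=0.25,
#     )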
#
#
#
# derived from https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation
def get_multi_stage_outputs(model, image,
with_flip=False, project2image=False, size_projected=None,
nof_joints=17, max_batch_size=128):
heatmaps_avg = 0
num_heatmaps = 0
heatmaps = []
tags = []
# inference
# outputs is a list with (default) shape
# [(batch, nof_joints*2, height//4, width//4), (batch, nof_joints, height//2, width//2)]
# but it could also be (no checkpoints with this configuration)
# [(batch, nof_joints*2, height//4, width//4), (batch, nof_joints*2, height//2, width//2), (batch, nof_joints, height, width)]
if len(image) <= max_batch_size:
outputs = model(image)
else:
outputs = [
torch.empty((image.shape[0], nof_joints * 2, image.shape[-2] // 4, image.shape[-1] // 4),
device=image.device),
torch.empty((image.shape[0], nof_joints, image.shape[-2] // 2, image.shape[-1] // 2),
device=image.device)
]
for i in range(0, len(image), max_batch_size):
out = model(image[i:i + max_batch_size])
outputs[0][i:i + max_batch_size] = out[0]
outputs[1][i:i + max_batch_size] = out[1]
# get higher output resolution
higher_resolution = (outputs[-1].shape[-2], outputs[-1].shape[-1])
for i, output in enumerate(outputs):
if i != len(outputs) - 1:
output = torch.nn.functional.interpolate(
output,
size=higher_resolution,
mode='bilinear',
align_corners=False
)
heatmaps_avg += output[:, :nof_joints]
num_heatmaps += 1
if output.shape[1] > nof_joints:
tags.append(output[:, nof_joints:])
if num_heatmaps > 0:
heatmaps.append(heatmaps_avg / num_heatmaps)
if with_flip: # ToDo
raise NotImplementedError
# if 'coco' in cfg.DATASET.DATASET:
# dataset_name = 'COCO'
# elif 'crowd_pose' in cfg.DATASET.DATASET:
# dataset_name = 'CROWDPOSE'
# else:
# raise ValueError('Please implement flip_index for new dataset: %s.' % cfg.DATASET.DATASET)
# flip_index = FLIP_CONFIG[dataset_name + '_WITH_CENTER'] \
# if cfg.DATASET.WITH_CENTER else FLIP_CONFIG[dataset_name]
#
# heatmaps_avg = 0
# num_heatmaps = 0
# outputs_flip = model(torch.flip(image, [3]))
# for i in range(len(outputs_flip)):
# output = outputs_flip[i]
# if len(outputs_flip) > 1 and i != len(outputs_flip) - 1:
# output = torch.nn.functional.interpolate(
# output,
# size=(outputs_flip[-1].size(2), outputs_flip[-1].size(3)),
# mode='bilinear',
# align_corners=False
# )
# output = torch.flip(output, [3])
# outputs.append(output)
#
# offset_feat = cfg.DATASET.NUM_JOINTS \
# if cfg.LOSS.WITH_HEATMAPS_LOSS[i] else 0
#
# if cfg.LOSS.WITH_HEATMAPS_LOSS[i] and cfg.TEST.WITH_HEATMAPS[i]:
# heatmaps_avg += \
# output[:, :cfg.DATASET.NUM_JOINTS][:, flip_index, :, :]
# num_heatmaps += 1
#
# if cfg.LOSS.WITH_AE_LOSS[i] and cfg.TEST.WITH_AE[i]:
# tags.append(output[:, offset_feat:])
# if cfg.MODEL.TAG_PER_JOINT:
# tags[-1] = tags[-1][:, flip_index, :, :]
#
# heatmaps.append(heatmaps_avg/num_heatmaps)
if project2image and size_projected:
heatmaps = [
torch.nn.functional.interpolate(
hms,
size=(size_projected[1], size_projected[0]),
mode='bilinear',
align_corners=False
)
for hms in heatmaps
]
tags = [
torch.nn.functional.interpolate(
tms,
size=(size_projected[1], size_projected[0]),
mode='bilinear',
align_corners=False
)
for tms in tags
]
return outputs, heatmaps, tags
def aggregate_results(scale_factor, final_heatmaps, tags_list, heatmaps, tags, with_flip=False, project2image=False):
if scale_factor == 1:
if final_heatmaps is not None and not project2image:
tags = [
torch.nn.functional.interpolate(
tms,
size=(final_heatmaps.size(2), final_heatmaps.size(3)),
mode='bilinear',
align_corners=False
)
for tms in tags
]
for tms in tags:
tags_list.append(torch.unsqueeze(tms, dim=4))
heatmaps_avg = (heatmaps[0] + heatmaps[1]) / 2.0 if with_flip else heatmaps[0]
if final_heatmaps is None:
final_heatmaps = heatmaps_avg
elif project2image:
final_heatmaps += heatmaps_avg
else:
final_heatmaps += torch.nn.functional.interpolate(
heatmaps_avg,
size=(final_heatmaps.size(2), final_heatmaps.size(3)),
mode='bilinear',
align_corners=False
)
return final_heatmaps, tags_list
def transform_preds(coords, center, scale, output_size):
# target_coords = np.zeros(coords.shape)
target_coords = coords.copy()
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
def resize(image, input_size, interpolation=cv2.INTER_LINEAR):
h, w, _ = image.shape
center = np.array([int(w / 2.0 + 0.5), int(h / 2.0 + 0.5)])
if w < h:
w_resized = input_size
h_resized = int((input_size / w * h + 63) // 64 * 64)
scale_w = w / 200.0
scale_h = h_resized / w_resized * w / 200.0
else:
h_resized = input_size
w_resized = int((input_size / h * w + 63) // 64 * 64)
scale_h = h / 200.0
scale_w = w_resized / h_resized * h / 200.0
scale = np.array([scale_w, scale_h])
trans = get_affine_transform(center, scale, 0, (w_resized, h_resized))
image_resized = cv2.warpAffine(
image,
trans,
(int(w_resized), int(h_resized)),
flags=interpolation
)
return image_resized, center, scale
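# Illustrative note (not part of the original file): with input_size=512 a 480x640 BGR
# image is resized so that its shorter side becomes 512 and the longer side is rounded
# up to a multiple of 64, e.g.
#
#     resized, center, scale = resize(np.zeros((480, 640, 3), np.uint8), 512)
#     # resized.shape == (512, 704, 3)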
def get_multi_scale_size(image, input_size, current_scale, min_scale):
h, w, _ = image.shape
center = np.array([int(w / 2.0 + 0.5), int(h / 2.0 + 0.5)])
# calculate the size for min_scale
min_input_size = int((min_scale * input_size + 63) // 64 * 64)
if w < h:
w_resized = int(min_input_size * current_scale / min_scale)
h_resized = int(
int((min_input_size / w * h + 63) // 64 * 64) * current_scale / min_scale
)
scale_w = w / 200.0
scale_h = h_resized / w_resized * w / 200.0
else:
h_resized = int(min_input_size * current_scale / min_scale)
w_resized = int(
int((min_input_size / h * w + 63) // 64 * 64) * current_scale / min_scale
)
scale_h = h / 200.0
scale_w = w_resized / h_resized * h / 200.0
return (w_resized, h_resized), center, np.array([scale_w, scale_h])
def resize_align_multi_scale(image, input_size, current_scale, min_scale, interpolation=cv2.INTER_LINEAR):
size_resized, center, scale = get_multi_scale_size(
image, input_size, current_scale, min_scale
)
trans = get_affine_transform(center, scale, 0, size_resized)
image_resized = cv2.warpAffine(
image,
trans,
size_resized,
# (int(w_resized), int(h_resized)),
flags=interpolation
)
return image_resized, size_resized, center, scale
def get_final_preds(grouped_joints, center, scale, heatmap_size):
final_results = []
# for each image
for i in range(len(grouped_joints)):
final_results.insert(i, [])
# for each detected person
for person in grouped_joints[i]:
# joints = np.zeros((person.shape[0], 3))
joints = transform_preds(person.cpu().numpy(), center, scale, heatmap_size)
final_results[i].append(joints)
return final_results
#
#
```
#### File: jingyuanchan/Pose_Baseline/SimpleHigherHRNet.py
```python
from collections import OrderedDict
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from models.higherhrnet import HigherHRNet
from misc.HeatmapParser import HeatmapParser
from misc.utils import get_multi_scale_size, resize_align_multi_scale, get_multi_stage_outputs, aggregate_results, get_final_preds, bbox_iou
class SimpleHigherHRNet:
"""
SimpleHigherHRNet class.
The class provides a simple and customizable method to load the HigherHRNet network, load the official pre-trained
weights, and predict the human pose on single images or a batch of images.
"""
def __init__(self,
c,
nof_joints,
checkpoint_path,
model_name='HigherHRNet',
resolution=512,
interpolation=cv2.INTER_LINEAR,
return_heatmaps=False,
return_bounding_boxes=False,
filter_redundant_poses=True,
max_nof_people=30,
max_batch_size=32,
device=torch.device("cpu")):
"""
Initializes a new SimpleHigherHRNet object.
        HigherHRNet is initialized on the specified torch.device and its
        pre-trained weights are loaded from disk.
Args:
c (int): number of channels (when using HigherHRNet model).
nof_joints (int): number of joints.
checkpoint_path (str): path to an official higherhrnet checkpoint.
model_name (str): model name (just HigherHRNet at the moment).
Valid names for HigherHRNet are: `HigherHRNet`, `higherhrnet`
Default: "HigherHRNet"
resolution (int): higherhrnet input resolution - format: int == min(width, height).
Default: 512
interpolation (int): opencv interpolation algorithm.
Default: cv2.INTER_LINEAR
return_heatmaps (bool): if True, heatmaps will be returned along with poses by self.predict.
Default: False
return_bounding_boxes (bool): if True, bounding boxes will be returned along with poses by self.predict.
Default: False
filter_redundant_poses (bool): if True, redundant poses (poses being almost identical) are filtered out.
Default: True
max_nof_people (int): maximum number of detectable people.
Default: 30
max_batch_size (int): maximum batch size used in higherhrnet inference.
                Default: 32
device (:class:`torch.device` or str): the higherhrnet (and yolo) inference will be run on this device.
Default: torch.device("cpu")
"""
self.c = c
self.nof_joints = nof_joints
self.checkpoint_path = checkpoint_path
self.model_name = model_name
self.resolution = resolution
self.interpolation = interpolation
self.return_heatmaps = return_heatmaps
self.return_bounding_boxes = return_bounding_boxes
self.filter_redundant_poses = filter_redundant_poses
self.max_nof_people = max_nof_people
self.max_batch_size = max_batch_size
self.device = device
# assert nof_joints in (14, 15, 17)
if self.nof_joints == 14:
self.joint_set = 'crowdpose'
elif self.nof_joints == 15:
self.joint_set = 'mpii'
elif self.nof_joints == 17:
self.joint_set = 'coco'
else:
raise ValueError('Wrong number of joints.')
if model_name in ('HigherHRNet', 'higherhrnet'):
self.model = HigherHRNet(c=c, nof_joints=nof_joints)
else:
raise ValueError('Wrong model name.')
checkpoint = torch.load(checkpoint_path, map_location=self.device)
if 'model' in checkpoint:
checkpoint = checkpoint['model']
# fix issue with official high-resolution weights
checkpoint = OrderedDict([(k[2:] if k[:2] == '1.' else k, v) for k, v in checkpoint.items()])
self.model.load_state_dict(checkpoint)
if 'cuda' in str(self.device):
print("device: 'cuda' - ", end="")
if 'cuda' == str(self.device):
# if device is set to 'cuda', all available GPUs will be used
print("%d GPU(s) will be used" % torch.cuda.device_count())
device_ids = None
else:
# if device is set to 'cuda:IDS', only that/those device(s) will be used
print("GPU(s) '%s' will be used" % str(self.device))
device_ids = [int(x) for x in str(self.device)[5:].split(',')]
self.model = torch.nn.DataParallel(self.model, device_ids=device_ids)
elif 'cpu' == str(self.device):
print("device: 'cpu'")
else:
raise ValueError('Wrong device name.')
self.model = self.model.to(device)
self.model.eval()
self.output_parser = HeatmapParser(num_joints=self.nof_joints,
joint_set=self.joint_set,
max_num_people=self.max_nof_people,
ignore_too_much=True,
detection_threshold=0.3)
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def predict(self, image):
"""
Predicts the human pose on a single image or a stack of n images.
Args:
image (:class:`np.ndarray`):
the image(s) on which the human pose will be estimated.
image is expected to be in the opencv format.
image can be:
- a single image with shape=(height, width, BGR color channel)
- a stack of n images with shape=(n, height, width, BGR color channel)
Returns:
:class:`np.ndarray` or list:
a numpy array containing human joints for each (detected) person.
Format:
if image is a single image:
shape=(# of people, # of joints (nof_joints), 3); dtype=(np.float32).
if image is a stack of n images:
list of n np.ndarrays with
shape=(# of people, # of joints (nof_joints), 3); dtype=(np.float32).
Each joint has 3 values: (y position, x position, joint confidence).
If self.return_heatmaps, the class returns a list with (heatmaps, human joints)
If self.return_bounding_boxes, the class returns a list with (bounding boxes, human joints)
If self.return_heatmaps and self.return_bounding_boxes, the class returns a list with
(heatmaps, bounding boxes, human joints)
"""
if len(image.shape) == 3:
return self._predict_single(image)
elif len(image.shape) == 4:
return self._predict_batch(image)
else:
raise ValueError('Wrong image format.')
def _predict_single(self, image):
ret = self._predict_batch(image[None, ...])
if len(ret) > 1: # heatmaps and/or bboxes and joints
ret = [r[0] for r in ret]
else: # joints only
ret = ret[0]
return ret
def _predict_batch(self, image):
with torch.no_grad():
heatmaps_list = None
tags_list = []
# scales and base (size, center, scale)
scales = (1,) # ToDo add support to multiple scales
scales = sorted(scales, reverse=True)
base_size, base_center, base_scale = get_multi_scale_size(
image[0], self.resolution, 1, 1
)
# for each scale (at the moment, just one scale)
for idx, scale in enumerate(scales):
# rescale image, convert to tensor, move to device
                images = list()
                for img in image:
                    img_resized, size_resized, _, _ = resize_align_multi_scale(
                        img, self.resolution, scale, min(scales), interpolation=self.interpolation
                    )
                    img_resized = self.transform(cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)).unsqueeze(dim=0)
                    img_resized = img_resized.to(self.device)
                    images.append(img_resized)
images = torch.cat(images)
# inference
# output: list of HigherHRNet outputs (heatmaps)
# avg_heatmaps: averaged heatmaps
# tags: per-pixel identity ids.
# See Newell et al., Associative Embedding: End-to-End Learning for Joint Detection and
# Grouping, NIPS 2017. https://arxiv.org/abs/1611.05424 or
# http://papers.nips.cc/paper/6822-associative-embedding-end-to-end-learning-for-joint-detection-and-grouping
outputs, heatmaps, tags = get_multi_stage_outputs(
self.model, images, with_flip=False, project2image=True, size_projected=size_resized,
nof_joints=self.nof_joints, max_batch_size=self.max_batch_size
)
# aggregate the multiple heatmaps and tags
heatmaps_list, tags_list = aggregate_results(
scale, heatmaps_list, tags_list, heatmaps, tags, with_flip=False, project2image=True
)
heatmaps = heatmaps_list.float() / len(scales)
tags = torch.cat(tags_list, dim=4)
# refine prediction
# grouped has the shape (people, joints, 4) -> 4: (x, y, confidence, tag)
# scores has the shape (people, ) and corresponds to the person confidence before refinement
grouped, scores = self.output_parser.parse(
heatmaps, tags, adjust=True, refine=True # ToDo parametrize these two parameters
)
# get final predictions
final_results = get_final_preds(
grouped, base_center, base_scale, [heatmaps.shape[3], heatmaps.shape[2]]
)
if self.filter_redundant_poses:
# filter redundant poses - this step filters out poses whose joints have, on average, a difference
# lower than 3 pixels
# this is useful when refine=True in self.output_parser.parse because that step joins together
# skeleton parts belonging to the same people (but then it does not remove redundant skeletons)
final_pts = []
# for each image
for i in range(len(final_results)):
final_pts.insert(i, list())
# for each person
for pts in final_results[i]:
if len(final_pts[i]) > 0:
diff = np.mean(np.abs(np.array(final_pts[i])[..., :2] - pts[..., :2]), axis=(1, 2))
if np.any(diff < 3): # average diff between this pose and another one is less than 3 pixels
continue
final_pts[i].append(pts)
final_results = final_pts
pts = []
boxes = []
for i in range(len(final_results)):
pts.insert(i, np.asarray(final_results[i]))
if len(pts[i]) > 0:
pts[i][..., [0, 1]] = pts[i][..., [1, 0]] # restoring (y, x) order as in SimpleHRNet
pts[i] = pts[i][..., :3]
if self.return_bounding_boxes:
left_top = np.min(pts[i][..., 0:2], axis=1)
right_bottom = np.max(pts[i][..., 0:2], axis=1)
# [x1, y1, x2, y2]
boxes.insert(i, np.stack(
[left_top[:, 1], left_top[:, 0], right_bottom[:, 1], right_bottom[:, 0]], axis=-1
))
else:
boxes.insert(i, [])
res = list()
if self.return_heatmaps:
res.append(heatmaps)
if self.return_bounding_boxes:
res.append(boxes)
res.append(pts)
if len(res) > 1:
return res
else:
return res[0]
if __name__ == '__main__':
hhrnet = SimpleHigherHRNet(
c=32, nof_joints=17, checkpoint_path='./weights/pose_higher_hrnet_w32_512.pth',
resolution=512, device='cuda'
)
# img = np.ones((384, 256, 3), dtype=np.uint8)
import cv2
img = cv2.imread('./sample.jpg', cv2.IMREAD_ANYCOLOR)
hhrnet.predict(img)
``` |
{
"source": "JingyueLu/GNN_branching",
"score": 2
} |
#### File: convex_adversarial/examples/problems.py
```python
import sys
sys.path.append('.')
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn.functional as F
import numpy as np
import torch.utils.data as td
import argparse
from convex_adversarial import epsilon_from_model, DualNetBounds
from convex_adversarial import Dense, DenseSequential
import math
import os
def model_wide(in_ch, out_width, k):
model = nn.Sequential(
nn.Conv2d(in_ch, 4*k, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(4*k, 8*k, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8*k*out_width*out_width,k*128),
nn.ReLU(),
nn.Linear(k*128, 10)
)
return model
def model_deep(in_ch, out_width, k, n1=8, n2=16, linear_size=100):
def group(inf, outf, N):
if N == 1:
conv = [nn.Conv2d(inf, outf, 4, stride=2, padding=1),
nn.ReLU()]
else:
conv = [nn.Conv2d(inf, outf, 3, stride=1, padding=1),
nn.ReLU()]
for _ in range(1,N-1):
conv.append(nn.Conv2d(outf, outf, 3, stride=1, padding=1))
conv.append(nn.ReLU())
conv.append(nn.Conv2d(outf, outf, 4, stride=2, padding=1))
conv.append(nn.ReLU())
return conv
conv1 = group(in_ch, n1, k)
conv2 = group(n1, n2, k)
model = nn.Sequential(
*conv1,
*conv2,
Flatten(),
nn.Linear(n2*out_width*out_width,linear_size),
nn.ReLU(),
        nn.Linear(linear_size, 10)
)
return model
## 14*14*8 (1568) --> 14*14*8 (1568) --> 14*14*8 (1568) --> 392 --> 100 (5196 ReLUs)
def mnist_model_deep_custom():
model = nn.Sequential(
nn.Conv2d(1, 8, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(8, 8, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(8, 8, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(8, 8, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8*7*7,100),
nn.ReLU(),
nn.Linear(100, 10)
)
return model
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
def mnist_loaders(batch_size, shuffle_test=False):
mnist_train = datasets.MNIST("./data", train=True, download=True, transform=transforms.ToTensor())
mnist_test = datasets.MNIST("./data", train=False, download=True, transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, pin_memory=True)
test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=shuffle_test, pin_memory=True)
return train_loader, test_loader
def fashion_mnist_loaders(batch_size):
mnist_train = datasets.MNIST("./fashion_mnist", train=True,
download=True, transform=transforms.ToTensor())
mnist_test = datasets.MNIST("./fashion_mnist", train=False,
download=True, transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, pin_memory=True)
test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, pin_memory=True)
return train_loader, test_loader
def mnist_500():
model = nn.Sequential(
Flatten(),
nn.Linear(28*28,500),
nn.ReLU(),
nn.Linear(500, 10)
)
return model
def mnist_model():
model = nn.Sequential(
nn.Conv2d(1, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32*7*7,100),
nn.ReLU(),
nn.Linear(100, 10)
)
return model
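# Illustrative sketch (not part of the original file): wiring the MNIST loaders to one
# of the model factories above and running a single forward pass.
#
#     train_loader, test_loader = mnist_loaders(batch_size=64)
#     model = mnist_model()
#     images, labels = next(iter(train_loader))
#     logits = model(images)  # shape (64, 10)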
def mnist_model_wide(k):
return model_wide(1, 7, k)
def mnist_model_deep(k):
return model_deep(1, 7, k)
def mnist_model_large():
model = nn.Sequential(
nn.Conv2d(1, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(64*7*7,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
return model
def replace_10_with_0(y):
return y % 10
def svhn_loaders(batch_size):
train = datasets.SVHN("./data", split='train', download=True, transform=transforms.ToTensor(), target_transform=replace_10_with_0)
test = datasets.SVHN("./data", split='test', download=True, transform=transforms.ToTensor(), target_transform=replace_10_with_0)
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False, pin_memory=True)
return train_loader, test_loader
def svhn_model():
model = nn.Sequential(
nn.Conv2d(3, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32*8*8,100),
nn.ReLU(),
nn.Linear(100, 10)
).cuda()
return model
def har_loaders(batch_size):
X_te = torch.from_numpy(np.loadtxt('./data/UCI HAR Dataset/test/X_test.txt')).float()
X_tr = torch.from_numpy(np.loadtxt('./data/UCI HAR Dataset/train/X_train.txt')).float()
y_te = torch.from_numpy(np.loadtxt('./data/UCI HAR Dataset/test/y_test.txt')-1).long()
y_tr = torch.from_numpy(np.loadtxt('./data/UCI HAR Dataset/train/y_train.txt')-1).long()
har_train = td.TensorDataset(X_tr, y_tr)
har_test = td.TensorDataset(X_te, y_te)
train_loader = torch.utils.data.DataLoader(har_train, batch_size=batch_size, shuffle=True, pin_memory=True)
test_loader = torch.utils.data.DataLoader(har_test, batch_size=batch_size, shuffle=False, pin_memory=True)
return train_loader, test_loader
def har_500_model():
model = nn.Sequential(
nn.Linear(561, 500),
nn.ReLU(),
nn.Linear(500, 6)
)
return model
def har_500_250_model():
model = nn.Sequential(
nn.Linear(561, 500),
nn.ReLU(),
nn.Linear(500, 250),
nn.ReLU(),
nn.Linear(250, 6)
)
return model
def har_500_250_100_model():
model = nn.Sequential(
nn.Linear(561, 500),
nn.ReLU(),
nn.Linear(500, 250),
nn.ReLU(),
nn.Linear(250, 100),
nn.ReLU(),
nn.Linear(100, 6)
)
return model
def har_resnet_model():
model = DenseSequential(
Dense(nn.Linear(561, 561)),
nn.ReLU(),
Dense(nn.Sequential(), None, nn.Linear(561,561)),
nn.ReLU(),
nn.Linear(561,6)
)
return model
def cifar_loaders(batch_size, shuffle_test=False):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.225, 0.225, 0.225])
train = datasets.CIFAR10('./data', train=True, download=True,
transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]))
test = datasets.CIFAR10('./data', train=False,
transform=transforms.Compose([transforms.ToTensor(), normalize]))
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size,
shuffle=True, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size,
shuffle=shuffle_test, pin_memory=True)
return train_loader, test_loader
# 6244 ReLUs
# wide model
def cifar_model():
model = nn.Sequential(
nn.Conv2d(3, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32*8*8,100),
nn.ReLU(),
nn.Linear(100, 10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
# 6756 ReLUs
# deep model
def cifar_model_deep():
model = nn.Sequential(
nn.Conv2d(3, 8, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(8, 8, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(8, 8, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(8, 8, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8*8*8, 100),
nn.ReLU(),
nn.Linear(100, 10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
# 16*16*8 (2048) --> 16*8*8 (1024) --> 100
# 3172 ReLUs (small model)
def cifar_model_m2():
model = nn.Sequential(
nn.Conv2d(3, 8, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(8, 16, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(16*8*8,100),
nn.ReLU(),
nn.Linear(100, 10)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def cifar_model_large():
model = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(64*8*8,512),
nn.ReLU(),
nn.Linear(512,512),
nn.ReLU(),
nn.Linear(512,10)
)
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
            m.bias.data.zero_()
    return model
def cifar_model_resnet(N = 5, factor=10):
def block(in_filters, out_filters, k, downsample):
if not downsample:
k_first = 3
skip_stride = 1
k_skip = 1
else:
k_first = 4
skip_stride = 2
k_skip = 2
return [
Dense(nn.Conv2d(in_filters, out_filters, k_first, stride=skip_stride, padding=1)),
nn.ReLU(),
Dense(nn.Conv2d(in_filters, out_filters, k_skip, stride=skip_stride, padding=0),
None,
nn.Conv2d(out_filters, out_filters, k, stride=1, padding=1)),
nn.ReLU()
]
conv1 = [nn.Conv2d(3,16,3,stride=1,padding=1), nn.ReLU()]
conv2 = block(16,16*factor,3, False)
for _ in range(N):
conv2.extend(block(16*factor,16*factor,3, False))
conv3 = block(16*factor,32*factor,3, True)
for _ in range(N-1):
conv3.extend(block(32*factor,32*factor,3, False))
conv4 = block(32*factor,64*factor,3, True)
for _ in range(N-1):
conv4.extend(block(64*factor,64*factor,3, False))
layers = (
conv1 +
conv2 +
conv3 +
conv4 +
[Flatten(),
nn.Linear(64*factor*8*8,1000),
nn.ReLU(),
nn.Linear(1000, 10)]
)
model = DenseSequential(
*layers
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
return model
def argparser(batch_size=50, epochs=20, seed=0, verbose=1, lr=1e-3,
epsilon=0.1, starting_epsilon=None,
proj=None,
norm_train='l1', norm_test='l1',
opt='sgd', momentum=0.9, weight_decay=5e-4):
parser = argparse.ArgumentParser()
# optimizer settings
parser.add_argument('--opt', default=opt)
parser.add_argument('--momentum', type=float, default=momentum)
parser.add_argument('--weight_decay', type=float, default=weight_decay)
parser.add_argument('--batch_size', type=int, default=batch_size)
parser.add_argument('--test_batch_size', type=int, default=batch_size)
parser.add_argument('--epochs', type=int, default=epochs)
parser.add_argument("--lr", type=float, default=lr)
# epsilon settings
parser.add_argument("--epsilon", type=float, default=epsilon)
parser.add_argument("--starting_epsilon", type=float, default=starting_epsilon)
parser.add_argument('--schedule_length', type=int, default=10)
# projection settings
parser.add_argument('--proj', type=int, default=proj)
parser.add_argument('--norm_train', default=norm_train)
parser.add_argument('--norm_test', default=norm_test)
# model arguments
parser.add_argument('--model', default=None)
parser.add_argument('--model_factor', type=int, default=8)
parser.add_argument('--cascade', type=int, default=1)
parser.add_argument('--method', default=None)
parser.add_argument('--resnet_N', type=int, default=1)
parser.add_argument('--resnet_factor', type=int, default=1)
# other arguments
parser.add_argument('--prefix')
parser.add_argument('--load')
parser.add_argument('--real_time', action='store_true')
parser.add_argument('--seed', type=int, default=seed)
parser.add_argument('--verbose', type=int, default=verbose)
parser.add_argument('--cuda_ids', default=None)
args = parser.parse_args()
if args.starting_epsilon is None:
args.starting_epsilon = args.epsilon
if args.prefix:
if args.model is not None:
args.prefix += '_'+args.model
if args.method is not None:
args.prefix += '_'+args.method
banned = ['verbose', 'prefix',
'resume', 'baseline', 'eval',
'method', 'model', 'cuda_ids', 'load', 'real_time',
'test_batch_size']
if args.method == 'baseline':
banned += ['epsilon', 'starting_epsilon', 'schedule_length',
'l1_test', 'l1_train', 'm', 'l1_proj']
# Ignore these parameters for filename since we never change them
banned += ['momentum', 'weight_decay']
if args.cascade == 1:
banned += ['cascade']
# if not using a model that uses model_factor,
# ignore model_factor
if args.model not in ['wide', 'deep']:
banned += ['model_factor']
# if args.model != 'resnet':
banned += ['resnet_N', 'resnet_factor']
for arg in sorted(vars(args)):
if arg not in banned and getattr(args,arg) is not None:
args.prefix += '_' + arg + '_' +str(getattr(args, arg))
if args.schedule_length > args.epochs:
raise ValueError('Schedule length for epsilon ({}) is greater than '
'number of epochs ({})'.format(args.schedule_length, args.epochs))
else:
args.prefix = 'temporary'
if args.cuda_ids is not None:
print('Setting CUDA_VISIBLE_DEVICES to {}'.format(args.cuda_ids))
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_ids
return args
def args2kwargs(args, X=None):
if args.proj is not None:
kwargs = {
'proj' : args.proj,
}
else:
kwargs = {
}
kwargs['parallel'] = (args.cuda_ids is not None)
return kwargs
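# Illustrative sketch (not part of the original file): how the helpers above are
# typically combined at the start of a training script (argparser reads sys.argv).
#
#     args = argparser(batch_size=32, epsilon=0.1)
#     kwargs = args2kwargs(args)
#     train_loader, test_loader = mnist_loaders(args.batch_size)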
def argparser_evaluate(epsilon=0.1, norm='l1'):
parser = argparse.ArgumentParser()
parser.add_argument("--epsilon", type=float, default=epsilon)
parser.add_argument('--proj', type=int, default=None)
parser.add_argument('--norm', default=norm)
parser.add_argument('--model', default=None)
parser.add_argument('--dataset', default='mnist')
parser.add_argument('--load')
parser.add_argument('--output')
parser.add_argument('--real_time', action='store_true')
# parser.add_argument('--seed', type=int, default=seed)
parser.add_argument('--verbose', type=int, default=True)
parser.add_argument('--cuda_ids', default=None)
args = parser.parse_args()
if args.cuda_ids is not None:
print('Setting CUDA_VISIBLE_DEVICES to {}'.format(args.cuda_ids))
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_ids
return args
```
#### File: GNN_branching/plnn/branch_and_bound.py
```python
import bisect
import math
import torch
class CandidateDomain:
'''
Object representing a domain as produced by the BranchAndBound algorithm.
Comparison between its elements is based on the values of the lower bounds
that are estimated for it.
'''
def __init__(self, lb=-float('inf'), ub=float('inf'), dm=None):
self.lower_bound = lb
self.upper_bound = ub
self.domain = dm
def __lt__(self, other):
return self.lower_bound < other.lower_bound
def __le__(self, other):
return self.lower_bound <= other.lower_bound
def __eq__(self, other):
return self.lower_bound == other.lower_bound
def __repr__(self):
string = f"[LB: {self.lower_bound:.4e}\t" \
f" UB: {self.upper_bound:.4e}\n" \
f" Domain: {self.domain}]"
return string
def area(self):
'''
Compute the area of the domain
'''
dom_sides = self.domain.select(1, 1) - self.domain.select(1, 0)
dom_area = dom_sides.prod()
return dom_area
def no_grad(f):
def g(*args, **kwargs):
with torch.no_grad():
return f(*args, **kwargs)
return g
@no_grad
def bab(net, domain, eps=1e-3, decision_bound=None, smart_branching=None):
'''
Uses branch and bound algorithm to evaluate the global minimum
of a given neural network.
`net` : Neural Network class, defining the `get_upper_bound` and
`get_lower_bound` functions
`domain` : Tensor defining the search bounds at each dimension.
`eps` : Maximum difference between the UB and LB over the minimum
before we consider having converged
`decision_bound`: If not None, stop the search if the UB and LB are both
superior or both inferior to this value.
Returns : Lower bound and Upper bound on the global minimum,
as well as the point where the upper bound is achieved
'''
nb_visited_states = 0
global_ub_point, global_ub = net.get_upper_bound(domain)
global_lb = net.get_lower_bound(domain)
inp_shape = domain.shape[:-1]
normed_domain = torch.stack((torch.zeros(inp_shape),
torch.ones(inp_shape)), 1)
domain_lb = domain.select(-1, 0)
domain_width = domain.select(-1, 1) - domain.select(-1, 0)
domain_lb = domain_lb.contiguous().unsqueeze_(-1).expand(inp_shape + (2,))
domain_width = domain_width.unsqueeze_(-1).expand(inp_shape + (2,))
# Use objects of type CandidateDomain to store domains with their bounds.
candidate_domain = CandidateDomain(lb=global_lb, ub=global_ub,
dm=normed_domain)
domains = [candidate_domain]
# This counter is used to decide when to prune domains
prune_counter = 0
while global_ub - global_lb > eps:
# Pick a domain to branch over and remove that from our current list of
# domains. Also potentially perform some pruning on the way.
selected_candidate_domain = pick_out(domains, global_ub-eps)
        # Generate new, smaller (normalized) domains using box split.
if smart_branching is None:
# Simply do longest edge
ndoms = box_split(selected_candidate_domain.domain)
else:
# Follow the heuristic
useful_cutoff = global_ub - eps
if decision_bound is not None:
useful_cutoff = min(useful_cutoff, decision_bound)
ndoms = smart_box_split(selected_candidate_domain.domain, smart_branching,
domain_lb, domain_width, useful_cutoff)
for ndom_i in ndoms:
nb_visited_states += 1
if (nb_visited_states % 10) == 0:
print(f"Running Nb states visited: {nb_visited_states}")
# Find the upper and lower bounds on the minimum in dom_i
dom_i = domain_lb + domain_width * ndom_i
dom_ub_point, dom_ub = net.get_upper_bound(dom_i)
dom_lb = net.get_lower_bound(dom_i)
# Update the global upper if the new upper bound found is lower.
if dom_ub < global_ub:
global_ub = dom_ub
global_ub_point = dom_ub_point
# Add the domain to our current list of domains if its lowerbound
# is less than the global upperbound.
if dom_lb < global_ub:
candidate_domain_to_add = CandidateDomain(lb=dom_lb,
ub=dom_ub,
dm=ndom_i)
add_domain(candidate_domain_to_add, domains)
prune_counter += 1
# Prune domains whose lowerbounds are larger than or equal to the
# global upperbound.
# If domains list is larger than 100 items and if prune_counter has
# reached a threshold, prune domains that we no longer need.
if prune_counter >= 100 and len(domains) >= 100:
# Remove domains with dom_lb >= global_ub
domains = prune_domains(domains, global_ub-eps)
prune_counter = 0
# Do a pass over all the remaining domains to evaluate how much of
# the input is there left to prune
# print_remaining_domain(domains)
print(f"Current: lb: {global_lb}\t ub: {global_ub}")
# Update the global lower bound with a lower bound that belongs
# to a domain in the updated list "domains" .
# TODO: This current implementation is only a global lower bound
# if we sort domains by lower_bound.
if len(domains) > 0:
global_lb = domains[0].lower_bound
print('current glb: ', global_lb)
else:
# if there is no more domains, we have pruned them all.
global_lb = global_ub - eps
# Stopping criterion
if decision_bound is not None:
if (global_lb >= decision_bound):
break
elif (global_ub < decision_bound):
break
return global_lb, global_ub, global_ub_point, nb_visited_states
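# Illustrative sketch (not part of the original file): `net` is any object exposing
# get_upper_bound(domain) and get_lower_bound(domain) as described in the docstring,
# and `input_domain` is a hypothetical tensor of per-dimension [lb, ub] bounds.
#
#     lb, ub, ub_point, n_states = bab(net, input_domain, eps=1e-3, decision_bound=0.)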
def add_domain(candidate, domains):
'''
Use binary search to add the new domain `candidate`
to the candidate list `domains` so that `domains` remains a sorted list.
'''
bisect.insort_left(domains, candidate)
def pick_out(domains, threshold):
'''
Pick the first domain in the `domains` sequence
that has a lower bound lower than `threshold`.
Any domain appearing before the chosen one but having a lower_bound greater
than the threshold is discarded.
    Returns: the first non-prunable CandidateDomain, i.e. the one with the lowest lower_bound.
'''
assert len(domains) > 0, "The given domains list is empty."
while True:
assert len(domains) > 0, "No domain left to pick from."
selected_candidate_domain = domains.pop(0)
if selected_candidate_domain.lower_bound < threshold:
break
return selected_candidate_domain
def box_split(domain):
'''
Use box-constraints to split the input domain.
Split by dividing the domain into two from its longest edge.
Assumes a rectangular domain, which is aligned with the cartesian
coordinate frame.
`domain`: A 2d tensor whose rows contain lower and upper limits
of the corresponding dimension.
Returns: A list of sub-domains represented as 2d tensors.
'''
# Find the longest edge by checking the difference of lower and upper
# limits in each dimension.
diff = domain[:, 1] - domain[:, 0]
edgelength, dim = torch.max(diff, 0)
# Unwrap from tensor containers
edgelength = edgelength.item()
dim = dim.item()
# Now split over dimension dim:
half_length = edgelength/2
# dom1: Upper bound in the 'dim'th dimension is now at halfway point.
dom1 = domain.clone()
dom1[dim, 1] -= half_length
    # dom2: Lower bound in 'dim'th dimension is now at halfway point.
dom2 = domain.clone()
dom2[dim, 0] += half_length
sub_domains = [dom1, dom2]
return sub_domains
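# Worked example (not in the original file): a box whose first edge is the longest is
# split along dimension 0.
#
#     dom = torch.tensor([[0., 4.], [0., 2.]])
#     d1, d2 = box_split(dom)
#     # d1 == [[0., 2.], [0., 2.]], d2 == [[2., 4.], [0., 2.]]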
def smart_box_split(ndomain, dualnet, domain_lb, domain_width, useful_cutoff):
'''
Use box-constraints to split the input domain.
Split by dividing the domain into two.
We decide on which dimension to split by trying all splits with a cheap lower bound.
    `ndomain`: A 2d tensor (normalized to [0, 1]) whose rows contain lower and
               upper limits of the corresponding dimension.
Returns: A list of sub-domains represented as 2d tensors.
'''
    # We're going to try a split along every dimension, get the bounds for each,
    # and pick the dimension whose split yields the largest value of the lower
    # of the two halves' lower bounds.
domain = domain_lb + domain_width * ndomain
largest_lowest_lb = -float('inf')
largest_lowest_lb_dim = None
split_lbs = None
for dim in range(domain.shape[0]):
        # Split along the i-th dimension
dom1 = domain.clone()
dom1[dim, 1] = (dom1[dim, 1] + dom1[dim, 0]) / 2
dom2 = domain.clone()
dom2[dim, 0] = (dom2[dim, 1] + dom2[dim, 0]) / 2
both_doms = torch.stack([dom1, dom2], 0)
lbs = dualnet.get_lower_bounds(both_doms)
lowest_lb = lbs.min()
if lowest_lb > largest_lowest_lb:
largest_lowest_lb = lowest_lb
largest_lowest_lb_dim = dim
split_lbs = lbs
ndom1 = ndomain.clone()
ndom1[largest_lowest_lb_dim, 1] = (ndom1[largest_lowest_lb_dim, 1] + ndom1[largest_lowest_lb_dim, 0]) / 2
ndom2 = ndomain.clone()
ndom2[largest_lowest_lb_dim, 0] = (ndom2[largest_lowest_lb_dim, 1] + ndom2[largest_lowest_lb_dim, 0]) / 2
sub_domains = [ndom1, ndom2]
return sub_domains
def prune_domains(domains, threshold):
'''
    Remove the domains in `domains` whose lower_bound is greater than or equal
    to `threshold`.
'''
# TODO: Could do this with binary search rather than iterating.
# TODO: If this is not sorted according to lower bounds, this
# implementation is incorrect because we can not reason about the lower
# bounds of the domain that come after
for i in range(len(domains)):
if domains[i].lower_bound >= threshold:
domains = domains[0:i]
break
return domains
def print_remaining_domain(domains):
'''
Iterate over all the domains, measuring the part of the whole input space
that they contain and print the total share it represents.
'''
remaining_area = 0
for dom in domains:
remaining_area += dom.area()
print(f'Remaining portion of the input space: {remaining_area*100:.8f}%')
```
#### File: GNN_branching/plnn/conv_kwinter_kw.py
```python
import gurobipy as grb
import math
import torch
from convex_adversarial import DualNetwork
from convex_adversarial.dual_layers import DualLinear, DualReLU
from plnn.dual_network_linear_approximation import LooseDualNetworkApproximation
from plnn.modules import View, Flatten
from torch.autograd import Variable
from torch.nn import functional as F
import time
from torch import nn
class LinearizedNetwork:
def __init__(self, layers):
'''
layers: A list of Pytorch layers containing only Linear/ReLU/MaxPools
'''
self.layers = layers
self.net = nn.Sequential(*layers)
def get_upper_bound_random(self, domain):
'''
Compute an upper bound of the minimum of the network on `domain`
Any feasible point is a valid upper bound on the minimum so we will
perform some random testing.
'''
nb_samples = 2056
nb_inp = domain.size(0)
# Not a great way of sampling but this will be good enough
# We want to get rows that are >= 0
rand_samples = torch.Tensor(nb_samples, nb_inp)
rand_samples.uniform_(0, 1)
domain_lb = domain.select(1, 0).contiguous()
domain_ub = domain.select(1, 1).contiguous()
domain_width = domain_ub - domain_lb
domain_lb = domain_lb.view(1, nb_inp).expand(nb_samples, nb_inp)
domain_width = domain_width.view(1, nb_inp).expand(nb_samples, nb_inp)
with torch.no_grad():
inps = domain_lb + domain_width * rand_samples
outs = self.net(inps)
upper_bound, idx = torch.min(outs, dim=0)
upper_bound = upper_bound[0].item()
ub_point = inps[idx].squeeze()
return ub_point, upper_bound
def get_upper_bound_pgd(self, domain_lb, domain_ub, ub_point):
'''
Compute an upper bound of the minimum of the network on `domain`
Any feasible point is a valid upper bound on the minimum so we will
perform some random testing.
'''
nb_samples = 2056
torch.set_num_threads(1)
nb_inp = torch.tensor(ub_point.size()).type(torch.long)
nb_inp[0] = nb_samples
nb_inp = nb_inp.tolist()
# Not a great way of sampling but this will be good enough
# We want to get rows that are >= 0
#rand_samples = torch.randn(nb_inp)
rand_samples = torch.rand(nb_inp)
best_ub = float('inf')
best_ub_inp = None
#domain_lb = domain.select(1, 0).contiguous()
#domain_ub = domain.select(1, 1).contiguous()
domain_lb = domain_lb.unsqueeze(0)
domain_ub = domain_ub.unsqueeze(0)
domain_width = domain_ub - domain_lb
ub_point_expanded = ub_point.expand(nb_inp)
domain_width = domain_width.expand(nb_inp)
domain_lb = domain_lb.expand(nb_inp)
inps = domain_lb + domain_width * rand_samples
#inps = ub_point_expanded + (domain_width/2) * rand_samples
#inps = torch.max(domain_lb, inps)
#inps = torch.min(domain_ub, inps)
inps[0] = ub_point.clone()
inps = Variable(inps, requires_grad=True)
batch_ub = float('inf')
for i in range(1000):
prev_batch_best = batch_ub
self.net.zero_grad()
#inps.zero_grad()
out = self.net(inps)
batch_ub = out.min().item()
if batch_ub < best_ub:
best_ub = batch_ub
# print(f"New best lb: {best_lb}")
_, idx = out.min(dim=0)
best_ub_inp = inps[idx[0]]
if batch_ub >= prev_batch_best:
break
#print(best_ub)
all_samp_sum = out.sum() / nb_samples
all_samp_sum.backward()
grad = inps.grad
max_grad, _ = grad.max(dim=0)
min_grad, _ = grad.min(dim=0)
grad_diff = max_grad - min_grad
lr = 1e-2 * domain_width / grad_diff
min_lr = lr.min()
with torch.no_grad():
step = -min_lr*grad
inps += step
#inps= inps.clamp(domain_lb, domain_ub)
inps = torch.max(domain_lb,inps)
inps = torch.min(inps, domain_ub)
inps = Variable(inps, requires_grad=True)
return best_ub_inp, best_ub
get_upper_bound = get_upper_bound_pgd
class InfeasibleMaskException(Exception):
pass
class KWConvNetwork(LinearizedNetwork):
def __init__(self, layers):
'''
layers: A list of Pytorch layers containing only Linear/ReLU/MaxPools
'''
super(KWConvNetwork, self).__init__(layers)
def get_lower_bound(self, relu_mask, pre_lbs, pre_ubs, decision, choice):
try:
start = time.time()
gub, glb, ub_point, new_mask, lower_bounds, upper_bounds = self.update_the_model(relu_mask, pre_lbs, pre_ubs, decision, choice)
end = time.time()
print('KW_Int define_linear: ', end-start)
return gub, glb,ub_point, new_mask, lower_bounds, upper_bounds
except InfeasibleMaskException:
# The model is infeasible, so this mask is wrong.
# We just return an infinite lower bound
return float('inf'), float('inf'), None,relu_mask, None, None
def check_optimization_success(self, introduced_constrs=None):
if self.model.status == 2:
# Optimization successful, nothing to complain about
pass
elif self.model.status == 3:
self.model.remove(introduced_constrs)
# The model is infeasible. We have made incompatible
# assumptions, so this subdomain doesn't exist.
raise InfeasibleMaskException()
else:
print('\n')
print(f'model.status: {self.model.status}\n')
raise NotImplementedError
def build_the_model(self, input_domain, x, ball_eps, bounded):
'''
Before the first branching, we build the model and create a mask matrix
        Output: relu_mask, current intermediate upper and lower bounds, a list of
                indices of the layers right before a ReLU layer, and
                the constructed gurobi model.
NOTE: we keep all bounds as a list of tensors from now on.
Only lower and upper bounds are kept in the same shape as layers' outputs.
Mask is linearized
              Gurobi_var lists are linearized.
              self.model_lower_bounds and self.model_upper_bounds are kept mainly
              for debugging purposes and could be removed.
'''
new_relu_mask = []
lower_bounds = []
upper_bounds = []
## NEW STRUCTURE: deal with all available bounds first
# first get KW bounds
self.loose_dual = LooseDualNetworkApproximation(self.layers, x, ball_eps)
kw_lb, kw_ub, pre_relu_indices, dual_info = self.loose_dual.init_kw_bounds(bounded)
# second get interval bounds
if len(input_domain.size()) == 2:
lower_bounds.append(input_domain[:,0].squeeze(-1))
upper_bounds.append(input_domain[:,1].squeeze(-1))
else:
lower_bounds.append(input_domain[:,:,:,0].squeeze(-1))
upper_bounds.append(input_domain[:,:,:,1].squeeze(-1))
layer_idx = 1
for layer in self.layers:
new_layer_lb = []
new_layer_ub = []
if type(layer) is nn.Linear:
pre_lb = lower_bounds[-1]
pre_ub = upper_bounds[-1]
pos_w = torch.clamp(layer.weight, 0, None)
neg_w = torch.clamp(layer.weight, None, 0)
out_lbs = pos_w @ pre_lb + neg_w @ pre_ub + layer.bias
out_ubs = pos_w @ pre_ub + neg_w @ pre_lb + layer.bias
# Get the better estimates from KW and Interval Bounds
new_layer_lb = torch.max(kw_lb[layer_idx], out_lbs)
new_layer_ub = torch.min(kw_ub[layer_idx], out_ubs)
elif type(layer) is nn.Conv2d:
assert layer.dilation == (1, 1)
pre_lb = lower_bounds[-1].unsqueeze(0)
pre_ub = upper_bounds[-1].unsqueeze(0)
pos_weight = torch.clamp(layer.weight, 0, None)
neg_weight = torch.clamp(layer.weight, None, 0)
out_lbs = (F.conv2d(pre_lb, pos_weight, layer.bias,
layer.stride, layer.padding, layer.dilation, layer.groups)
+ F.conv2d(pre_ub, neg_weight, None,
layer.stride, layer.padding, layer.dilation, layer.groups))
out_ubs = (F.conv2d(pre_ub, pos_weight, layer.bias,
layer.stride, layer.padding, layer.dilation, layer.groups)
+ F.conv2d(pre_lb, neg_weight, None,
layer.stride, layer.padding, layer.dilation, layer.groups))
new_layer_lb = (torch.max(kw_lb[layer_idx], out_lbs)).squeeze(0)
new_layer_ub = (torch.min(kw_ub[layer_idx], out_ubs)).squeeze(0)
elif type(layer) == nn.ReLU:
new_layer_lb = F.relu(lower_bounds[-1])
new_layer_ub = F.relu(upper_bounds[-1])
elif type(layer) == View:
continue
elif type(layer) == Flatten:
new_layer_lb = lower_bounds[-1].view(-1)
new_layer_ub = upper_bounds[-1].view(-1)
else:
#print(type(layer))
raise NotImplementedError
lower_bounds.append(new_layer_lb)
upper_bounds.append(new_layer_ub)
layer_idx += 1
# compare KW_INT bounds with KW bounds.
# if they are different, reupdate the kw model
for pre_idx in pre_relu_indices:
if torch.sum(abs(lower_bounds[pre_idx]-kw_lb[pre_idx])>1e-4)==0 and torch.sum(abs(upper_bounds[pre_idx]-kw_ub[pre_idx])>1e-4)==0:
pass
else:
print(f"initial kw: change_idx at {pre_idx}")
lower_bounds, upper_bounds, dual_info = self.loose_dual.update_kw_bounds( pre_idx, pre_lb_all = lower_bounds, pre_ub_all = upper_bounds, dual_info = dual_info)
break
# record the dual_info as an attribute of the loose_dual instance
# this should be the only dual instance recorded and should not
# be modified
#import pdb; pdb.set_trace()
self.loose_dual.orig_dual = dual_info
## NEW STRUCTURE: use the computed bounds to directly introduce gurobi models
# Initialize the model
self.model = grb.Model()
self.model.setParam('OutputFlag', False)
self.model.setParam('Threads', 1)
# keep a record of model's information
self.gurobi_vars = []
self.relu_constrs = []
self.relu_indices_mask = []
## Do the input layer, which is a special case
inp_gurobi_vars = []
zero_var = self.model.addVar(lb=0, ub=0, obj=0, vtype=grb.GRB.CONTINUOUS, name= 'zero')
if input_domain.dim() == 2:
# This is a linear input.
for dim, (lb, ub) in enumerate(input_domain):
v = self.model.addVar(lb=lb, ub=ub, obj=0,
vtype=grb.GRB.CONTINUOUS,
name=f'inp_{dim}')
inp_gurobi_vars.append(v)
else:
assert input_domain.dim() == 4
for chan in range(input_domain.size(0)):
chan_vars = []
for row in range(input_domain.size(1)):
row_vars = []
for col in range(input_domain.size(2)):
lb = input_domain[chan, row, col, 0]
ub = input_domain[chan, row, col, 1]
v = self.model.addVar(lb=lb, ub=ub, obj=0,
vtype=grb.GRB.CONTINUOUS,
name=f'inp_[{chan},{row},{col}]')
row_vars.append(v)
chan_vars.append(row_vars)
inp_gurobi_vars.append(chan_vars)
self.model.update()
self.gurobi_vars.append(inp_gurobi_vars)
## Do the other layers, computing for each of the neuron, its upper
## bound and lower bound
layer_idx = 1
for layer in self.layers:
new_layer_gurobi_vars = []
if type(layer) is nn.Linear:
# Get the better estimates from KW and Interval Bounds
out_lbs = lower_bounds[layer_idx]
out_ubs = upper_bounds[layer_idx]
for neuron_idx in range(layer.weight.size(0)):
lin_expr = layer.bias[neuron_idx].item()
coeffs = layer.weight[neuron_idx, :]
lin_expr += grb.LinExpr(coeffs, self.gurobi_vars[-1])
out_lb = out_lbs[neuron_idx].item()
out_ub = out_ubs[neuron_idx].item()
v = self.model.addVar(lb=out_lb, ub=out_ub, obj=0,
vtype=grb.GRB.CONTINUOUS,
name=f'lay{layer_idx}_{neuron_idx}')
self.model.addConstr(v == lin_expr)
self.model.update()
new_layer_gurobi_vars.append(v)
elif type(layer) is nn.Conv2d:
assert layer.dilation == (1, 1)
pre_lb_size = lower_bounds[layer_idx-1].unsqueeze(0).size()
out_lbs = lower_bounds[layer_idx].unsqueeze(0)
out_ubs = upper_bounds[layer_idx].unsqueeze(0)
for out_chan_idx in range(out_lbs.size(1)):
out_chan_vars = []
for out_row_idx in range(out_lbs.size(2)):
out_row_vars = []
for out_col_idx in range(out_lbs.size(3)):
lin_expr = layer.bias[out_chan_idx].item()
for in_chan_idx in range(layer.weight.shape[1]):
for ker_row_idx in range(layer.weight.shape[2]):
in_row_idx = -layer.padding[0] + layer.stride[0]*out_row_idx + ker_row_idx
if (in_row_idx < 0) or (in_row_idx == pre_lb_size[2]):
# This is padding -> value of 0
continue
for ker_col_idx in range(layer.weight.shape[3]):
in_col_idx = -layer.padding[1] + layer.stride[1]*out_col_idx + ker_col_idx
if (in_col_idx < 0) or (in_col_idx == pre_lb_size[3]):
# This is padding -> value of 0
continue
coeff = layer.weight[out_chan_idx, in_chan_idx, ker_row_idx, ker_col_idx].item()
lin_expr += coeff * self.gurobi_vars[-1][in_chan_idx][in_row_idx][in_col_idx]
out_lb = out_lbs[0, out_chan_idx, out_row_idx, out_col_idx].item()
out_ub = out_ubs[0, out_chan_idx, out_row_idx, out_col_idx].item()
v = self.model.addVar(lb=out_lb, ub=out_ub,
obj=0, vtype=grb.GRB.CONTINUOUS,
name=f'lay{layer_idx}_[{out_chan_idx}, {out_row_idx}, {out_col_idx}]')
self.model.addConstr(v == lin_expr)
self.model.update()
out_row_vars.append(v)
out_chan_vars.append(out_row_vars)
new_layer_gurobi_vars.append(out_chan_vars)
elif type(layer) == nn.ReLU:
new_relu_layer_constr = []
if isinstance(self.gurobi_vars[-1][0], list):
# This is convolutional
pre_lbs = lower_bounds[layer_idx-1]
pre_ubs = upper_bounds[layer_idx-1]
new_layer_mask = []
ratios_all = dual_info[0][layer_idx].d
bias_all = -pre_lbs*ratios_all
bias_all = bias_all*dual_info[0][layer_idx].I.squeeze(0).float()
bias_all = bias_all.squeeze(0)
#slope_all = pre_ubs/(pre_ubs-pre_lbs)
#bias_all = -pre_lbs*slope_all
#bias_all = bias_all*dual_info[0][layer_idx].I.squeeze(0).float()
#ratios_all = dual_info[0][layer_idx].d
for chan_idx, channel in enumerate(self.gurobi_vars[-1]):
chan_vars = []
for row_idx, row in enumerate(channel):
row_vars = []
for col_idx, pre_var in enumerate(row):
slope = ratios_all[0,chan_idx, row_idx, col_idx].item()
pre_ub = pre_ubs[chan_idx, row_idx, col_idx].item()
bias = bias_all[chan_idx, row_idx, col_idx].item()
if slope==1.0:
# ReLU is always passing
v = pre_var
new_layer_mask.append(1)
elif slope==0.0:
v = zero_var
new_layer_mask.append(0)
else:
lb = 0
ub = pre_ub
new_layer_mask.append(-1)
v =self.model.addVar(lb=lb, ub=ub,
obj=0, vtype=grb.GRB.CONTINUOUS,
name=f'ReLU{layer_idx}_[{chan_idx},{row_idx},{col_idx}]')
new_relu_layer_constr.append(self.model.addConstr(v >= pre_var))
new_relu_layer_constr.append(self.model.addConstr(v <= slope*pre_var + bias))
row_vars.append(v)
chan_vars.append(row_vars)
new_layer_gurobi_vars.append(chan_vars)
else:
pre_lbs = lower_bounds[layer_idx-1]
pre_ubs = upper_bounds[layer_idx-1]
new_layer_mask = []
#slope_all = pre_ubs/(pre_ubs-pre_lbs)
#bias_all = -pre_lbs*slope_all
#bias_all = bias_all*dual_info[0][layer_idx].I.squeeze(0).float()
#ratios_all = dual_info[0][layer_idx].d.squeeze(0)
ratios_all = dual_info[0][layer_idx].d.squeeze(0)
bias_all = -pre_lbs*ratios_all
bias_all = bias_all*dual_info[0][layer_idx].I.squeeze(0).float()
assert isinstance(self.gurobi_vars[-1][0], grb.Var)
for neuron_idx, pre_var in enumerate(self.gurobi_vars[-1]):
pre_ub = pre_ubs[neuron_idx].item()
slope = ratios_all[neuron_idx].item()
bias = bias_all[neuron_idx].item()
if slope==1.0:
# The ReLU is always passing
v = pre_var
new_layer_mask.append(1)
elif slope==0.0:
v = zero_var
# No need to add an additional constraint that v==0
# because this will be covered by the bounds we set on
# the value of v.
new_layer_mask.append(0)
else:
lb = 0
ub = pre_ub
v = self.model.addVar(lb=lb,
ub=ub,
obj=0,
vtype=grb.GRB.CONTINUOUS,
name=f'ReLU{layer_idx}_{neuron_idx}')
new_relu_layer_constr.append(self.model.addConstr(v >= pre_var))
new_relu_layer_constr.append(self.model.addConstr(v <= slope * pre_var + bias))
new_layer_mask.append(-1)
new_layer_gurobi_vars.append(v)
new_relu_mask.append(torch.tensor(new_layer_mask))
self.relu_constrs.append(new_relu_layer_constr)
elif type(layer) == View:
continue
elif type(layer) == Flatten:
for chan_idx in range(len(self.gurobi_vars[-1])):
for row_idx in range(len(self.gurobi_vars[-1][chan_idx])):
new_layer_gurobi_vars.extend(self.gurobi_vars[-1][chan_idx][row_idx])
else:
raise NotImplementedError
self.gurobi_vars.append(new_layer_gurobi_vars)
layer_idx += 1
# Assert that this is as expected a network with a single output
assert len(self.gurobi_vars[-1]) == 1, "Network doesn't have scalar output"
self.model.update()
print('finished building gurobi model, calling optimize function')
#import pdb; pdb.set_trace()
guro_start = time.time()
self.model.setObjective(self.gurobi_vars[-1][0], grb.GRB.MINIMIZE)
self.model.optimize()
#assert self.model.status == 2, "LP wasn't optimally solved"
self.check_optimization_success()
guro_end = time.time()
        print('Gurobi solved the LP in', guro_end - guro_start, 'seconds')
glb = self.gurobi_vars[-1][0].X
lower_bounds[-1] = torch.tensor([glb])
inp_size = lower_bounds[0].size()
mini_inp = torch.zeros(inp_size)
if len(inp_size)==1:
# This is a linear input.
for i in range(inp_size[0]):
mini_inp[i] = self.gurobi_vars[0][i].x
else:
for i in range(inp_size[0]):
for j in range(inp_size[1]):
for k in range(inp_size[2]):
mini_inp[i,j,k] = self.gurobi_vars[0][i][j][k].x
gub = self.net(mini_inp.unsqueeze(0)).item()
# record model information
# indices for undecided relu-nodes
self.relu_indices_mask = [(i==-1).nonzero().view(-1).tolist() for i in new_relu_mask]
# flatten high-dimensional gurobi var lists
for l_idx, layer in enumerate(self.layers):
if type(layer) is nn.Conv2d:
flattened_gurobi = []
for chan_idx in range(len(self.gurobi_vars[l_idx+1])):
for row_idx in range(len(self.gurobi_vars[l_idx+1][chan_idx])):
flattened_gurobi.extend(self.gurobi_vars[l_idx+1][chan_idx][row_idx])
self.gurobi_vars[l_idx+1] = flattened_gurobi
if type(self.layers[l_idx+1]) is nn.ReLU:
flattened_gurobi = []
for chan_idx in range(len(self.gurobi_vars[l_idx+2])):
for row_idx in range(len(self.gurobi_vars[l_idx+2][chan_idx])):
flattened_gurobi.extend(self.gurobi_vars[l_idx+2][chan_idx][row_idx])
self.gurobi_vars[l_idx+2] = flattened_gurobi
else:
continue
self.replacing_bd_index = len(lower_bounds)
return gub, glb, mini_inp.unsqueeze(0), new_relu_mask, lower_bounds, upper_bounds, pre_relu_indices
def update_the_model(self, relu_mask, pre_lb_all, pre_ub_all, decision, choice):
        '''
        Updates the upper and lower bounds after introducing a ReLU constraint, then
        updates the Gurobi model using these updated bounds.
        input:
        relu_mask: the copied mask of the parent domain
        pre_lb_all, pre_ub_all: lower and upper bounds of the parent domain
        decision: the index of the node on which the branching decision is made
        choice: force the non-passing constraint (0) or the all-passing constraint (1)
        pre_relu_indices: indices of the bounds of the layers right before a ReLU layer
        output: global lower bound, updated mask, updated lower and upper bounds
        '''
# modifying the mask according to the branching decision and choice made
relu_mask[decision[0]][decision[1]] = choice
# Computing updated KW bounds
# first changed_bounds_index should be the index of
# the layer right before the relu layer we decide to split on
first_changed_bounds_index = self.loose_dual.pre_relu_indices[decision[0]]
self.replacing_bd_index = min(self.replacing_bd_index, first_changed_bounds_index)
lower_bounds, upper_bounds, new_dual_info = self.loose_dual.update_kw_bounds(self.replacing_bd_index, pre_lb_all, pre_ub_all, decision, choice)
## DEBUG
#lower_init, upper_init, _ = self.loose_dual.init_kw_bounds(False,pre_lb_all, pre_ub_all, decision, choice)
#print('checking bounds')
#for i in range(len(lower_init)):
# assert torch.sum(torch.abs(lower_init[i]-lower_bounds[i])) < 1e-3, 'lower is wrong'
# assert torch.sum(torch.abs(upper_init[i]-upper_bounds[i])) < 1e-3, 'upper is wrong'
#print('passed checking')
#compute interval bounds
change_idx = len(lower_bounds)
#assert change_idx==10, 'wrong'
inter_bounds_index = first_changed_bounds_index+2
for layer in self.layers[first_changed_bounds_index+1:]:
if type(layer) is nn.Linear:
pre_lb = lower_bounds[inter_bounds_index-1]
pre_ub = upper_bounds[inter_bounds_index-1]
pos_w = torch.clamp(layer.weight, 0, None)
neg_w = torch.clamp(layer.weight, None, 0)
out_lbs = pos_w @ pre_lb + neg_w @ pre_ub + layer.bias
out_ubs = pos_w @ pre_ub + neg_w @ pre_lb + layer.bias
# Get the better estimates from KW and Interval Bounds
if torch.sum(lower_bounds[inter_bounds_index]>=out_lbs).item()!= len(lower_bounds[inter_bounds_index]):
lower_bounds[inter_bounds_index] = torch.max(lower_bounds[inter_bounds_index], out_lbs)
change_idx = min(inter_bounds_index, change_idx)
if torch.sum(upper_bounds[inter_bounds_index]<=out_ubs).item()!= len(upper_bounds[inter_bounds_index]):
upper_bounds[inter_bounds_index] = torch.min(upper_bounds[inter_bounds_index], out_ubs)
change_idx = min(inter_bounds_index, change_idx)
elif type(layer) is nn.Conv2d:
assert layer.dilation == (1, 1)
pre_lb = lower_bounds[inter_bounds_index-1].unsqueeze(0)
pre_ub = upper_bounds[inter_bounds_index-1].unsqueeze(0)
pos_weight = torch.clamp(layer.weight, 0, None)
neg_weight = torch.clamp(layer.weight, None, 0)
out_lbs = (F.conv2d(pre_lb, pos_weight, layer.bias,
layer.stride, layer.padding, layer.dilation, layer.groups)
+ F.conv2d(pre_ub, neg_weight, None,
layer.stride, layer.padding, layer.dilation, layer.groups))
out_ubs = (F.conv2d(pre_ub, pos_weight, layer.bias,
layer.stride, layer.padding, layer.dilation, layer.groups)
+ F.conv2d(pre_lb, neg_weight, None,
layer.stride, layer.padding, layer.dilation, layer.groups))
#lower_bounds[inter_bounds_index] = torch.max(lower_bounds[inter_bounds_index], out_lbs).squeeze(0)
#upper_bounds[inter_bounds_index] = torch.min(upper_bounds[inter_bounds_index], out_ubs).squeeze(0)
if torch.sum(lower_bounds[inter_bounds_index]>=out_lbs).item()!= len(lower_bounds[inter_bounds_index].view(-1)):
lower_bounds[inter_bounds_index] = torch.max(lower_bounds[inter_bounds_index], out_lbs).squeeze(0)
change_idx = min(inter_bounds_index, change_idx)
if torch.sum(upper_bounds[inter_bounds_index]<=out_ubs).item()!= len(upper_bounds[inter_bounds_index].view(-1)):
upper_bounds[inter_bounds_index] = torch.min(upper_bounds[inter_bounds_index], out_ubs).squeeze(0)
change_idx = min(inter_bounds_index, change_idx)
elif type(layer) == nn.ReLU:
lower_bounds[inter_bounds_index] = F.relu(lower_bounds[inter_bounds_index-1])
upper_bounds[inter_bounds_index] = F.relu(upper_bounds[inter_bounds_index-1])
elif type(layer) == View:
continue
elif type(layer) == Flatten:
lower_bounds[inter_bounds_index] = lower_bounds[inter_bounds_index-1].view(-1)
upper_bounds[inter_bounds_index] = upper_bounds[inter_bounds_index-1].view(-1)
else:
raise NotImplementedError
inter_bounds_index += 1
if change_idx < len(lower_bounds)-1:
print(f'update_kw interval is better: change_idx at {change_idx}')
#lower_init, upper_init, _ = self.loose_dual.init_kw_bounds(lower_bounds, upper_bounds)
lower_bounds, upper_bounds, _ = self.loose_dual.update_kw_bounds(self.replacing_bd_index, lower_bounds, upper_bounds)
## DEBUG
#for i in range(len(lower_init)-1):
# assert torch.sum(torch.abs(lower_init[i]-lower_bounds_k[i])) < 1e-3, 'change lower is wrong'
# assert torch.sum(torch.abs(upper_init[i]-upper_bounds_k[i])) < 1e-3, 'change upper is wrong'
#lower_bounds = lower_bounds_k
#upper_bounds = upper_bounds_k
# reintroduce ub and lb for gurobi constraints
introduced_constrs = []
rep_index = self.replacing_bd_index
for layer in self.layers[self.replacing_bd_index-1:]:
if type(layer) is nn.Linear:
for idx, var in enumerate(self.gurobi_vars[rep_index]):
var.ub = upper_bounds[rep_index][idx].item()
var.lb = lower_bounds[rep_index][idx].item()
#self.model_lower_bounds[rep_index] = lower_bounds[rep_index].clone()
#self.model_upper_bounds[rep_index] = upper_bounds[rep_index].clone()
elif type(layer) is nn.Conv2d:
conv_ub = upper_bounds[rep_index].view(-1)
conv_lb = lower_bounds[rep_index].view(-1)
for idx, var in enumerate(self.gurobi_vars[rep_index]):
var.ub = conv_ub[idx].item()
var.lb = conv_lb[idx].item()
#self.model_lower_bounds[rep_index] = lower_bounds[rep_index].clone()
#self.model_upper_bounds[rep_index] = upper_bounds[rep_index].clone()
elif type(layer) is nn.ReLU:
# locate relu index and remove all associated constraints
relu_idx = self.loose_dual.pre_relu_indices.index(rep_index-1)
#remove relu constraints
self.model.remove(self.relu_constrs[relu_idx])
self.relu_constrs[relu_idx] = []
# reintroduce relu constraints
pre_lbs = lower_bounds[rep_index-1].view(-1)
pre_ubs = upper_bounds[rep_index-1].view(-1)
for unmasked_idx in self.relu_indices_mask[relu_idx]:
pre_lb = pre_lbs[unmasked_idx].item()
pre_ub = pre_ubs[unmasked_idx].item()
var = self.gurobi_vars[rep_index][unmasked_idx]
pre_var = self.gurobi_vars[rep_index-1][unmasked_idx]
if pre_lb >= 0 and pre_ub >= 0:
# ReLU is always passing
var.lb = pre_lb
var.ub = pre_ub
introduced_constrs.append(self.model.addConstr(pre_var == var))
relu_mask[relu_idx][unmasked_idx] = 1
elif pre_lb <= 0 and pre_ub <= 0:
var.lb = 0
var.ub = 0
relu_mask[relu_idx][unmasked_idx] = 0
else:
var.lb = 0
var.ub = pre_ub
introduced_constrs.append(self.model.addConstr(var >= pre_var))
slope = pre_ub / (pre_ub - pre_lb)
bias = - pre_lb * slope
introduced_constrs.append(self.model.addConstr(var <= slope*pre_var + bias))
elif type(layer) is View:
pass
elif type(layer) is Flatten:
pass
else:
raise NotImplementedError
self.model.update()
rep_index += 1
# compute optimum
assert len(self.gurobi_vars[-1]) == 1, "Network doesn't have scalar output"
self.model.update()
#self.model.reset()
self.model.setObjective(self.gurobi_vars[-1][0], grb.GRB.MINIMIZE)
self.model.optimize()
#assert self.model.status == 2, "LP wasn't optimally solved"
self.check_optimization_success(introduced_constrs)
glb = self.gurobi_vars[-1][0].X
lower_bounds[-1] = torch.tensor([glb])
# get input variable values at which minimum is achieved
inp_size = lower_bounds[0].size()
mini_inp = torch.zeros(inp_size)
if len(inp_size)==1:
# This is a linear input.
for i in range(inp_size[0]):
mini_inp[i] = self.gurobi_vars[0][i].x
else:
for i in range(inp_size[0]):
for j in range(inp_size[1]):
for k in range(inp_size[2]):
mini_inp[i,j,k] = self.gurobi_vars[0][i][j][k].x
gub = self.net(mini_inp.unsqueeze(0)).item()
# remove introduced vars and constraints
self.model.remove(introduced_constrs)
self.model.update()
return gub, glb, mini_inp.unsqueeze(0), relu_mask, lower_bounds, upper_bounds
```
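The weight-splitting pattern used repeatedly above (positive weights paired with lower bounds, negative weights with upper bounds, and `F.relu` applied to both bounds at ReLU layers) can be exercised on its own. The following is a minimal standalone sketch of interval bound propagation for a tiny fully connected network; the network, its sizes and the input box are invented for illustration and are not part of the repository.
```python
import torch
import torch.nn.functional as F
from torch import nn


def interval_bounds(layers, lb, ub):
    """Propagate an input box [lb, ub] through a list of Linear/ReLU layers."""
    for layer in layers:
        if isinstance(layer, nn.Linear):
            pos_w = torch.clamp(layer.weight, min=0)
            neg_w = torch.clamp(layer.weight, max=0)
            # A lower bound pairs positive weights with lower bounds and
            # negative weights with upper bounds; the upper bound is symmetric.
            new_lb = pos_w @ lb + neg_w @ ub + layer.bias
            new_ub = pos_w @ ub + neg_w @ lb + layer.bias
            lb, ub = new_lb, new_ub
        elif isinstance(layer, nn.ReLU):
            lb, ub = F.relu(lb), F.relu(ub)
        else:
            raise NotImplementedError(type(layer))
    return lb, ub


if __name__ == '__main__':
    torch.manual_seed(0)
    net = [nn.Linear(2, 3), nn.ReLU(), nn.Linear(3, 1)]
    lb, ub = interval_bounds(net, torch.tensor([-1.0, -1.0]), torch.tensor([1.0, 1.0]))
    print('output bounds:', lb.item(), ub.item())
```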
#### File: GNN_branching/plnn/mip_solver.py
```python
import gurobipy as grb
import torch
from itertools import product
from torch import nn
from plnn.modules import View, Flatten
from torch.nn import functional as F
from plnn.dual_network_linear_approximation import LooseDualNetworkApproximation
from plnn.network_linear_approximation import LinearizedNetwork
class MIPNetwork:
def __init__(self, layers):
'''
layers: A list of Pytorch layers containing only Linear/ReLU/MaxPools
'''
self.layers = layers
self.net = nn.Sequential(*layers)
# Initialize a LinearizedNetwork object to determine the lower and
# upper bounds at each layer.
self.lin_net = LinearizedNetwork(layers)
def solve(self, inp_domain, timeout=None):
        '''
        inp_domain: Tensor containing in each row the lower and upper bound
                    for the corresponding dimension
        timeout : Maximum allowed running time; no limit if None
        Returns:
        sat : boolean indicating whether the MIP is satisfiable.
        solution: Feasible point if the MIP is satisfiable,
                  None otherwise.
        '''
if self.lower_bounds[-1].min() > 0:
print("Early stopping")
# The problem is infeasible, and we haven't setup the MIP
return (False, None, 0)
if timeout is not None:
self.model.setParam('TimeLimit', timeout)
if self.check_obj_value_callback:
def early_stop_cb(model, where):
if where == grb.GRB.Callback.MIP:
best_bound = model.cbGet(grb.GRB.Callback.MIP_OBJBND)
if best_bound > 0:
model.terminate()
if where == grb.GRB.Callback.MIPNODE:
nodeCount = model.cbGet(grb.GRB.Callback.MIPNODE_NODCNT)
if (nodeCount % 100) == 0:
print(f"Running Nb states visited: {nodeCount}")
if where == grb.GRB.Callback.MIPSOL:
obj = model.cbGet(grb.GRB.Callback.MIPSOL_OBJ)
if obj < 0:
# Does it have a chance at being a valid
# counter-example?
# Check it with the network
input_vals = model.cbGetSolution(self.gurobi_vars[0])
with torch.no_grad():
if isinstance(input_vals, list):
inps = torch.Tensor(input_vals).view(1, -1)
else:
assert isinstance(input_vals, grb.tupledict)
inps = torch.Tensor([val for val in input_vals.values()])
inps = inps.view((1,) + self.lower_bounds[0].shape)
out = self.net(inps).squeeze()
                            # In case there are several outputs from the network, take the minimum one.
out = out.min().item()
if out < 0:
model.terminate()
else:
def early_stop_cb(model, where):
if where == grb.GRB.Callback.MIPNODE:
nodeCount = model.cbGet(grb.GRB.Callback.MIPNODE_NODCNT)
if (nodeCount % 100) == 0:
print(f"Running Nb states visited: {nodeCount}")
self.model.optimize(early_stop_cb)
nb_visited_states = self.model.nodeCount
if self.model.status is grb.GRB.INFEASIBLE:
# Infeasible: No solution
return (False, None, nb_visited_states)
elif self.model.status is grb.GRB.OPTIMAL:
# There is a feasible solution. Return the feasible solution as well.
len_inp = len(self.gurobi_vars[0])
# Get the input that gives the feasible solution.
#input_vals = model.cbGetSolution(self.gurobi_vars[0])
#inps = torch.Tensor([val for val in input_vals.values()])
#inps = inps.view((1,) + self.lower_bounds[0].shape)
optim_val = self.gurobi_vars[-1][-1].x
return (optim_val < 0, (None, optim_val), nb_visited_states)
elif self.model.status is grb.GRB.INTERRUPTED:
obj_bound = self.model.ObjBound
if obj_bound > 0:
return (False, None, nb_visited_states)
else:
# There is a feasible solution. Return the feasible solution as well.
len_inp = len(self.gurobi_vars[0])
# Get the input that gives the feasible solution.
inp = torch.Tensor(len_inp)
if isinstance(self.gurobi_vars[0], list):
for idx, var in enumerate(self.gurobi_vars[0]):
inp[idx] = var.x
else:
#assert isinstance(self.gurobi_vars[0], grb.tupledict)
inp = torch.zeros_like(self.lower_bounds[0])
for idx, var in self.gurobi_vars[0].items():
inp[idx] = var.x
optim_val = self.gurobi_vars[-1][-1].x
return (optim_val < 0, (inp, optim_val), nb_visited_states)
elif self.model.status is grb.GRB.TIME_LIMIT:
# We timed out, return a None Status
return (None, None, nb_visited_states)
else:
raise Exception("Unexpected Status code")
def tune(self, param_outfile, tune_timeout):
self.model.Params.tuneOutput = 1
self.model.Params.tuneTimeLimit = tune_timeout
self.model.tune()
# Get the best set of parameters
self.model.getTuneResult(0)
self.model.write(param_outfile)
def do_interval_analysis(self, inp_domain):
self.lower_bounds = []
self.upper_bounds = []
self.lower_bounds.append(inp_domain.select(-1, 0))
self.upper_bounds.append(inp_domain.select(-1, 1))
layer_idx = 1
current_lb = self.lower_bounds[-1]
current_ub = self.upper_bounds[-1]
for layer in self.layers:
if isinstance(layer, nn.Linear) or isinstance(layer, nn.Conv2d):
if type(layer) is nn.Linear:
pos_weights = torch.clamp(layer.weight, min=0)
neg_weights = torch.clamp(layer.weight, max=0)
new_layer_lb = torch.mv(pos_weights, current_lb) + \
torch.mv(neg_weights, current_ub) + \
layer.bias
new_layer_ub = torch.mv(pos_weights, current_ub) + \
torch.mv(neg_weights, current_lb) + \
layer.bias
elif type(layer) is nn.Conv2d:
pre_lb = torch.Tensor(current_lb).unsqueeze(0)
pre_ub = torch.Tensor(current_ub).unsqueeze(0)
pos_weight = torch.clamp(layer.weight, 0, None)
neg_weight = torch.clamp(layer.weight, None, 0)
out_lbs = (F.conv2d(pre_lb, pos_weight, layer.bias,
layer.stride, layer.padding, layer.dilation, layer.groups)
+ F.conv2d(pre_ub, neg_weight, None,
layer.stride, layer.padding, layer.dilation, layer.groups))
out_ubs = (F.conv2d(pre_ub, pos_weight, layer.bias,
layer.stride, layer.padding, layer.dilation, layer.groups)
+ F.conv2d(pre_lb, neg_weight, None,
layer.stride, layer.padding, layer.dilation, layer.groups))
new_layer_lb = out_lbs.squeeze(0)
new_layer_ub = out_ubs.squeeze(0)
self.lower_bounds.append(new_layer_lb)
self.upper_bounds.append(new_layer_ub)
current_lb = new_layer_lb
current_ub = new_layer_ub
elif type(layer) == nn.ReLU:
current_lb = torch.clamp(current_lb, min=0)
current_ub = torch.clamp(current_ub, min=0)
elif type(layer) == nn.MaxPool1d:
new_layer_lb = []
new_layer_ub = []
                assert layer.padding == 0, "Unsupported MaxPool option"
                assert layer.dilation == 1, "Unsupported MaxPool option"
nb_pre = len(self.lower_bounds[-1])
window_size = layer.kernel_size
stride = layer.stride
pre_start_idx = 0
pre_window_end = pre_start_idx + window_size
while pre_window_end <= nb_pre:
lb = max(current_lb[pre_start_idx:pre_window_end])
ub = max(current_ub[pre_start_idx:pre_window_end])
new_layer_lb.append(lb)
new_layer_ub.append(ub)
pre_start_idx += stride
pre_window_end = pre_start_idx + window_size
current_lb = torch.Tensor(new_layer_lb)
current_ub = torch.Tensor(new_layer_ub)
self.lower_bounds.append(current_lb)
self.upper_bounds.append(current_ub)
elif type(layer) == View:
continue
elif type(layer) == Flatten:
current_lb = current_lb.view(-1)
current_ub = current_ub.view(-1)
else:
raise NotImplementedError
def setup_model(self, inp_domain,
use_obj_function=False,
bounds="opt",
parameter_file=None):
'''
inp_domain: Tensor containing in each row the lower and upper bound
for the corresponding dimension
        use_obj_function: If False, don't use any objective function, simply add a constraint on the output.
                          If True, perform optimization and use a callback to interrupt the solving when a
                          counterexample is found.
bounds: string, indicate what type of method should be used to get the intermediate bounds
parameter_file: Load a set of parameters for the MIP solver if a path is given.
Setup the model to be optimized by Gurobi
'''
if bounds == "opt":
# First use define_linear_approximation from LinearizedNetwork to
# compute upper and lower bounds to be able to define Ms
self.lin_net.define_linear_approximation(inp_domain)
self.lower_bounds = list(map(torch.Tensor, self.lin_net.lower_bounds))
self.upper_bounds = list(map(torch.Tensor, self.lin_net.upper_bounds))
elif bounds == "interval":
self.do_interval_analysis(inp_domain)
if self.lower_bounds[-1][0] > 0:
# The problem is already guaranteed to be infeasible,
# Let's not waste time setting up the MIP
return
elif bounds == "interval-kw":
self.do_interval_analysis(inp_domain)
kw_dual = LooseDualNetworkApproximation(self.layers)
kw_dual.remove_maxpools(inp_domain, no_opt=True)
lower_bounds, upper_bounds = kw_dual.get_intermediate_bounds(inp_domain)
#print(lower_bounds)
#print(upper_bounds)
# We want to get the best out of interval-analysis and K&W
# TODO: There is a slight problem. To use the K&W code directly, we
# need to make a bunch of changes, notably remove all of the
# Maxpooling and convert them to ReLUs. Quick and temporary fix:
# take the max of both things if the shapes are all the same so
# far, and use the one from interval analysis after the first
# difference.
            # If the network is fully ReLU, there should be no problem.
            # If the network is fully ReLU with just a MaxPool at the end,
# that's still okay because we get the best bounds until the
# maxpool, and that's the last thing that we use the bounds for
# This is just going to suck if we have a Maxpool early in the
# network, and even then, that just means we use interval analysis
# so stop complaining.
for i in range(len(lower_bounds)):
if lower_bounds[i].shape == self.lower_bounds[i].shape:
# Keep the best lower bound
lb_diff = lower_bounds[i] - self.lower_bounds[i]
ub_diff = upper_bounds[i] - self.upper_bounds[i]
# print(f"LB Difference (kw to interval) min: {lb_diff.min()} \t max:{lb_diff.max()}")
# print(f"UB Difference (kw to interval) min: {ub_diff.min()} \t max:{ub_diff.max()}")
torch.max(lower_bounds[i], self.lower_bounds[i], out=self.lower_bounds[i])
torch.min(upper_bounds[i], self.upper_bounds[i], out=self.upper_bounds[i])
else:
# Mismatch in dimension.
# Drop it and stop trying to improve the stuff of interval analysis
break
if self.lower_bounds[-1].min() > 0:
# The problem is already guaranteed to be infeasible,
# Let's not waste time setting up the MIP
return
else:
raise NotImplementedError("Unknown bound computation method.")
self.gurobi_vars = []
self.model = grb.Model()
self.model.setParam('OutputFlag', False)
self.model.setParam('Threads', 1)
self.model.setParam('DualReductions', 0)
if parameter_file is not None:
self.model.read(parameter_file)
self.zero_var = self.model.addVar(lb=0, ub=0, obj=0,
vtype=grb.GRB.CONTINUOUS,
name=f'zero')
# First add the input variables as Gurobi variables.
if inp_domain.dim() == 2:
inp_gurobi_vars = self.model.addVars([i for i in range(inp_domain.numel() // 2)],
lb=self.lower_bounds[0],
ub=self.upper_bounds[0],
name='inp')
inp_gurobi_vars = [var for key, var in inp_gurobi_vars.items()]
else:
inp_shape = self.lower_bounds[0].shape
#inp_gurobi_vars = self.model.addVars([chan for chan in range(inp_shape[0])],
# [row for row in range(inp_shape[1])],
# [col for col in range(inp_shape[2])],
# lb=self.lower_bounds[0].numpy(),
# ub=self.upper_bounds[0].numpy(),
# name='inp')
#import pdb; pdb.set_trace()
inp_gurobi_vars = {}
for chan in range(inp_domain.size(0)):
chan_vars = []
for row in range(inp_domain.size(1)):
row_vars = []
for col in range(inp_domain.size(2)):
lb = inp_domain[chan, row, col, 0]
ub = inp_domain[chan, row, col, 1]
v = self.model.addVar(lb=lb, ub=ub, obj=0,
vtype=grb.GRB.CONTINUOUS,
name=f'inp_[{chan},{row},{col}]')
inp_gurobi_vars[(chan, row, col)] = v
self.gurobi_vars.append(inp_gurobi_vars)
layer_idx = 1
for layer in self.layers:
if type(layer) is nn.Linear:
layer_nb_out = layer.out_features
pre_vars = self.gurobi_vars[-1]
if isinstance(pre_vars, grb.tupledict):
pre_vars = [var for key, var in sorted(pre_vars.items())]
# Build all the outputs of the linear layer
new_vars = self.model.addVars([i for i in range(layer_nb_out)],
lb=self.lower_bounds[layer_idx],
ub=self.upper_bounds[layer_idx],
name=f'zhat{layer_idx}')
new_layer_gurobi_vars = [var for key, var in new_vars.items()]
self.model.addConstrs(
((grb.LinExpr(layer.weight[neuron_idx, :], pre_vars)
+ layer.bias[neuron_idx].item()) == new_vars[neuron_idx]
for neuron_idx in range(layer.out_features)),
name=f'lay{layer_idx}'
)
elif type(layer) is nn.Conv2d:
in_shape = self.lower_bounds[layer_idx-1].shape
out_shape = self.lower_bounds[layer_idx].shape
flat_idxs = [elt for elt in product(range(out_shape[0]),
range(out_shape[1]),
range(out_shape[2]))]
flat_out_lbs = [self.lower_bounds[layer_idx][chan, row, col]
for chan, row, col in product(range(out_shape[0]),
range(out_shape[1]),
range(out_shape[2]))]
flat_out_ubs = [self.upper_bounds[layer_idx][chan, row, col]
for chan, row, col in product(range(out_shape[0]),
range(out_shape[1]),
range(out_shape[2]))]
new_layer_gurobi_vars = self.model.addVars(flat_idxs,
lb=flat_out_lbs,
ub=flat_out_ubs,
name=f'zhat{layer_idx}')
coeffs = []
for out_chan_idx in range(out_shape[0]):
coeffs.append(layer.weight[out_chan_idx, :].view(-1))
def make_lin_expr(out_chan_idx, out_row_idx, out_col_idx):
lin_bias = layer.bias[out_chan_idx].item()
lin_coeffs = coeffs[out_chan_idx]
start_row_idx = -layer.padding[0] + layer.stride[0]*out_row_idx
end_row_idx = start_row_idx + layer.weight.shape[2]
start_col_idx = -layer.padding[1] + layer.stride[1]*out_col_idx
end_col_idx = start_col_idx + layer.weight.shape[3]
lin_vars = [
(self.zero_var if ((row_idx < 0) or (row_idx == in_shape[1])
or (col_idx < 0) or (col_idx == in_shape[2]))
else self.gurobi_vars[-1][(chan_idx, row_idx, col_idx)])
for chan_idx in range(in_shape[0])
for row_idx in range(start_row_idx, end_row_idx)
for col_idx in range(start_col_idx, end_col_idx)
]
lin_expr = grb.LinExpr(lin_coeffs, lin_vars) + lin_bias
return lin_expr
constrs = []
for out_chan_idx in range(out_shape[0]):
for out_row_idx in range(out_shape[1]):
for out_col_idx in range(out_shape[2]):
constrs.append(make_lin_expr(out_chan_idx, out_row_idx, out_col_idx)
== new_layer_gurobi_vars[(out_chan_idx, out_row_idx, out_col_idx)])
self.model.addConstrs(constr for constr in constrs)
elif type(layer) == nn.ReLU:
pre_lbs = self.lower_bounds[layer_idx]
pre_ubs = self.upper_bounds[layer_idx]
if isinstance(self.gurobi_vars[-1], grb.tupledict):
amb_mask = (pre_lbs < 0) & (pre_ubs>0)
if amb_mask.sum().item() != 0:
to_new_preubs = pre_ubs[amb_mask]
to_new_prelbs = pre_lbs[amb_mask]
new_var_idxs = torch.nonzero((pre_lbs < 0) & (pre_ubs > 0)).numpy().tolist()
new_var_idxs = [tuple(idxs) for idxs in new_var_idxs]
new_layer_gurobi_vars = self.model.addVars(new_var_idxs,
lb=0,
ub=to_new_preubs,
name=f'z{layer_idx}')
new_binary_vars = self.model.addVars(new_var_idxs,
lb=0, ub=1,
vtype=grb.GRB.BINARY,
name=f'delta{layer_idx}')
flat_new_vars = [new_layer_gurobi_vars[idx] for idx in new_var_idxs]
flat_binary_vars = [new_binary_vars[idx] for idx in new_var_idxs]
pre_amb_vars = [self.gurobi_vars[-1][idx] for idx in new_var_idxs]
# C1: Superior to 0
# C2: Add the constraint that it's superior to the inputs
self.model.addConstrs(
(flat_new_vars[idx] >= pre_amb_vars[idx]
for idx in range(len(flat_new_vars))),
name=f'ReLU_lb{layer_idx}'
)
# C3: Below binary*upper_bound
self.model.addConstrs(
(flat_new_vars[idx] <= to_new_preubs[idx].item() * flat_binary_vars[idx]
for idx in range(len(flat_new_vars))),
name=f'ReLU{layer_idx}_ub1-'
)
# C4: Below binary*lower_bound
self.model.addConstrs(
(flat_new_vars[idx] <= (pre_amb_vars[idx]
- to_new_prelbs[idx].item() * (1 - flat_binary_vars[idx]))
for idx in range(len(flat_new_vars))),
name=f'ReLU{layer_idx}_ub2-'
)
else:
new_layer_gurobi_vars = grb.tupledict()
for pos in torch.nonzero(pre_lbs >= 0).numpy().tolist():
pos = tuple(pos)
new_layer_gurobi_vars[pos] = self.gurobi_vars[-1][pos]
for pos in torch.nonzero(pre_ubs <= 0).numpy().tolist():
new_layer_gurobi_vars[tuple(pos)] = self.zero_var
else:
assert isinstance(self.gurobi_vars[-1][0], grb.Var)
amb_mask = (pre_lbs < 0) & (pre_ubs > 0)
if amb_mask.sum().item() == 0:
pass
# print("WARNING: No ambiguous ReLU at a layer")
else:
to_new_preubs = pre_ubs[amb_mask]
new_var_idxs = torch.nonzero(amb_mask).squeeze(1).numpy().tolist()
new_vars = self.model.addVars(new_var_idxs,
lb=0,
ub=to_new_preubs,
name=f'z{layer_idx}')
new_binary_vars = self.model.addVars(new_var_idxs,
lb=0, ub=1,
vtype=grb.GRB.BINARY,
name=f'delta{layer_idx}')
# C1: Superior to 0
# C2: Add the constraint that it's superior to the inputs
self.model.addConstrs(
(new_vars[idx] >= self.gurobi_vars[-1][idx]
for idx in new_var_idxs),
name=f'ReLU_lb{layer_idx}'
)
# C3: Below binary*upper_bound
self.model.addConstrs(
(new_vars[idx] <= pre_ubs[idx].item() * new_binary_vars[idx]
for idx in new_var_idxs),
name=f'ReLU{layer_idx}_ub1-'
)
# C4: Below binary*lower_bound
self.model.addConstrs(
(new_vars[idx] <= (self.gurobi_vars[-1][idx]
- pre_lbs[idx].item() * (1 - new_binary_vars[idx]))
for idx in new_var_idxs),
name=f'ReLU{layer_idx}_ub2-'
)
# Get all the variables in a list, such that we have the
# output of the layer
new_layer_gurobi_vars = []
new_idx = 0
for idx in range(layer_nb_out):
if pre_lbs[idx] >= 0:
# Pass through variable
new_layer_gurobi_vars.append(self.gurobi_vars[-1][idx])
elif pre_ubs[idx] <= 0:
# Blocked variable
new_layer_gurobi_vars.append(self.zero_var)
else:
new_layer_gurobi_vars.append(new_vars[idx])
layer_idx += 1
elif type(layer) == nn.MaxPool1d:
                assert layer.padding == 0, "Unsupported MaxPool option"
                assert layer.dilation == 1, "Unsupported MaxPool option"
nb_pre = len(self.gurobi_vars[-1])
window_size = layer.kernel_size
stride = layer.stride
pre_start_idx = 0
pre_window_end = pre_start_idx + window_size
while pre_window_end <= nb_pre:
ub_max = max(self.upper_bounds[layer_idx-1][pre_start_idx:pre_window_end]).item()
window_bin_vars = []
neuron_idx = pre_start_idx % stride
v = self.model.addVar(vtype=grb.GRB.CONTINUOUS,
lb=-grb.GRB.INFINITY,
ub=grb.GRB.INFINITY,
name=f'MaxPool_out_{layer_idx}_{neuron_idx}')
for pre_var_idx, pre_var in enumerate(self.gurobi_vars[-1][pre_start_idx:pre_window_end]):
lb = self.lower_bounds[layer_idx-1][pre_start_idx + pre_var_idx].item()
b = self.model.addVar(vtype=grb.GRB.BINARY,
name= f'MaxPool_b_{layer_idx}_{neuron_idx}_{pre_var_idx}')
# MIP formulation of max pooling:
#
# y = max(x_1, x_2, ..., x_n)
#
# Introduce binary variables d_1, d_2, ..., d_n:
                        #     d_i = 1 if x_i is the maximum value, 0 otherwise
#
# We know the lower (l_i) and upper bounds (u_i) for x_i
#
# Denote the maximum of the upper_bounds of all inputs x_i as u_max
#
# MIP must then satisfy the following constraints:
#
# Constr_1: l_i <= x_i <= u_i
# Constr_2: y >= x_i
# Constr_3: y <= x_i + (u_max - l_i)*(1 - d_i)
                        # Constr_4: sum(d_1, d_2, ..., d_n) == 1
# Constr_1 is already satisfied due to the implementation of LinearizedNetworks.
# Constr_2
self.model.addConstr(v >= pre_var)
# Constr_3
self.model.addConstr(v <= pre_var + (ub_max - lb)*(1-b))
window_bin_vars.append(b)
# Constr_4
self.model.addConstr(sum(window_bin_vars) == 1)
self.model.update()
pre_start_idx += stride
pre_window_end = pre_start_idx + window_size
new_layer_gurobi_vars.append(v)
elif isinstance(layer, View) or isinstance(layer, Flatten):
continue
else:
raise NotImplementedError
self.gurobi_vars.append(new_layer_gurobi_vars)
if len(self.gurobi_vars[-1]) == 1:
# The network has a scalar output, it works like this.
pass
else:
# The network has multiple outputs, we need to encode that the
# minimum is below 0, let's add a variable here that corresponds to
# the minimum
min_var = self.model.addVar(vtype=grb.GRB.CONTINUOUS,
lb=self.lower_bounds[-1].min().item(),
ub=self.upper_bounds[-1].min().item(),
name="final_output")
self.model.addConstrs(
(min_var <= self.gurobi_vars[-1][out_idx]
for out_idx in range(len(self.gurobi_vars[-1]))),
name=f'final_constraint_min_ub'
)
bin_min_vars = self.model.addVars(range(len(self.gurobi_vars[-1])),
vtype=grb.GRB.BINARY,
lb=0, ub=1,
name='final_binary')
out_lbmin = self.lower_bounds[-1].min()
self.model.addConstrs(
(min_var >= (self.gurobi_vars[-1][out_idx]
+ (out_lbmin - self.upper_bounds[-1][out_idx]).item() * (1 - bin_min_vars[out_idx]))
for out_idx in range(len(self.gurobi_vars[-1]))),
name=f'final_constraint_min_lb'
)
self.model.addConstr(sum(var for var in bin_min_vars.values()) == 1)
self.gurobi_vars.append([min_var])
self.lower_bounds.append(self.lower_bounds[-1].min())
self.upper_bounds.append(self.upper_bounds[-1].min())
# Add the final constraint that the output must be less than or equal
# to zero.
if not use_obj_function:
self.model.addConstr(self.gurobi_vars[-1][0] <= 0)
self.model.setObjective(0, grb.GRB.MAXIMIZE)
self.check_obj_value_callback = False
else:
# Set the minimization of the network output
self.model.setObjective(self.gurobi_vars[-1][-1], grb.GRB.MINIMIZE)
self.check_obj_value_callback = True
# Optimize the model.
self.model.update()
#self.model.write('new_debug.lp')
```
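The big-M max-pooling encoding documented in the comment block inside `setup_model` above (Constr_1 through Constr_4) can also be written as a small standalone Gurobi model. This is an illustrative sketch only: the three inputs and their bounds are made up, and it assumes a working `gurobipy` installation with a valid licence.
```python
import gurobipy as grb

# Encode y = max(x_1, x_2, x_3) with binary selectors d_i,
# following the Constr_1..Constr_4 scheme described above.
lbs = [-1.0, 0.5, -2.0]   # made-up lower bounds l_i
ubs = [2.0, 1.0, 3.0]     # made-up upper bounds u_i
u_max = max(ubs)

m = grb.Model()
m.setParam('OutputFlag', False)
xs = [m.addVar(lb=lbs[i], ub=ubs[i], name=f'x_{i}') for i in range(3)]   # Constr_1 via variable bounds
y = m.addVar(lb=-grb.GRB.INFINITY, ub=grb.GRB.INFINITY, name='y')
ds = [m.addVar(vtype=grb.GRB.BINARY, name=f'd_{i}') for i in range(3)]

for i in range(3):
    m.addConstr(y >= xs[i])                                    # Constr_2
    m.addConstr(y <= xs[i] + (u_max - lbs[i]) * (1 - ds[i]))   # Constr_3
m.addConstr(sum(ds) == 1)                                      # Constr_4

# Minimise y just to exercise the model: the optimum equals max_i l_i.
m.setObjective(y, grb.GRB.MINIMIZE)
m.optimize()
print('min of y =', y.X)   # expected 0.5
```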
{
"source": "jingyuexing/py-emmet",
"score": 3
} |
#### File: emmet/abbreviation/convert.py
```python
from .parser import TokenGroup, TokenElement, TokenAttribute, is_quote, is_bracket
from .tokenizer import tokens
from .stringify import stringify
class ConvertState:
__slots__ = ('inserted', 'text', 'repeat_guard', 'repeaters', 'variables',
'_text_inserted')
def __init__(self, text: str = None, variables={}, max_repeat=None):
self.inserted = False
self.text = text
self.repeat_guard = max_repeat if max_repeat is not None else 1000000
self.variables = variables
self.repeaters = []
self._text_inserted = False
def get_text(self, pos: int):
self._text_inserted = True
if isinstance(self.text, list):
value = self.text[pos] if pos is not None else '\n'.join(self.text)
else:
value = self.text or ''
return value
def get_variable(self, name: str):
return self.variables.get(name) if self.variables else name
class Abbreviation:
__slots__ = ('type', 'children')
def __init__(self):
self.type = 'Abbreviation'
self.children = []
class AbbreviationNode:
__slots__ = ('type', 'name', 'value', 'repeat', 'attributes', 'children', 'self_closing')
def __init__(self, node: TokenElement, state: ConvertState):
self.type = 'AbbreviationNode'
self.name = stringify_name(node.name, state) if node.name else None
self.value = stringify_value(node.value, state) if node.value else None
self.attributes = None
self.children = []
self.repeat = clone_repeater(node.repeat) if node.repeat else None
self.self_closing = node.self_close
"Indicates current element is self-closing, e.g. should not contain closing pair"
class AbbreviationAttribute:
__slots__ = ('name', 'value', 'value_type', 'boolean', 'implied')
def __init__(self, name: str, value: list, value_type: str, boolean=False, implied=False):
self.name = name
self.value = value
self.value_type = value_type
"Indicates type of value stored in `.value` property"
self.boolean = boolean
"Attribute is boolean (e.g.name equals value)"
self.implied = implied
"Attribute is implied (e.g.must be outputted only if contains non-null value)"
def copy(self):
return AbbreviationAttribute(self.name, self.value, self.value_type, self.boolean, self.implied)
def convert(abbr: TokenGroup, options={}):
"Converts given token-based abbreviation into simplified and unrolled node-based abbreviation"
text = options.get('text')
state = ConvertState(text, options.get('variables'), options.get('max_repeat'))
result = Abbreviation()
result.children = convert_group(abbr, state)
if text is not None and not state._text_inserted:
# Text given but no implicitly repeated elements: insert it into deepest child
deepest = deepest_node(result.children[-1])
if deepest:
tx = '\n'.join(text) if isinstance(text, list) else text or ''
insert_text(deepest, tx)
return result
def convert_statement(node: TokenElement, state: ConvertState):
result = []
if node.repeat:
# Node is repeated: we should create copies of given node
# and supply context token with actual repeater state
original = node.repeat
repeat = clone_repeater(node.repeat)
if repeat.implicit and isinstance(state.text, list):
repeat.count = len(state.text)
else:
repeat.count = repeat.count or 1
state.repeaters.append(repeat)
i = 0
while i < repeat.count:
repeat.value = i
node.repeat = repeat
items = convert_group(node, state) if is_group(node) else convert_element(node, state)
if repeat.implicit and not state.inserted:
# It’s an implicit repeater but no repeater placeholders found inside,
# we should insert text into deepest node
target = items[-1]
deepest = deepest_node(target) if target else None
if deepest:
insert_text(deepest, state.get_text(repeat.value))
result += items
            # We should output at least one repeated item even if the repeat
            # limit has been reached
state.repeat_guard -= 1
if state.repeat_guard <= 0: break
i += 1
state.repeaters.pop()
node.repeat = original
if repeat.implicit: state.inserted = True
else:
result += convert_group(node, state) if is_group(node) else convert_element(node, state)
return result
def convert_element(node: TokenElement, state: ConvertState):
elem = AbbreviationNode(node, state)
result = [elem]
for child in node.elements:
elem.children += convert_statement(child, state)
if node.attributes:
elem.attributes = [convert_attribute(attr, state) for attr in node.attributes]
    # If the current node is a text-only snippet without fields, we should
    # put all of its children as siblings
if not elem.name and elem.attributes is None and elem.value and not some(elem.value, is_field):
result += elem.children
elem.children = []
return result
def convert_group(node: TokenGroup, state: ConvertState):
result = []
for child in node.elements:
result += convert_statement(child, state)
if node.repeat:
result = attach_repeater(result, node.repeat)
return result
def convert_attribute(node: TokenAttribute, state: ConvertState):
attr = create_attribute(node, state)
if node.value:
tokens = node.value[:]
if is_quote(tokens[0]):
# It’s a quoted value: remove quotes from output but mark attribute
# value as quoted
quote = tokens.pop(0)
if len(tokens) and tokens[-1].type == quote.type:
tokens.pop()
attr.value_type = 'singleQuote' if quote.single else 'doubleQuote'
elif is_bracket(tokens[0], 'expression', True):
# Value is expression: remove brackets but mark value type
attr.value_type = 'expression'
tokens.pop(0)
if tokens and is_bracket(tokens[-1], 'expression', False):
tokens.pop()
attr.value = stringify_value(tokens, state)
return attr
def create_attribute(node: TokenAttribute, state: ConvertState):
name = stringify_name(node.name, state) if node.name else None
value_type = 'expression' if node.expression else 'raw'
boolean = False
implied = False
if name:
if name[-1] == '.':
boolean = True
name = name[0:-1]
if name[0] == '!':
implied = True
name = name[1:]
return AbbreviationAttribute(name, None, value_type, boolean, implied)
def stringify_name(tokens: list, state: ConvertState):
"Converts given token list to string"
return ''.join([stringify(token, state) for token in tokens])
def stringify_value(token_list: list, state: ConvertState):
"Converts given token list to value list"
result = []
accum = []
for token in token_list:
if is_field(token):
            # We should keep the original fields in the output since some editors have their
            # own syntax for fields or don’t support fields at all, so we should
            # capture the actual field location in the output stream
if accum:
result.append(''.join(accum))
accum = []
result.append(token)
else:
accum.append(stringify(token, state))
    if accum:
result.append(''.join(accum))
return result
def is_group(node):
return isinstance(node, TokenGroup)
def is_field(token):
return isinstance(token, tokens.Field) and token.index is not None
def deepest_node(node: AbbreviationNode):
return deepest_node(node.children[-1]) if node.children else node
def insert_text(node: AbbreviationNode, text: str):
if node.value:
last_token = node.value[-1]
if isinstance(last_token, str):
node.value[-1] += text
else:
node.value.append(text)
else:
node.value = [text]
def attach_repeater(items: list, repeater: tokens.Repeater):
for item in items:
if not item.repeat:
item.repeat = clone_repeater(repeater)
return items
def clone_repeater(repeater: tokens.Repeater):
return tokens.Repeater(repeater.count, repeater.value, repeater.implicit)
def some(items: list, fn: callable):
for item in items:
if fn(item): return True
return False
```
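As a quick illustration of what `convert` produces, the hypothetical snippet below runs a small abbreviation through the `tokenize` and `parse` functions reproduced further below and then through `convert`. The `emmet.abbreviation.*` import paths are assumptions based on the `#### File:` headers, and the commented output is only what the code above suggests should appear; this sketch is not part of the repository.
```python
from emmet.abbreviation.tokenizer import tokenize
from emmet.abbreviation.parser import parse
from emmet.abbreviation.convert import convert

# 'ul>li.item$*3' -> a ul element containing three li elements whose class
# attribute is numbered by the repeater.
token_list = tokenize('ul>li.item$*3')
group = parse(token_list)   # token-based tree (TokenGroup / TokenElement)
abbr = convert(group)       # unrolled Abbreviation / AbbreviationNode tree

ul = abbr.children[0]
print(ul.name, len(ul.children))   # expected: ul 3
for li in ul.children:
    # expected: li class ['item1'], then ['item2'], then ['item3']
    print(li.name, li.attributes[0].name, li.attributes[0].value)
```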
#### File: abbreviation/parser/__init__.py
```python
from ...token_scanner import TokenScanner, TokenScannerException
from ..tokenizer import tokens
class TokenAttribute:
__slots__ = ('name', 'value', 'expression')
def __init__(self, name: list=None, value: list=None, expression: bool=False):
self.name = name
self.value = value
self.expression = expression
class TokenElement:
__slots__ = ('type', 'name', 'attributes', 'value', 'repeat', 'self_close', 'elements')
def __init__(self):
self.type = 'TokenElement'
self.name = None
self.attributes = None
self.value = None
self.repeat = None
self.self_close = False
self.elements = []
class TokenGroup:
__slots__ = ('type', 'elements', 'repeat')
def __init__(self):
self.type = 'TokenGroup'
self.elements = []
self.repeat = None
def parse(abbr: list, options: dict={}):
scanner = TokenScanner(abbr)
result = statements(scanner, options)
if scanner.readable():
raise scanner.error('Unexpected character')
return result
def statements(scanner: TokenScanner, options: dict):
result = TokenGroup()
ctx = result
node = None
stack = []
while scanner.readable():
node = element(scanner, options) or group(scanner, options)
if node:
ctx.elements.append(node)
if scanner.consume(is_child_operator):
stack.append(ctx)
ctx = node
elif scanner.consume(is_sibling_operator):
continue
elif is_climb_operator(scanner.peek()):
while scanner.consume(is_climb_operator):
if len(stack): ctx = stack.pop()
else:
break
return result
def group(scanner: TokenScanner, options: dict):
"Consumes group from given scanner"
if scanner.consume(is_group_start):
result = statements(scanner, options)
token = scanner.next()
if is_bracket(token, 'group', False):
result.repeat = repeater(scanner)
return result
raise scanner.error('Expecting )', token)
def element(scanner: TokenScanner, options: dict):
"Consumes single element from given scanner"
attr = None
elem = TokenElement()
if element_name(scanner, options):
elem.name = scanner.slice()
while scanner.readable():
scanner.start = scanner.pos
if not elem.repeat and not is_empty(elem) and scanner.consume(is_repeater):
elem.repeat = scanner.tokens[scanner.pos - 1]
elif elem.value is None and text(scanner):
elem.value = get_text(scanner)
else:
attr = short_attribute(scanner, 'id', options) or short_attribute(scanner, 'class', options) or attribute_set(scanner)
if attr is not None:
if not isinstance(attr, list):
attr = [attr]
if elem.attributes is None:
elem.attributes = attr[:]
else:
elem.attributes += attr
else:
if not is_empty(elem) and scanner.consume(is_close_operator):
elem.self_close = True
if not elem.repeat and scanner.consume(is_repeater):
elem.repeat = scanner.tokens[scanner.pos - 1]
break
return elem if not is_empty(elem) else None
def attribute_set(scanner: TokenScanner):
"Consumes attribute set from given scanner"
if scanner.consume(is_attribute_set_start):
attributes = []
attr = None
while scanner.readable():
attr = attribute(scanner)
if attr:
attributes.append(attr)
elif scanner.consume(is_attribute_set_end):
break
elif not scanner.consume(is_white_space):
raise scanner.error('Unexpected "%s" token' % scanner.peek().type)
return attributes
def short_attribute(scanner: TokenScanner, attr_type: str, options: dict):
"Consumes attribute shorthand (class or id) from given scanner"
if is_operator(scanner.peek(), attr_type):
scanner.pos += 1
attr = TokenAttribute([create_literal(attr_type)])
# Consume expression after shorthand start for React-like components
if options.get('jsx') and text(scanner):
attr.value = get_text(scanner)
attr.expression = True
else:
attr.value = scanner.slice() if literal(scanner) else None
return attr
def attribute(scanner: TokenScanner):
if quoted(scanner):
# Consumed quoted value: it’s a value for default attribute
return TokenAttribute(value=scanner.slice())
if literal(scanner, True):
name = scanner.slice()
value = None
if scanner.consume(is_equals) and (quoted(scanner) or literal(scanner, True)):
value = scanner.slice()
return TokenAttribute(name, value)
def repeater(scanner: TokenScanner):
if is_repeater(scanner.peek()):
return scanner.next()
def quoted(scanner: TokenScanner):
"Consumes quoted value from given scanner, if possible"
start = scanner.pos
quote = scanner.peek()
if is_quote(quote):
scanner.pos += 1
while scanner.readable():
if is_quote(scanner.next(), quote.single):
scanner.start = start
return True
raise scanner.error('Unclosed quote', quote)
return False
def literal(scanner: TokenScanner, allow_brackets=False):
"Consumes literal (unquoted value) from given scanner"
start = scanner.pos
brackets = {
'attribute': 0,
'expression': 0,
'group': 0
}
while scanner.readable():
token = scanner.peek()
if brackets['expression']:
# If we’re inside expression, we should consume all content in it
if is_bracket(token, 'expression'):
brackets[token.context] += 1 if token.open else -1
elif is_quote(token) or is_operator(token) or is_white_space(token) or is_repeater(token):
break
elif is_bracket(token):
if not allow_brackets: break
if token.open:
brackets[token.context] += 1
elif not brackets[token.context]:
# Stop if found unmatched closing brace: it must be handled
# by parent consumer
break
else:
brackets[token.context] -= 1
scanner.pos += 1
if start != scanner.pos:
scanner.start = start
return True
return False
def element_name(scanner: TokenScanner, options: dict):
"Consumes element name from given scanner"
start = scanner.pos
if options.get('jsx') and scanner.consume(is_capitalized_literal):
# Check for edge case: consume immediate capitalized class names
# for React-like components, e.g. `Foo.Bar.Baz`
while scanner.readable():
pos = scanner.pos
if not scanner.consume(is_class_name_operator) or not scanner.consume(is_capitalized_literal):
scanner.pos = pos
break
while scanner.readable() and scanner.consume(is_element_name):
pass
if scanner.pos != start:
scanner.start = start
return True
return False
def text(scanner: TokenScanner):
"Consumes text value from given scanner"
start = scanner.pos
if scanner.consume(is_text_start):
brackets = 0
while scanner.readable():
token = scanner.next()
if is_bracket(token, 'expression'):
if token.open:
brackets += 1
elif not brackets:
break
else:
brackets -= 1
scanner.start = start
return True
return False
def get_text(scanner: TokenScanner):
start = scanner.start
end = scanner.pos
if is_bracket(scanner.tokens[start], 'expression', True):
start += 1
if is_bracket(scanner.tokens[end - 1], 'expression', False):
end -= 1
return scanner.slice(start, end)
def is_bracket(token: tokens.Bracket, context: str=None, is_open=None):
return isinstance(token, tokens.Bracket) and \
(context is None or token.context == context) and \
(is_open is None or token.open == is_open)
def is_operator(token: tokens.Operator, op_type: str=None):
return isinstance(token, tokens.Operator) and (not op_type or token.operator == op_type)
def is_quote(token: tokens.Quote, is_single=None):
return isinstance(token, tokens.Quote) and (is_single is None or token.single == is_single)
def is_white_space(token: tokens.WhiteSpace):
return isinstance(token, tokens.WhiteSpace)
def is_equals(token: tokens.Operator):
return is_operator(token, 'equal')
def is_repeater(token: tokens.Repeater):
return isinstance(token, tokens.Repeater)
def is_literal(token: tokens.Literal):
return isinstance(token, tokens.Literal)
def is_capitalized_literal(token: tokens.Literal):
if is_literal(token):
return 'A' <= token.value[0] <= 'Z'
return False
def is_element_name(token: tokens.Literal):
return is_literal(token) or isinstance(token, tokens.RepeaterNumber) or isinstance(token, tokens.RepeaterPlaceholder)
def is_class_name_operator(token: tokens.Operator):
return is_operator(token, 'class')
def is_attribute_set_start(token: tokens.Bracket):
return is_bracket(token, 'attribute', True)
def is_attribute_set_end(token: tokens.Bracket):
return is_bracket(token, 'attribute', False)
def is_text_start(token: tokens.Bracket):
return is_bracket(token, 'expression', True)
def is_group_start(token: tokens.Bracket):
return is_bracket(token, 'group', True)
def create_literal(value: str):
return tokens.Literal(value)
def is_empty(elem: TokenElement):
return elem.name is None and elem.value is None and elem.attributes is None
def is_child_operator(token: tokens.Operator):
return is_operator(token, 'child')
def is_sibling_operator(token: tokens.Operator):
return is_operator(token, 'sibling')
def is_climb_operator(token: tokens.Operator):
return is_operator(token, 'climb')
def is_close_operator(token: tokens.Operator):
return is_operator(token, 'close')
```
#### File: emmet/abbreviation/stringify.py
```python
from .tokenizer import tokens
operators = {
'child': '>',
'class': '.',
'climb': '^',
'id': '#',
'equal': '=',
'close': '/',
'sibling': '+'
}
def stringify(token: tokens.Token, state):
visitor = globals().get(token.type)
if not visitor:
raise Exception('Unknown token %s' % token.type)
return visitor(token, state)
def Literal(token: tokens.Literal, state):
return token.value
def Quote(token: tokens.Quote, state):
return '\'' if token.single else '"'
def Bracket(token: tokens.Bracket, state):
if token.context == 'attribute':
return '[' if token.open else ']'
if token.context == 'expression':
return '{' if token.open else '}'
    return '(' if token.open else ')'
def Operator(token: tokens.Operator, state):
global operators
return operators.get(token.operator)
def Field(token: tokens.Field, state):
if token.index is not None:
        # It’s a field: by default, return a TextMate-compatible field
        if token.name:
            return '${%d:%s}' % (token.index, token.name)
        return '${%d}' % token.index
if token.name:
# It’s a variable
return state.get_variable(token.name)
return ''
def RepeaterPlaceholder(token: tokens.RepeaterPlaceholder, state):
# Find closest implicit repeater
repeater = None
repeater_list = state.repeaters[:]
repeater_list.reverse()
for r in repeater_list:
if r.implicit:
repeater = r
break
state.inserted = True
return state.get_text(repeater.value) if repeater else None
def RepeaterNumber(token: tokens.RepeaterNumber, state):
value = 1
last_ix = len(state.repeaters) - 1
if last_ix >= 0:
repeater = state.repeaters[-1]
if token.reverse:
value = token.base + repeater.count - repeater.value - 1
else:
value = token.base + repeater.value
if token.parent:
parent_ix = max(0, last_ix - token.parent)
if parent_ix != last_ix:
value += repeater.count * state.repeaters[parent_ix].value
result = str(value)
prefix = '0' * max(0, token.size - len(result))
return prefix + result
def WhiteSpace(token, state):
return ' '
```
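To make the numbering logic of `RepeaterNumber` above concrete, the sketch below builds a repeater state by hand and stringifies a two-digit `$$`-style token. The constructor arguments mirror the calls visible in `clone_repeater` and in the tokenizer below; the import paths and the trailing `start`/`end` positions are assumptions, so treat this as an illustration rather than documented API.
```python
from emmet.abbreviation.convert import ConvertState
from emmet.abbreviation.stringify import stringify
from emmet.abbreviation.tokenizer import tokens

state = ConvertState()
# A repeater with count=5 that is currently on its third iteration (value=2).
state.repeaters.append(tokens.Repeater(5, 2, False))

# A '$$'-style token: two digits, not reversed, base 1, no parent lookup.
num = tokens.RepeaterNumber(2, False, 1, 0, 0, 2)
print(stringify(num, state))   # expected '03' (base 1 + value 2, zero-padded to two digits)
```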
#### File: abbreviation/tokenizer/__init__.py
```python
from ...scanner import Scanner
from ...scanner_utils import is_quote, is_space, is_number, is_alpha
from .utils import Chars, escaped
from . import tokens
OPERATOR_TYPES = dict([
(Chars.Child, 'child'),
(Chars.Sibling, 'sibling'),
(Chars.Climb, 'climb'),
(Chars.Dot, 'class'),
(Chars.Hash, 'id'),
(Chars.Slash, 'close'),
(Chars.Equals, 'equal')
])
def tokenize(source: str):
scanner = Scanner(source)
result = []
ctx = {
'group': 0,
'attribute': 0,
'expression': 0,
'quote': None
}
while not scanner.eof():
ch = scanner.peek()
token = field(scanner, ctx) or \
repeater_placeholder(scanner) or \
repeater_number(scanner) or \
repeater(scanner) or \
white_space(scanner) or \
literal(scanner, ctx) or \
operator(scanner) or \
quote(scanner) or \
bracket(scanner)
if token:
result.append(token)
if token.type == 'Quote':
ctx['quote'] = None if ch == ctx['quote'] else ch
elif token.type == 'Bracket':
ctx[token.context] += 1 if token.open else -1
else:
raise scanner.error('Unexpected character')
return result
def literal(scanner: Scanner, ctx: dict):
    "Consumes literal from given scanner"
    start = scanner.pos
    value = []
    while not scanner.eof():
        ch = scanner.peek()
if ch == ctx['quote'] or ch == Chars.Dollar or is_allowed_operator(ch, ctx):
# 1. Found matching quote
# 2. The `$` character has special meaning in every context
# 3. Depending on context, some characters should be treated as operators
break
if ctx['expression'] and ch == Chars.CurlyBracketClose:
break
if not ctx['quote'] and not ctx['expression'] and (
is_allowed_space(ch, ctx) or
is_allowed_repeater(ch, ctx) or
is_quote(ch) or
bracket_type(ch)):
# Stop for characters not allowed in unquoted literal
break
if escaped(scanner):
value.append(scanner.current())
else:
value.append(scanner.next())
if start != scanner.pos:
scanner.start = start
return tokens.Literal(''.join(value), start, scanner.pos)
def white_space(scanner: Scanner):
"Consumes white space characters as string literal from given scanner"
start = scanner.pos
if scanner.eat_while(is_space):
return tokens.WhiteSpace(start, scanner.pos)
def quote(scanner: Scanner):
"Consumes quote from given scanner"
ch = scanner.peek()
if is_quote(ch):
return tokens.Quote(ch == Chars.SingleQuote, inc_pos(scanner), scanner.pos)
def bracket(scanner: Scanner):
"Consumes bracket from given scanner"
ch = scanner.peek()
context = bracket_type(ch)
if context:
return tokens.Bracket(is_open_bracket(ch), context, inc_pos(scanner), scanner.pos)
def operator(scanner: Scanner):
"Consumes operator from given scanner"
op = operator_type(scanner.peek())
if op:
return tokens.Operator(op, inc_pos(scanner), scanner.pos)
def repeater(scanner: Scanner):
"Consumes node repeat token from current scanner position and returns its parsed value"
start = scanner.pos
if scanner.eat(Chars.Asterisk):
scanner.start = scanner.pos
count = 1
implicit = False
if scanner.eat_while(is_number):
count = int(scanner.current())
else:
implicit = True
return tokens.Repeater(count, 0, implicit, start, scanner.pos)
def repeater_placeholder(scanner: Scanner):
"Consumes repeater placeholder `$#` from given scanner"
start = scanner.pos
if scanner.eat(Chars.Dollar) and scanner.eat(Chars.Hash):
return tokens.RepeaterPlaceholder(None, start, scanner.pos)
scanner.pos = start
def repeater_number(scanner: Scanner):
"Consumes numbering token like `$` from given scanner state"
start = scanner.pos
if scanner.eat_while(Chars.Dollar):
size = scanner.pos - start
reverse = False
base = 1
parent = 0
if scanner.eat(Chars.At):
# Consume numbering modifiers
while scanner.eat(Chars.Climb):
parent += 1
reverse = scanner.eat(Chars.Dash)
scanner.start = scanner.pos
if scanner.eat_while(is_number):
base = int(scanner.current())
scanner.start = start
return tokens.RepeaterNumber(size, reverse, base, parent, start, scanner.pos)
def field(scanner: Scanner, ctx: dict):
start = scanner.pos
# Fields are allowed inside expressions and attributes
if (ctx['expression'] or ctx['attribute']) and scanner.eat(Chars.Dollar) and scanner.eat(Chars.CurlyBracketOpen):
scanner.start = scanner.pos
index = None
name = ''
if scanner.eat_while(is_number):
# It’s a field
index = int(scanner.current())
if scanner.eat(Chars.Colon):
name = consume_placeholder(scanner)
elif is_alpha(scanner.peek()):
# It’s a variable
name = consume_placeholder(scanner)
if scanner.eat(Chars.CurlyBracketClose):
return tokens.Field(name, index, start, scanner.pos)
raise scanner.error('Expecting }')
# If we reached here then there’s no valid field here, revert
# back to starting position
scanner.pos = start
def consume_placeholder(scanner: Scanner):
"Consumes a placeholder: value right after `:` in field. Could be empty"
stack = []
scanner.start = scanner.pos
while not scanner.eof():
if scanner.eat(Chars.CurlyBracketOpen):
stack.append(scanner.pos)
elif scanner.eat(Chars.CurlyBracketClose):
if not len(stack):
scanner.pos -= 1
break
stack.pop()
else:
scanner.pos += 1
if len(stack):
scanner.pos = stack.pop()
raise scanner.error('Expecting }')
return scanner.current()
def is_allowed_operator(ch: str, ctx: dict):
"Check if given character code is an operator and it’s allowed in current context"
op = operator_type(ch)
if not op or ctx['quote'] or ctx['expression']:
# No operators inside quoted values or expressions
return False
# Inside attributes, only `equals` is allowed
return not ctx['attribute'] or op == 'equal'
def is_allowed_space(ch: str, ctx: dict):
"""
Check if given character is a space character and is allowed to be consumed
as a space token in current context
"""
return is_space(ch) and not ctx['expression']
def is_allowed_repeater(ch: str, ctx: dict):
"Check if given character can be consumed as repeater in current context"
return ch == Chars.Asterisk and not ctx['attribute'] and not ctx['expression']
def bracket_type(ch: str):
"If given character is a bracket, returns it’s type"
if ch in (Chars.RoundBracketOpen, Chars.RoundBracketClose):
return 'group'
if ch in (Chars.SquareBracketOpen, Chars.SquareBracketClose):
return 'attribute'
if ch in (Chars.CurlyBracketOpen, Chars.CurlyBracketClose):
return 'expression'
def operator_type(ch: str):
    "If given character is an operator, returns its type"
global OPERATOR_TYPES
return OPERATOR_TYPES.get(ch)
def is_open_bracket(ch: str):
"Check if given character is an open bracket"
return ch in (Chars.CurlyBracketOpen, Chars.SquareBracketOpen, Chars.RoundBracketOpen)
def inc_pos(scanner: Scanner):
pos = scanner.pos
scanner.pos += 1
return pos
```
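A minimal usage sketch for the tokenizer above, assuming the `emmet` package from this repository is importable (the same layout the test modules near the end of this dump rely on):

```python
# Hedged sketch: the import path assumes the py-emmet package layout used by the tests below.
from emmet.abbreviation.tokenizer import tokenize

# Tokenize a small markup abbreviation and inspect the resulting token stream.
for token in tokenize('ul>li.item*3'):
    # Every token carries its source range; Literal tokens also carry a value.
    print(type(token).__name__, getattr(token, 'value', ''), token.start, token.end)
```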
#### File: emmet/action_utils/css.py
```python
from ..css_matcher import scan, split_value, TokenType
from .utils import push_range, SelectItemModel
class CSSSection:
__slots__ = ('start', 'end', 'body_start', 'body_end', 'properties')
def __init__(self, start: int, end: int, body_start: int, body_end: int, properties: list=None):
self.start = start
self.end = end
self.body_start = body_start
self.body_end = body_end
self.properties = properties
def to_json(self):
result = {
'start': self.start,
'end': self.end,
'body_start': self.body_start,
'body_end': self.body_end
}
if self.properties:
result['properties'] = [prop.to_json() for prop in self.properties]
return result
class CSSProperty:
__slots__ = ('name', 'value', 'value_tokens', 'before', 'after')
def __init__(self, code: str, name: list, before: int, start: int, end: int, delimiter: int, offset=0):
self.name = (offset + name[0], offset + name[1])
self.value = (offset + start, offset + end)
self.value_tokens = split_value(code[start:end], offset + start)
self.before = before
self.after = offset + delimiter + 1
def to_json(self):
return {
'name': self.name,
'value': self.value,
'value_tokens': self.value_tokens,
'before': self.before,
'after': self.after
}
class ParseState:
__slots__ = ('type', 'start', 'end', 'value_start', 'value_end', 'value_delimiter')
def __init__(self):
self.type = None
self.start = -1
self.end = -1
self.value_start = -1
self.value_end = -1
self.value_delimiter = -1
def get_css_section(code: str, pos: int, properties=False) -> CSSSection:
"""
Returns context CSS section for given location in source code
:param properties Parse inner properties
"""
stack = []
pool = []
result = []
result.append(None) # Skip pylint warnings
def scan_callback(token_type: str, start: int, end: int, delimiter: int):
if start > pos and not stack:
return False
if token_type == TokenType.Selector:
stack.append(alloc_range(pool, start, end, delimiter))
elif token_type == TokenType.BlockEnd:
sel = stack and stack.pop()
if sel and sel[0] <= pos <= end:
result[0] = CSSSection(sel[0], end, sel[2] + 1, start)
return False
release_range(pool, sel)
scan(code, scan_callback)
section = result[0]
if section and properties:
section.properties = parse_properties(code, section.body_start, section.body_end)
return section
def select_item_css(code: str, pos: int, is_prev=False) -> SelectItemModel:
"Returns list of ranges for Select Next/Previous CSS Item action"
if is_prev:
return select_previous_item(code, pos)
return select_next_item(code, pos)
def select_next_item(code: str, pos: int) -> SelectItemModel:
"Returns regions for selecting next item in CSS"
result = []
result.append(None)
pending_property = []
pending_property.append(None)
def scan_callback(token_type: str, start: int, end: int, delimiter: int):
if start < pos:
return
if token_type == TokenType.Selector:
result[0] = SelectItemModel(start, end, [(start, end)])
return False
elif token_type == TokenType.PropertyName:
pending_property[0] = (start, end, delimiter)
elif token_type == TokenType.PropertyValue:
section = SelectItemModel(start, delimiter + 1 if delimiter != -1 else end, [])
result[0] = section
if pending_property[0]:
# Full property range
prop = pending_property[0]
section.start = prop[0]
push_range(section.ranges, (prop[0], section.end))
# Full value range
push_range(section.ranges, (start, end))
# Value fragments
for r in split_value(code[start:end]):
push_range(section.ranges, (r[0] + start, r[1] + start))
return False
elif pending_property[0]:
prop = pending_property[0]
result[0] = SelectItemModel(prop[0], prop[1], [(prop[0], prop[1])])
return False
scan(code, scan_callback)
return result[0]
def select_previous_item(code: str, pos: int) -> SelectItemModel:
"Returns regions for selecting previous item in CSS"
state = ParseState()
def scan_callback(token_type, start, end, delimiter):
# Accumulate context until we reach given position
if start >= pos and token_type != TokenType.PropertyValue:
return False
if token_type in (TokenType.Selector, TokenType.PropertyName):
state.start = start
state.end = end
state.type = token_type
state.value_start = state.value_end = state.value_delimiter = -1
elif token_type == TokenType.PropertyValue:
state.value_start = start
state.value_end = end
state.value_delimiter = delimiter
scan(code, scan_callback)
if state.type == TokenType.Selector:
return SelectItemModel(state.start, state.end, [(state.start, state.end)])
if state.type == TokenType.PropertyName:
result = SelectItemModel(state.start, state.end, [])
if state.value_start != -1:
result.end = state.value_delimiter + 1 if state.value_delimiter != -1 else state.value_end
# Full property range
push_range(result.ranges, (state.start, result.end))
# Full value range
push_range(result.ranges, (state.value_start, state.value_end))
# Value fragments
for r in split_value(code[state.value_start:state.value_end]):
push_range(result.ranges, (r[0] + state.value_start, r[1] + state.value_start))
else:
push_range(result.ranges, (state.start, state.end))
return result
class ParsePropertiesState:
__slots__ = ('pending_name', 'nested', 'before')
def __init__(self, before: int):
self.pending_name = None
self.nested = 0
        self.before = before
def parse_properties(code: str, parse_from=0, parse_to=None) -> list:
"""
Parses properties in `from:to` fragment of `code`. Note that `from:to` must
point to CSS section content, e.g. *inside* `{` and `}` (or top-level code context),
all properties found in nested sections will be ignored
"""
if parse_to is None:
parse_to = len(code)
fragment = code[parse_from:parse_to]
result = []
pool = []
state = ParsePropertiesState(parse_from)
def scan_callback(token_type, start: int, end: int, delimiter: int):
if token_type == TokenType.Selector:
state.nested += 1
elif token_type == TokenType.BlockEnd:
state.nested -= 1
state.before = parse_from + end
elif not state.nested:
if token_type == TokenType.PropertyName:
if state.pending_name:
# Create property with empty value
value_pos = state.pending_name[2]
result.append(
CSSProperty(fragment, state.pending_name, state.before,
value_pos, value_pos, value_pos,
parse_from))
release_range(pool, state.pending_name)
state.before = parse_from + start
state.pending_name = alloc_range(pool, start, end, delimiter)
elif token_type == TokenType.PropertyValue:
if state.pending_name:
result.append(
CSSProperty(fragment, state.pending_name, state.before,
start, end, delimiter, parse_from))
release_range(pool, state.pending_name)
state.pending_name = None
state.before = parse_from + delimiter + 1
scan(fragment, scan_callback)
return result
def alloc_range(pool: list, start: int, end: int, delimiter: int) -> list:
"Allocates new token range from pool"
if pool:
rng = pool.pop()
rng[0] = start
rng[1] = end
rng[2] = delimiter
return rng
return [start, end, delimiter]
def release_range(pool: list, rng: list):
"Releases given token range and pushes it back into the pool"
if rng:
pool.append(rng)
```
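A short usage sketch for `get_css_section()`, importing from the module path shown in the file header above; the sample CSS and the printed values are illustrative only:

```python
# Hedged sketch; module path assumed from the file header above.
from emmet.action_utils.css import get_css_section

css = 'a { color: red; padding: 10px; }'
# Ask for the section containing the value "red", including parsed properties.
section = get_css_section(css, css.index('red'), properties=True)
if section:
    names = [css[p.name[0]:p.name[1]] for p in section.properties]
    print(section.start, section.end, names)  # e.g. 0 32 ['color', 'padding']
```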
#### File: emmet/action_utils/html.py
```python
from ..html_matcher import scan, attributes, ScannerOptions, ElementType, AttributeToken
from .utils import push_range, SelectItemModel, token_list
class ContextTag:
__slots__ = ('name', 'type', 'start', 'end', 'attributes')
def __init__(self, name: str, elem_type: ElementType, start: int, end: int, attrs: list=None):
self.name = name
self.type = elem_type
self.start = start
self.end = end
self.attributes = attrs
def to_json(self):
"Returns JSON representation of current object"
result = {
'name': self.name,
'type': self.type,
'start': self.start,
'end': self.end
}
if self.attributes:
result['attributes'] = [attr.to_json() for attr in self.attributes]
return result
def get_open_tag(code: str, pos: int) -> ContextTag:
"""
    Check if there’s an open or self-closing tag under given `pos` location in source code.
If found, returns its name, range in source and parsed attributes
"""
opt = ScannerOptions()
tag = []
tag.append(None)
# Find open or self-closing tag, closest to given position
def scan_callback(name: str, elem_type: ElementType, start: int, end: int):
if start < pos < end:
tag[0] = ContextTag(name, elem_type, start, end)
if elem_type in (ElementType.Open, ElementType.SelfClose):
tag[0].attributes = shift_attribute_ranges(attributes(code[start:end], name), start)
return False
if end > pos:
return False
scan(code, scan_callback, opt.special)
return tag[0]
def select_item_html(code: str, pos: int, is_prev=False, options: dict=None) -> SelectItemModel:
"Returns list of ranges for Select Next/Previous Item action"
if is_prev:
return select_previous_item(code, pos, options)
return select_next_item(code, pos, options)
def select_next_item(code: str, pos: int, options: dict=None) -> SelectItemModel:
"Returns list of ranges for Select Next Item action"
opt = ScannerOptions(options)
result = []
result.append(None)
# Find open or self-closing tag, closest to given position
def scan_callback(name, elem_type, start, end):
if elem_type in (ElementType.Open, ElementType.SelfClose) and end > pos:
# Found open or self-closing tag
result[0] = get_tag_selection_model(code, name, start, end)
return False
scan(code, scan_callback, opt.special)
return result[0]
def select_previous_item(code: str, pos: int, options: dict=None) -> SelectItemModel:
"Returns list of ranges for Select Previous Item action"
opt = ScannerOptions(options)
last = {
'name': '',
'type': None,
'start': -1,
'end': -1
}
    # We should find the open or self-closing tag closest to the left of given `pos`.
def scan_callback(name: str, elem_type: ElementType, start: int, end: int):
if start >= pos:
return False
if elem_type in (ElementType.Open, ElementType.SelfClose):
# Found open or self-closing tag
last['name'] = name
last['type'] = elem_type
last['start'] = start
last['end'] = end
scan(code, scan_callback, opt.special)
if last['type'] is not None:
return get_tag_selection_model(code, last['name'], last['start'], last['end'])
def get_tag_selection_model(code: str, name: str, start: int, end: int) -> SelectItemModel:
"""
Parses open or self-closing tag in `start:end` range of `code` and returns its
model for selecting items
:param code Document source code
:param name Name of matched tag
"""
# Add tag name range
ranges = [(start + 1, start + 1 + len(name))]
# Parse and add attributes ranges
tag_src = code[start:end]
for attr in attributes(tag_src, name):
if attr.value is not None:
# Attribute with value
push_range(ranges, (start + attr.name_start, start + attr.value_end))
# Add (unquoted) value range
val = value_range(attr)
if val[0] != val[1]:
push_range(ranges, (start + val[0], start + val[1]))
if attr.name == 'class':
# For class names, split value into space-separated tokens
for token in token_list(tag_src[val[0]:val[1]], start + val[0]):
push_range(ranges, token)
else:
# Attribute without value (boolean)
push_range(ranges, (start + attr.name_start, start + attr.name_end))
return SelectItemModel(start, end, ranges)
def value_range(attr: AttributeToken) -> tuple:
"Returns value range of given attribute. Value range is unquoted."
value = attr.value
ch = value[0]
last_ch = value[-1]
if ch == '"' or ch == '\'':
return (
attr.value_start + 1,
attr.value_end - (1 if last_ch == ch else 0)
)
if ch == '{' and last_ch == '}':
return (
attr.value_start + 1,
attr.value_end - 1
)
return (attr.value_start, attr.value_end)
def shift_attribute_ranges(attrs: list, offset: int):
for attr in attrs:
attr.name_start += offset
attr.name_end += offset
if attr.value is not None:
attr.value_start += offset
attr.value_end += offset
return attrs
```
#### File: emmet/action_utils/utils.py
```python
from ..scanner_utils import is_space
class SelectItemModel:
__slots__ = ('start', 'end', 'ranges')
def __init__(self, start: int, end: int, ranges: list=None):
self.start = start
self.end = end
self.ranges = ranges
def to_json(self):
return {
'start': self.start,
'end': self.end,
'ranges': self.ranges
}
def push_range(ranges: list, rng: list):
prev = ranges and ranges[-1]
if rng and rng[0] != rng[1] and (not prev or prev[0] != rng[0] or prev[1] != rng[1]):
ranges.append(rng)
def token_list(value: str, offset=0):
"Returns ranges of tokens in given value. Tokens are space-separated words."
ranges = []
l = len(value)
pos = 0
start = 0
end = 0
while pos < l:
end = pos
ch = value[pos]
pos += 1
if is_space(ch):
if start != end:
ranges.append((offset + start, offset + end))
while pos < l and is_space(value[pos]):
pos += 1
start = pos
if start != pos:
ranges.append((offset + start, offset + pos))
return ranges
```
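To illustrate `token_list()` above, a minimal sketch (module path assumed from the file header; the offsets mirror the class-name ranges asserted in the HTML tests further down):

```python
# Hedged sketch; module path assumed from the file header above.
from emmet.action_utils.utils import token_list

# Split a class attribute value into space-separated word ranges,
# shifted by the absolute offset of the value inside the document.
print(token_list('item item_1', 20))  # [(20, 24), (25, 31)]
```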
#### File: emmet/css_abbreviation/__init__.py
```python
from .tokenizer import tokenize, tokens
from .parser import parser, CSSProperty, CSSValue, FunctionCall
from ..scanner import ScannerException
def parse(abbr: str, options={}):
"Parses given abbreviation into property set"
try:
tokens = tokenize(abbr, options.get('value', False)) if isinstance(abbr, str) else abbr
return parser(tokens, options)
except ScannerException as err:
if isinstance(abbr, str):
err.message += '\n%s\n%s^' % (abbr, '-' * err.pos)
raise err
```
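A minimal sketch of `parse()` for stylesheet abbreviations; the abbreviation and the printed names are illustrative:

```python
# Hedged sketch; assumes the emmet.css_abbreviation package shown above is importable.
from emmet.css_abbreviation import parse

# 'p10+m5' parses into two CSSProperty objects ('+' is the sibling operator).
props = parse('p10+m5')
print([prop.name for prop in props])  # e.g. ['p', 'm']
```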
#### File: emmet/css_abbreviation/parser.py
```python
from .tokenizer import tokens
from .tokenizer.tokens import OperatorType
from ..token_scanner import TokenScanner
class FunctionCall:
__slots__ = ('type', 'name', 'arguments')
def __init__(self, name: str, arguments: list):
self.type = 'FunctionCall'
self.name = name
self.arguments = arguments
class CSSValue:
__slots__ = ('type', 'value')
def __init__(self, value: list):
self.type = 'CSSValue'
self.value = value
class CSSProperty:
__slots__ = ('name', 'value', 'important')
def __init__(self, name: str, value: list, important=False):
self.name = name
self.value = value
self.important = important
def parser(token_list: list, options={}):
scanner = TokenScanner(token_list)
result = []
while scanner.readable():
prop = consume_property(scanner, options)
if prop:
result.append(prop)
elif not scanner.consume(is_sibling_operator):
raise scanner.error('Unexpected token')
return result
def consume_property(scanner: TokenScanner, options={}):
"Consumes single CSS property"
name = None
important = False
value_fragment = None
value = []
token = scanner.peek()
if not options.get('value') and is_literal(token) and not is_function_start(scanner):
scanner.pos += 1
name = token.value
# Consume any following value delimiter after property name
scanner.consume(is_value_delimiter)
# Skip whitespace right after property name, if any
scanner.consume(is_white_space)
while scanner.readable():
if scanner.consume(is_important):
important = True
else:
value_fragment = consume_value(scanner)
if value_fragment:
value.append(value_fragment)
elif not scanner.consume(is_fragment_delimiter):
break
if name or value or important:
return CSSProperty(name, value, important)
def consume_value(scanner: TokenScanner):
"Consumes single value fragment, e.g. all value tokens before comma"
result = []
token = None
args = None
while scanner.readable():
token = scanner.peek()
if is_value(token):
scanner.pos += 1
args = consume_arguments(scanner) if is_literal(token) else None
if args is not None:
result.append(FunctionCall(token.value, args))
else:
result.append(token)
elif is_value_delimiter(token):
scanner.pos += 1
else:
break
return CSSValue(result) if result else None
def consume_arguments(scanner: TokenScanner):
start = scanner.pos
if scanner.consume(is_open_bracket):
args = []
value = None
while scanner.readable() and not scanner.consume(is_close_bracket):
value = consume_value(scanner)
if value:
args.append(value)
elif not scanner.consume(is_white_space) and not scanner.consume(is_argument_delimiter):
raise scanner.error('Unexpected token')
scanner.start = start
return args
def is_literal(token: tokens.Token):
return isinstance(token, tokens.Literal)
def is_bracket(token: tokens.Token, is_open=None):
return isinstance(token, tokens.Bracket) and (is_open is None or token.open == is_open)
def is_open_bracket(token: tokens.Token):
return is_bracket(token, True)
def is_close_bracket(token: tokens.Token):
return is_bracket(token, False)
def is_white_space(token: tokens.Token):
return isinstance(token, tokens.WhiteSpace)
def is_operator(token: tokens.Token, operator: OperatorType=None):
return isinstance(token, tokens.Operator) and (not operator or token.operator == operator)
def is_sibling_operator(token: tokens.Token):
return is_operator(token, OperatorType.Sibling)
def is_argument_delimiter(token: tokens.Token):
return is_operator(token, OperatorType.ArgumentDelimiter)
def is_fragment_delimiter(token: tokens.Token):
return is_argument_delimiter(token) or is_white_space(token)
def is_important(token: tokens.Token):
return is_operator(token, OperatorType.Important)
def is_value(token: tokens.Token):
return isinstance(token, tokens.StringValue) or \
isinstance(token, tokens.ColorValue) or \
isinstance(token, tokens.NumberValue) or \
isinstance(token, tokens.Literal) or \
isinstance(token, tokens.Field)
def is_value_delimiter(token: tokens.Token):
return is_white_space(token) or \
is_operator(token, OperatorType.PropertyDelimiter) or \
is_operator(token, OperatorType.ValueDelimiter)
def is_function_start(scanner: TokenScanner):
max_ix = len(scanner.tokens) - 1
if scanner.pos < max_ix:
t1 = scanner.tokens[scanner.pos]
t2 = scanner.tokens[scanner.pos + 1]
return t1 and t2 and is_literal(t1) and isinstance(t2, tokens.Bracket)
```
#### File: emmet/css_matcher/scan.py
```python
from ..scanner import Scanner
from ..scanner_utils import is_quote, is_space
class ScanState:
__slots__ = ('start', 'end', 'property_delimiter', 'property_start',
'property_end', 'expression')
def __init__(self):
self.start = -1
"Start location of currently consumed token"
self.end = -1
"End location of currently consumed token"
self.property_delimiter = -1
"Location of possible property delimiter"
self.property_start = -1
"Location of possible property start"
self.property_end = -1
"Location of possible property end"
self.expression = 0
"In expression context"
def reset(self):
self.start = self.end = self.property_start = self.property_end = self.property_delimiter = -1
class TokenType:
Selector = 'selector'
PropertyName = 'propertyName'
PropertyValue = 'propertyValue'
BlockEnd = 'blockEnd'
class Chars:
LeftCurly = '{'
RightCurly = '}'
Asterisk = '*'
Slash = '/'
Colon = ':'
Semicolon = ';'
Backslash = '\\'
LeftRound = '('
RightRound = ')'
LF = '\n'
CR = '\r'
def scan(source: str, callback: callable):
"""
Performs fast scan of given stylesheet (CSS, LESS, SCSS) source code and runs
callback for each token and its range found. The goal of this parser is to quickly
determine document structure: selector, property, value and block end.
It doesn’t provide detailed info about CSS atoms like compound selectors,
    operators, quoted strings, etc. to reduce memory allocations: this data can be
parsed later on demand.
"""
scanner = Scanner(source)
state = ScanState()
block_end = False
def notify(token_type: TokenType, delimiter: int=None, start: int=None, end: int=None):
if delimiter is None: delimiter = scanner.start
if start is None: start = state.start
if end is None: end = state.end
return callback(token_type, start, end, delimiter) is False
while not scanner.eof():
if comment(scanner) or whitespace(scanner):
continue
scanner.start = scanner.pos
block_end = scanner.eat(Chars.RightCurly)
if block_end or scanner.eat(Chars.Semicolon):
# Block or property end
if state.property_start != -1:
# We have pending property
if notify(TokenType.PropertyName, state.property_delimiter, state.property_start, state.property_end):
return
if state.start == -1:
# Explicit property value state: emit empty value
state.start = state.end = scanner.start
if notify(TokenType.PropertyValue):
return
elif state.start != -1 and notify(TokenType.PropertyName):
# Flush consumed token
return
if block_end:
state.start = scanner.start
state.end = scanner.pos
if notify(TokenType.BlockEnd):
return
state.reset()
elif scanner.eat(Chars.LeftCurly):
# Block start
if state.start == -1 and state.property_start == -1:
# No consumed selector, emit empty value as selector start
state.start = state.end = scanner.pos
if state.property_start != -1:
# Now we know that value that looks like property name-value pair
# was actually a selector
state.start = state.property_start
if notify(TokenType.Selector):
return
state.reset()
elif scanner.eat(Chars.Colon) and not is_known_selector_colon(scanner, state):
# Colon could be one of the following:
# — property delimiter: `foo: bar`, must be in block context
# — variable delimiter: `$foo: bar`, could be anywhere
# — pseudo-selector: `a:hover`, could be anywhere (for LESS and SCSS)
# — media query expression: `min-width: 100px`, must be inside expression context
            # Since we can’t reliably tell what `:` means here, update the state
            # to accumulate a possible property name-value pair or selector
if state.property_start == -1:
state.property_start = state.start
state.property_end = state.end
state.property_delimiter = scanner.pos - 1
state.start = state.end = -1
else:
if state.start == -1:
state.start = scanner.pos
if scanner.eat(Chars.LeftRound):
state.expression += 1
elif scanner.eat(Chars.RightRound):
state.expression -= 1
elif not literal(scanner):
scanner.pos += 1
state.end = scanner.pos
if state.property_start != -1:
# Pending property name
if notify(TokenType.PropertyName, state.property_delimiter, state.property_start, state.property_end):
return
if state.start != -1:
# There’s pending token in state
notify(TokenType.PropertyValue if state.property_start != -1 else TokenType.PropertyName, -1)
def whitespace(scanner: Scanner):
return scanner.eat_while(is_space)
def comment(scanner: Scanner):
"""
    Consumes CSS comments from scanner: `/* ... */`
It’s possible that comment may not have closing part
"""
start = scanner.pos
if scanner.eat(Chars.Slash) and scanner.eat(Chars.Asterisk):
scanner.start = start
while not scanner.eof():
if scanner.eat(Chars.Asterisk):
if scanner.eat(Chars.Slash):
return True
continue
scanner.pos += 1
return True
else:
scanner.pos = start
return False
def literal(scanner: Scanner):
ch = scanner.peek()
if is_quote(ch):
scanner.start = scanner.pos
scanner.pos += 1
while not scanner.eof():
if scanner.eat(ch) or scanner.eat(Chars.LF) or scanner.eat(Chars.CR):
break
# Skip escape character, if any
scanner.eat(Chars.Backslash)
scanner.pos += 1
# Do not throw if string is incomplete
return True
def is_known_selector_colon(scanner: Scanner, state: ScanState):
"Check if current state is a known selector context for `:` delimiter"
# Either inside expression like `(min-width: 10px)` or pseudo-element `::before`
return state.expression or scanner.eat_while(Chars.Colon)
```
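A quick driver for `scan()`, mirroring the token tuples asserted in the CSS scanner tests near the end of this dump (import path assumed from the package layout):

```python
# Hedged sketch; assumes emmet.css_matcher re-exports scan, as the tests below do.
from emmet.css_matcher import scan

source = 'a { color: red; }'

def on_token(token_type, start, end, delimiter):
    # Print the token type together with the slice of source it covers.
    print(token_type, repr(source[start:end]), start, end, delimiter)

scan(source, on_token)
```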
#### File: emmet/extract_abbreviation/__init__.py
```python
import re
from .reader import BackwardScanner
from .is_html import is_html as is_at_html_tag, is_quote
from .brackets import Brackets, BRACE_PAIRS
from ..scanner_utils import is_alpha, is_number
class ExtractedAbbreviation:
__slots__ = ('abbreviation', 'location', 'start', 'end')
def __init__(self, abbreviation: str, location: int, start: int, end: int):
self.abbreviation = abbreviation
"Extracted abbreviation"
self.location = location
"Location of abbreviation in input string"
self.start = start
"Start location of matched abbreviation, including prefix"
self.end = end
"End location of extracted abbreviation"
def __eq__(self, other):
if isinstance(other, ExtractedAbbreviation):
return self.abbreviation == other.abbreviation and \
self.location == other.location and \
self.start == other.start and \
self.end == other.end
raise NotImplementedError
def __repr__(self):
return repr({
'abbreviation': self.abbreviation,
'location': self.location,
'start': self.start,
'end': self.end,
})
SPECIAL_CHARS = '#.*:$-_!@%^+>/'
def extract_abbreviation(line: str, pos: int=None, options={}) -> ExtractedAbbreviation:
"""
Extracts abbreviation from given line of source code.
Options:
Parser options:
lookAhead: bool
Allow parser to look ahead of `pos` index for searching of missing
        abbreviation parts. Most editors automatically insert closing braces for
`[`, `{` and `(`, which will most likely be right after current caret position.
So in order to properly expand abbreviation, user must explicitly move
caret right after auto-inserted braces. With this option enabled, parser
will search for closing braces right after `pos`. Default is `true`
type: 'markup' | 'stylesheet'
Type of context syntax of expanded abbreviation.
In 'stylesheet' syntax, brackets `[]` and `{}` are not supported thus
not extracted.
prefix: str
A string that should precede abbreviation in order to make it successfully
extracted. If given, the abbreviation will be extracted from the nearest
`prefix` occurrence.
"""
if pos is None:
pos = len(line)
opt = create_options(options)
# make sure `pos` is within line range
pos = min(len(line), max(0, pos))
if opt.get('lookAhead'):
pos = offset_past_auto_closed(line, pos, opt)
start = get_start_offset(line, pos, opt.get('prefix', ''))
if start == -1:
return None
scanner = BackwardScanner(line, start)
scanner.pos = pos
stack = []
while not scanner.sol():
ch = scanner.peek()
if Brackets.CurlyR in stack:
if ch == Brackets.CurlyR:
stack.append(ch)
scanner.pos -= 1
continue
if ch != Brackets.CurlyL:
scanner.pos -= 1
continue
if is_close_brace(ch, opt.get('type')):
stack.append(ch)
elif is_open_brace(ch, opt.get('type')):
if not stack or stack.pop() != BRACE_PAIRS[ch]:
# unexpected brace
break
elif Brackets.SquareR in stack or Brackets.CurlyR in stack:
# respect all characters inside attribute sets or text nodes
scanner.pos -= 1
continue
elif is_at_html_tag(scanner) or not is_abbreviation(ch):
break
scanner.pos -= 1
if not stack and scanner.pos != pos:
# Found something, remove some invalid symbols from the
# beginning and return abbreviation
abbreviation = re.sub(r'^[*+>^]+', '', line[scanner.pos:pos])
prefix = opt.get('prefix', '')
start = start - len(prefix) if prefix else pos - len(abbreviation)
return ExtractedAbbreviation(abbreviation, pos - len(abbreviation), start, pos)
def offset_past_auto_closed(line: str, pos: int, options: dict):
"""
Returns new `line` index which is right after characters beyond `pos` that
editor will likely automatically close, e.g. }, ], and quotes
"""
# closing quote is allowed only as a next character
if pos < len(line) and is_quote(line[pos]): pos += 1
# offset pointer until non-autoclosed character is found
while pos < len(line) and is_close_brace(line[pos], options.get('type')):
pos += 1
return pos
def get_start_offset(line: str, pos: int, prefix: str):
"""
Returns start offset (left limit) in `line` where we should stop looking for
    abbreviation: the location of the `prefix` token occurrence nearest to `pos`
"""
if not prefix: return 0
scanner = BackwardScanner(line)
scanner.pos = pos
while not scanner.sol():
if consume_pair(scanner, Brackets.SquareR, Brackets.SquareL) or consume_pair(scanner, Brackets.CurlyR, Brackets.CurlyL):
continue
result = scanner.pos
if consume_list(scanner, prefix):
return result
scanner.pos -= 1
return -1
def consume_pair(scanner: BackwardScanner, close_ch: str, open_ch: str):
"Consumes full character pair, if possible"
start = scanner.pos
if scanner.consume(close_ch):
while not scanner.sol():
if scanner.consume(open_ch):
return True
scanner.pos -= 1
scanner.pos = start
return False
def consume_list(scanner: BackwardScanner, arr: list):
"Consumes all character codes from given list, right-to-left, if possible"
start = scanner.pos
consumed = False
i = len(arr) - 1
while i >= 0 and not scanner.sol():
if not scanner.consume(arr[i]):
break
consumed = i == 0
i -= 1
if not consumed:
scanner.pos = start
return consumed
def is_abbreviation(ch: str):
return is_alpha(ch) or is_number(ch) or ch in SPECIAL_CHARS
def is_open_brace(ch: str, syntax: str):
return ch == Brackets.RoundL or (syntax == 'markup' and ch in (Brackets.SquareL, Brackets.CurlyL))
def is_close_brace(ch: str, syntax: str):
return ch == Brackets.RoundR or (syntax == 'markup' and ch in (Brackets.SquareR, Brackets.CurlyR))
def create_options(opt=None):
options = {
'type': 'markup',
'lookAhead': True,
'prefix': '',
}
if opt:
options.update(opt)
return options
```
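A minimal sketch of `extract_abbreviation()`; the input line and the expected result are illustrative:

```python
# Hedged sketch; assumes the emmet.extract_abbreviation package shown above is importable.
from emmet.extract_abbreviation import extract_abbreviation

# Extraction walks backwards from the caret (end of line by default).
result = extract_abbreviation('Lorem ipsum ul>li.item*3')
if result:
    print(result.abbreviation, result.start, result.end)  # e.g. ul>li.item*3 12 24
```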
#### File: emmet/html_matcher/__init__.py
```python
from .utils import ScannerOptions, ElementType
from .scan import scan
from .attributes import attributes, AttributeToken
class MatchedTag:
__slots__ = ('name', 'attributes', 'open', 'close')
def __init__(self, name: str, attrs: list, open_range: tuple, close_range: tuple=None):
self.name = name
"Name of matched tag"
self.attributes = attrs
"List of tag attributes"
self.open = open_range
"Range of opening tag"
self.close = close_range
"Range of closing tag. If absent, tag is self-closing"
class Tag:
__slots__ = ('name', 'start', 'end')
def __init__(self, name: str, start: int, end: int):
self.name = name
self.start = start
self.end = end
class InwardTag:
__slots__ = ('name', 'ranges', 'first_child')
def __init__(self, name: str, ranges: list, first_child=None):
self.name = name
self.ranges = ranges
self.first_child = first_child
class BalancedTag:
__slots__ = ('name', 'open', 'close')
def __init__(self, name: str, open_range: tuple, close_range: tuple=None):
self.name = name
"Name of matched tag"
self.open = open_range
"Range of opening tag"
self.close = close_range
"Range of closing tag. If absent, tag is self-closing"
def to_json(self):
json = {
'name': self.name,
'open': list(self.open)
}
if self.close:
json['close'] = list(self.close)
return json
def match(source: str, pos: int, opt: dict=None) -> MatchedTag:
"Finds matched tag for given `pos` location in XML/HTML `source`"
# Since we expect large input document, we’ll use pooling technique
# for storing tag data to reduce memory pressure and improve performance
pool = []
stack = []
options = ScannerOptions(opt)
result = [None]
def scan_callback(name: str, elem_type: ElementType, start: int, end: int):
if elem_type == ElementType.Open and is_self_close(name, options):
# Found empty element in HTML mode, mark is as self-closing
elem_type = ElementType.SelfClose
if elem_type == ElementType.Open:
# Allocate tag object from pool
stack.append(alloc_tag(pool, name, start, end))
elif elem_type == ElementType.SelfClose:
if start < pos < end:
# Matched given self-closing tag
result[0] = MatchedTag(name, get_attributes(source, start, end, name), (start, end))
return False
else:
tag = stack and stack[-1]
if tag and tag.name == name:
# Matching closing tag found
if tag.start < pos < end:
result[0] = MatchedTag(name, get_attributes(source, tag.start, tag.end, name), (tag.start, tag.end), (start, end))
return False
if stack:
# Release tag object for further re-use
release_tag(pool, stack.pop())
scan(source, scan_callback, options.special)
return result[0]
def balanced_outward(source: str, pos: int, opt: dict=None) -> list:
"""
Returns balanced tag model: a list of all XML/HTML tags that could possibly match
given location when moving in outward direction
"""
pool = []
stack = []
options = ScannerOptions(opt)
result = []
def scan_callback(name: str, elem_type: ElementType, start: int, end: int):
if elem_type == ElementType.Close:
tag = stack and stack[-1]
if tag and tag.name == name:
# XXX check for invalid tag names?
# Matching closing tag found, check if matched pair is a candidate
# for outward balancing
if tag.start < pos < end:
result.append(BalancedTag(name, (tag.start, tag.end), (start, end)))
# Release tag object for further re-use
release_tag(pool, stack.pop())
elif elem_type == ElementType.SelfClose or is_self_close(name, options):
if start < pos < end:
# Matched self-closed tag
result.append(BalancedTag(name, (start, end)))
else:
stack.append(alloc_tag(pool, name, start, end))
scan(source, scan_callback, options.special)
return result
def balanced_inward(source: str, pos: int, opt: dict=None) -> list:
"""
Returns balanced tag model: a list of all XML/HTML tags that could possibly match
given location when moving in inward direction
"""
# Collecting tags for inward balancing is a bit trickier: we have to store
# first child of every matched tag until we find the one that matches given
# location
pool = []
stack = []
options = ScannerOptions(opt)
result = []
def alloc(name: str, start: int, end: int):
if pool:
tag = pool.pop()
tag.name = name
tag.ranges.append(start)
tag.ranges.append(end)
return tag
return InwardTag(name, [start, end])
def release(tag: InwardTag):
tag.ranges.clear()
tag.first_child = None
pool.append(tag)
def scan_callback(name: str, elem_type: ElementType, start: int, end: int):
if elem_type == ElementType.Close:
if not stack:
# Some sort of lone closing tag, ignore it
return
tag = stack[-1]
if tag.name == name:
# XXX check for invalid tag names?
# Matching closing tag found, check if matched pair is a candidate
                # for inward balancing
if tag.ranges[0] <= pos <= end:
result.append(BalancedTag(name, (tag.ranges[0], tag.ranges[1]), (start, end)))
while tag.first_child:
child = tag.first_child
res = BalancedTag(child.name, (child.ranges[0], child.ranges[1]))
if len(child.ranges) > 2:
res.close = (child.ranges[2], child.ranges[3])
result.append(res)
release(tag)
tag = child
return False
else:
stack.pop()
parent = stack and stack[-1]
if parent and not parent.first_child:
# No first child in parent node: store current tag
tag.ranges.append(start)
tag.ranges.append(end)
parent.first_child = tag
else:
release(tag)
elif elem_type == ElementType.SelfClose or is_self_close(name, options):
if start < pos < end:
# Matched self-closed tag, no need to look further
result.append(BalancedTag(name, (start, end)))
return False
parent = stack and stack[-1]
if parent and not parent.first_child:
parent.first_child = alloc(name, start, end)
else:
stack.append(alloc(name, start, end))
scan(source, scan_callback, options.special)
return result
def alloc_tag(pool: list, name: str, start: int, end: int):
if pool:
tag = pool.pop()
tag.name = name
tag.start = start
tag.end = end
return tag
return Tag(name, start, end)
def release_tag(pool: list, tag: Tag):
if tag:
pool.append(tag)
def get_attributes(source: str, start: int, end: int, name: str=None):
"Returns parsed attributes from given source"
attrs = attributes(source[start:end], name)
for attr in attrs:
attr.name_start += start
attr.name_end += start
if attr.value is not None:
attr.value_start += start
attr.value_end += start
return attrs
def is_self_close(name: str, options: ScannerOptions):
"Check if given tag is self-close for current parsing context"
return not options.xml and name in options.empty
```
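A usage sketch for `match()` above; the sample markup and printed ranges are illustrative:

```python
# Hedged sketch; match() is defined in the emmet.html_matcher package shown above.
from emmet.html_matcher import match

src = '<ul><li class="item">text</li></ul>'
tag = match(src, src.index('text'))
if tag:
    # Name plus (start, end) ranges of the open and close parts of the matched tag.
    print(tag.name, tag.open, tag.close)  # e.g. li (4, 21) (25, 30)
```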
#### File: emmet/html_matcher/utils.py
```python
from ..scanner import Scanner
from ..scanner_utils import is_space, is_quote, is_alpha, is_number, eat_pair
class ElementType:
Open = 1
Close = 2
SelfClose = 3
class Chars:
Dash = '-'
Dot = '.'
Slash = '/'
Colon = ':'
LeftAngle = '<'
RightAngle = '>'
LeftRound = '('
RightRound = ')'
LeftSquare = '['
RightSquare = ']'
LeftCurly = '{'
RightCurly = '}'
Underscore = '_'
Equals = '='
Asterisk = '*'
Hash = '#'
scan_opt = { 'throws': False }
default_special = {
'style': None,
'script': ['', 'text/javascript', 'application/x-javascript', 'javascript', 'typescript', 'ts', 'coffee', 'coffeescript']
}
default_empty = ['img', 'meta', 'link', 'br', 'base', 'hr', 'area', 'wbr', 'col', 'embed', 'input', 'param', 'source', 'track']
class ScannerOptions:
__slots__ = ('xml', 'special', 'empty')
def __init__(self, options: dict=None):
if options is None:
options = {}
self.xml = options.get('xml', False)
"""
Parses given source as XML document. It alters how should-be-empty
elements are treated: for example, in XML mode parser will try to locate
closing pair for `<br>` tag
"""
self.special = options.get('special', default_special)
"""
List of tags that should have special parsing rules, e.g. should not parse
inner content and skip to closing tag. Key is a tag name that should be
considered special and value is either empty (always mark element as special)
or list of `type` attribute values, which, if present with one of this value,
make element special
"""
self.empty = options.get('empty', default_empty)
"""
List of elements that should be treated as empty (e.g. without closing tag)
in non-XML syntax
"""
def consume_array(scanner: Scanner, codes: list):
"Consumes array of character codes from given scanner"
start = scanner.pos
for ch in codes:
if not scanner.eat(ch):
scanner.pos = start
return False
scanner.start = start
return True
def consume_section(scanner: Scanner, prefix: list, suffix: list, allow_unclosed=False):
"""
    Consumes section from given string which starts with `prefix` character codes
    and ends with `suffix` character codes
    :return `True` if section was consumed
"""
start = scanner.pos
if consume_array(scanner, prefix):
# consumed `<!--`, read next until we find ending part or reach the end of input
while not scanner.eof():
if consume_array(scanner, suffix):
scanner.start = start
return True
scanner.pos += 1
# unclosed section is allowed
if allow_unclosed:
scanner.start = start
return True
scanner.pos = start
return False
# unable to find section, revert to initial position
scanner.pos = start
return False
def name_start_char(ch: str):
# Limited XML spec: https://www.w3.org/TR/xml/#NT-NameStartChar
o = ord(ch) if ch else 0
return is_alpha(ch) or \
ch == Chars.Colon or \
ch == Chars.Underscore or \
0xC0 <= o <= 0xD6 or \
0xD8 <= o <= 0xF6 or \
0xF8 <= o <= 0x2FF or \
0x370 <= o <= 0x37D or \
0x37F <= o <= 0x1FFF
def name_char(ch: str):
"Check if given character can be used in a tag or attribute name"
# Limited XML spec: https://www.w3.org/TR/xml/#NT-NameChar
o = ord(ch) if ch else 0
return name_start_char(ch) or \
ch == Chars.Dash or \
ch == Chars.Dot or \
is_number(ch) or \
o == 0xB7 or \
0x0300 <= o <= 0x036F
def ident(scanner: Scanner):
"Consumes identifier from given scanner"
start = scanner.pos
if scanner.eat(name_start_char):
scanner.eat_while(name_char)
scanner.start = start
return True
return False
def is_terminator(ch: str):
"Check if given code is tag terminator"
return ch == Chars.RightAngle or ch == Chars.Slash
def is_unquoted(ch: str):
"Check if given character code is valid unquoted value"
return ch and not is_quote(ch) and not is_space(ch) and not is_terminator(ch)
def consume_paired(scanner: Scanner):
"""
Consumes paired tokens (like `[` and `]`) with respect of nesting and embedded
quoted values
    :return `True` if paired token was consumed
"""
global scan_opt
return eat_pair(scanner, Chars.LeftAngle, Chars.RightAngle, scan_opt) or \
eat_pair(scanner, Chars.LeftRound, Chars.RightRound, scan_opt) or \
eat_pair(scanner, Chars.LeftSquare, Chars.RightSquare, scan_opt) or \
eat_pair(scanner, Chars.LeftCurly, Chars.RightCurly, scan_opt)
def get_unquoted_value(value: str):
"Returns unquoted value of given string"
# Trim quotes
if value and is_quote(value[0]):
value = value[1:]
if is_quote(value[-1]):
value = value[0:-1]
return value
```
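Two of the small helpers above in action, with values borrowed from the HTML samples used elsewhere in this dump:

```python
# Hedged sketch; module path assumed from the file header above.
from emmet.html_matcher.utils import get_unquoted_value, is_unquoted

print(get_unquoted_value('"/sample"'))  # /sample
print(is_unquoted('a'), is_unquoted('"'))  # True False
```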
#### File: markup/format/__init__.py
```python
from .html import html
from .indent_format import indent_format
from ...abbreviation import Abbreviation
from ...config import Config
def haml(abbr: Abbreviation, config: Config):
return indent_format(abbr, config, {
'beforeName': '%',
'beforeAttribute': '(',
'afterAttribute': ')',
'glueAttribute': ' ',
'afterTextLine': ' |',
'booleanValue': 'true',
'selfClose': '/'
})
def pug(abbr: Abbreviation, config: Config):
return indent_format(abbr, config, {
'beforeAttribute': '(',
'afterAttribute': ')',
'glueAttribute': ', ',
'beforeTextLine': '| ',
'selfClose': '/' if config.options.get('output.selfClosingStyle') == 'xml' else ''
})
def slim(abbr: Abbreviation, config: Config):
return indent_format(abbr, config, {
'beforeAttribute': ' ',
'glueAttribute': ' ',
'beforeTextLine': '| ',
'selfClose': '/'
})
```
#### File: markup/format/utils.py
```python
from .walk import WalkState
from ...abbreviation import AbbreviationNode, AbbreviationAttribute
from ...abbreviation.tokenizer.tokens import Field
from ...config import Config
from ...output_stream import OutputStream, is_inline
caret = [Field('', 0)]
"Default caret token"
def is_snippet(node: AbbreviationNode):
"Check if given node is a snippet: a node without name and attributes"
    return bool(node) and not node.name and not node.attributes
def is_inline_element(node: AbbreviationNode, config: Config):
"""
Check if given node is inline-level element, e.g. element with explicitly
defined node name
"""
return is_inline(node, config) if node else False
def push_tokens(tokens: list, state: WalkState):
out = state.out
largest_index = -1
for t in tokens:
if isinstance(t, str):
out.push_string(t)
else:
out.push_field(state.field + t.index, t.name)
if t.index > largest_index:
largest_index = t.index
if largest_index != -1:
state.field += largest_index + 1
def split_by_lines(tokens: list):
"""
Splits given value token by lines: returns array where each entry is a token list
for a single line
"""
result = []
line = []
for t in tokens:
if isinstance(t, str):
lines = t.splitlines()
line.append(lines.pop(0) if lines else '')
while lines:
result.append(line)
line = [lines.pop(0) or '']
else:
line.append(t)
if line:
result.append(line)
return result
def should_output_attribute(attr: AbbreviationAttribute):
# In case if attribute is implied, check if it has a defined value:
# either non-empty value or quoted empty value
return not attr.implied or attr.value_type != 'raw' or attr.value
```
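A sketch of `split_by_lines()` above, showing how a token stream with embedded newlines is split into per-line token lists:

```python
# Hedged sketch; module path assumed from the file header above.
from emmet.markup.format.utils import split_by_lines

# Mixed token stream: a string containing a newline followed by a plain string.
print(split_by_lines(['foo\nbar', 'baz']))  # e.g. [['foo'], ['bar', 'baz']]
```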
#### File: markup/format/walk.py
```python
from ...abbreviation import Abbreviation, AbbreviationNode
from ...config import Config
from ...output_stream import OutputStream
class WalkState:
__slots__ = ('current', 'parent', 'ancestors', 'config', 'out', 'field')
def __init__(self, config: Config):
self.current = None
"Context node"
self.parent = None
"Immediate parent of currently iterated method"
self.ancestors = []
"List of all ancestors of context node"
self.config = config
"Current output config"
self.out = OutputStream(config.options)
"Output stream"
self.field = 1
"Current field index, used to output field marks for editor tabstops"
def walk(abbr: Abbreviation, visitor: callable, state: WalkState):
def callback(ctx: AbbreviationNode, index: int, items: list):
parent = state.parent
current = state.current
state.parent = current
state.current = ctx
visitor(ctx, index, items, state, walk_next)
state.current = current
state.parent = parent
def walk_next(node, index, items):
state.ancestors.append(state.current)
callback(node, index, items)
state.ancestors.pop()
for index, child in enumerate(abbr.children):
callback(child, index, abbr.children)
```
#### File: emmet/math_expression/extract.py
```python
from ..scanner_utils import is_space, is_number
from .parser import Operator, is_sign, is_operator
class BackwardScanner:
__slots__ = ('text', 'pos')
def __init__(self, text: str, pos=0):
self.text = text
self.pos = pos
def prev(self):
return self.text[self.pos - 1] if self.pos else ''
def cur(self):
return self.text[self.pos] if self.pos < len(self.text) else ''
def extract(text: str, pos=None, options: dict=None) -> tuple:
"""
Extracts math expression from given text at specified position.
Expression is extracted in backward direction.
Options:
:lookAhead bool
Allow capturing extra expression characters right after start position.
Useful for extracting expressions from text editor source which inserts
paired characters like `(` and `)` to properly extract expression past
caret position
:whitespace: bool
Allow whitespace in extracted expressions
"""
if pos is None:
pos = len(text)
opt = { 'lookAhead': True, 'whitespace': True }
if options: opt.update(options)
scanner = BackwardScanner(text, pos)
if opt['lookAhead'] and scanner.cur() == Operator.RightParenthesis:
# Basically, we should consume right parenthesis only with optional whitespace
scanner.pos += 1
l = len(text)
while scanner.pos < l:
ch = scanner.cur()
if ch != Operator.RightParenthesis and not (opt['whitespace'] and is_space(ch)):
break
scanner.pos += 1
end = scanner.pos
braces = 0
while scanner.pos >= 0:
if number(scanner):
continue
ch = scanner.prev()
if ch == Operator.RightParenthesis:
braces += 1
elif ch == Operator.LeftParenthesis:
if not braces:
break
braces -= 1
elif not ((opt['whitespace'] and is_space(ch)) or is_sign(ch) or is_operator(ch)):
break
scanner.pos -= 1
if scanner.pos != end and not braces:
# Trim whitespace
while is_space(scanner.cur()):
scanner.pos += 1
return (scanner.pos, end)
def number(scanner: BackwardScanner):
if is_number(scanner.prev()):
scanner.pos -= 1
dot = False
while scanner.pos >= 0:
ch = scanner.prev()
if ch == '.':
if dot:
# Decimal delimiter already consumed, abort
break
dot = True
elif not is_number(ch):
break
scanner.pos -= 1
return True
return False
```
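A small sketch of `extract()`; the sample text and the expected slice are illustrative:

```python
# Hedged sketch; importing from the module path shown in the file header above.
from emmet.math_expression.extract import extract

text = 'Padding: 2*3+4'
rng = extract(text)  # scans backwards from the end of the string by default
if rng:
    start, end = rng
    print(text[start:end])  # e.g. 2*3+4
```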
#### File: py-emmet/emmet/output_stream.py
```python
from .config import Config
from .abbreviation.convert import AbbreviationAttribute, AbbreviationNode
class OutputStream:
__slots__ = ('options', '_value', 'level', 'offset', 'line', 'column')
def __init__(self, options={}, level=0):
self._value = []
self.options = options
self.level = level
self.offset = 0
self.line = 0
self.column = 0
@property
def value(self):
return ''.join(self._value)
def _push(self, text: str):
"Pushes raw string into output stream without any processing"
l = len(text)
self._value.append(text)
self.offset += l
self.column += l
def push(self, text: str):
"Pushes plain string into output stream without newline processing"
process_text = self.options.get('output.text')
self._push(process_text(text, offset=self.offset, line=self.line, column=self.column))
def push_string(self, value: str):
"Pushes given string with possible newline formatting into output"
# If given value contains newlines, we should push content line-by-line and
# use `push_newline()` to maintain proper line/column state
first = True
for line in value.splitlines():
if not first: self.push_newline(True)
first = False
self.push(line)
def push_newline(self, indent=None):
"Pushes new line into given output stream"
base_indent = self.options.get('output.baseIndent')
newline = self.options.get('output.newline')
self.push('%s%s' % (newline, base_indent))
self.line += 1
self.column = len(base_indent)
if indent:
self.push_indent(self.level if indent is True else indent)
def push_indent(self, size=None):
"Adds indentation of `size` to current output stream"
if size is None: size = self.level
indent = self.options.get('output.indent')
self.push(indent * max(size, 0))
def push_field(self, index: int, placeholder: str=''):
field = self.options.get('output.field')
# NB: use `_push` instead of `push` to skip text processing
self._push(field(index, placeholder, offset=self.offset, line=self.line, column=self.column))
def tag_name(name: str, config: Config):
"Returns given tag name formatted according to given config"
return str_case(name, config.options.get('output.tagCase'))
def attr_name(name: str, config: Config):
"Returns given attribute name formatted according to given config"
return str_case(name, config.options.get('output.attributeCase'))
def attr_quote(attr: AbbreviationAttribute, config: Config, is_open: bool=None):
"Returns character for quoting value of given attribute"
if attr.value_type == 'expression':
return '{' if is_open else '}'
return '\'' if config.options.get('output.attributeQuotes') == 'single' else '"'
def is_boolean_attribute(attr: AbbreviationAttribute, config: Config):
"Check if given attribute is boolean"
if attr.boolean:
return True
name = (attr.name or '').lower()
return name in config.options.get('output.booleanAttributes', [])
def self_close(config: Config):
"Returns a token for self-closing tag, depending on current options"
style = config.options.get('output.selfClosingStyle')
if style == 'xhtml': return ' /'
if style == 'xml': return '/'
return ''
def is_inline(node: AbbreviationNode, config: Config):
if isinstance(node, str):
return node.lower() in config.options.get('inlineElements', [])
# inline node is a node either with inline-level name or text-only node
return is_inline(node.name, config) if node.name else bool(node.value and not node.attributes)
def str_case(text: str, case_type: str):
if case_type:
return text.upper() if case_type == 'upper' else text.lower()
return text
```
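The stream above expects strings and callables under `output.*` option keys (see `push`, `push_newline`, `push_indent`, `push_field`). A minimal, hand-rolled options dict is enough for a sketch; in real use these values would come from the resolved config:

```python
# Hedged sketch: the option values below are hand-rolled stand-ins, not the real defaults.
from emmet.output_stream import OutputStream

options = {
    'output.text': lambda text, **kw: text,                        # pass text through unchanged
    'output.field': lambda index, placeholder, **kw: placeholder,  # plain placeholder output
    'output.newline': '\n',
    'output.baseIndent': '',
    'output.indent': '\t',
}

out = OutputStream(options)
out.push_string('hello\nworld')
print(repr(out.value), out.line, out.column)  # 'hello\nworld' 1 5
```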
#### File: py-emmet/emmet/token_scanner.py
```python
class TokenScanner:
def __init__(self, tokens: list):
self.tokens = tokens
self.start = 0
self.pos = 0
self.size = len(tokens)
def peek(self):
return self.tokens[self.pos] if self.readable() else None
def next(self):
t = self.peek()
self.pos += 1
return t
def slice(self, start: int=None, end: int=None):
if start is None: start = self.start
if end is None: end = self.pos
return self.tokens[start:end]
def readable(self):
return self.pos < self.size
def consume(self, test: callable):
token = self.peek()
if token and test(token):
self.pos += 1
return True
return False
def consume_while(self, test: callable):
start = self.pos
while self.consume(test):
pass
return self.pos != start
def error(self, message: str, token=None):
if token is None:
token = self.peek()
pos = None
if token and token.start is not None:
pos = token.start
message += ' at %d' % pos
return TokenScannerException(message, pos)
class TokenScannerException(Exception):
def __init__(self, message: str, pos: int):
super(TokenScannerException, self).__init__(message)
self.message = message
self.pos = pos
```
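A tiny sketch of `TokenScanner` with ad-hoc tokens; the `Tok` namedtuple is a made-up stand-in that only provides the `start` attribute `error()` expects:

```python
# Hedged sketch; Tok is hypothetical and only mimics the real token classes.
from collections import namedtuple
from emmet.token_scanner import TokenScanner

Tok = namedtuple('Tok', 'value start')

scanner = TokenScanner([Tok('a', 0), Tok('+', 1), Tok('b', 2)])
# consume() advances only when the predicate matches the current token.
print(scanner.consume(lambda t: t.value == 'a'))  # True
print(scanner.consume(lambda t: t.value == 'x'))  # False
while scanner.readable():
    print(scanner.next().value)  # prints '+' then 'b'
```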
#### File: tests/abbreviation/test_tokenizer.py
```python
import unittest
import sys
sys.path.append('../')
from emmet.abbreviation.tokenizer import tokenize
def json_tokens(abbr: str):
return [token.to_json() for token in tokenize(abbr)]
class TestScanner(unittest.TestCase):
def test_basic_abbreviations(self):
self.assertEqual(json_tokens('ul>li'), [
{ 'type': 'Literal', 'value': 'ul', 'start': 0, 'end': 2 },
{ 'type': 'Operator', 'operator': 'child', 'start': 2, 'end': 3 },
{ 'type': 'Literal', 'value': 'li', 'start': 3, 'end': 5 }
])
self.assertEqual(json_tokens('ul[title="foo+bar\'str\'" (attr)=bar]{(some > text)}'), [
{ 'type': 'Literal', 'value': 'ul', 'start': 0, 'end': 2 },
{ 'type': 'Bracket', 'open': True, 'context': 'attribute', 'start': 2, 'end': 3 },
{ 'type': 'Literal', 'value': 'title', 'start': 3, 'end': 8 },
{ 'type': 'Operator', 'operator': 'equal', 'start': 8, 'end': 9 },
{ 'type': 'Quote', 'single': False, 'start': 9, 'end': 10 },
{ 'type': 'Literal', 'value': 'foo+bar\'str\'', 'start': 10, 'end': 22 },
{ 'type': 'Quote', 'single': False, 'start': 22, 'end': 23 },
{ 'type': 'WhiteSpace', 'start': 23, 'end': 24 },
{ 'type': 'Bracket', 'open': True, 'context': 'group', 'start': 24, 'end': 25 },
{ 'type': 'Literal', 'value': 'attr', 'start': 25, 'end': 29 },
{ 'type': 'Bracket', 'open': False, 'context': 'group', 'start': 29, 'end': 30 },
{ 'type': 'Operator', 'operator': 'equal', 'start': 30, 'end': 31 },
{ 'type': 'Literal', 'value': 'bar', 'start': 31, 'end': 34 },
{ 'type': 'Bracket', 'open': False, 'context': 'attribute', 'start': 34, 'end': 35 },
{ 'type': 'Bracket', 'open': True, 'context': 'expression', 'start': 35, 'end': 36 },
{ 'type': 'Literal', 'value': '(some > text)', 'start': 36, 'end': 49 },
{ 'type': 'Bracket', 'open': False, 'context': 'expression', 'start': 49, 'end': 50 }
])
self.assertEqual(json_tokens('h${some${1:field placeholder}}'), [
{ 'type': 'Literal', 'value': 'h', 'start': 0, 'end': 1 },
{ 'type': 'RepeaterNumber', 'size': 1, 'parent': 0, 'reverse': False, 'base': 1, 'start': 1, 'end': 2 },
{ 'type': 'Bracket', 'open': True, 'context': 'expression', 'start': 2, 'end': 3 },
{ 'type': 'Literal', 'value': 'some', 'start': 3, 'end': 7 },
{ 'type': 'Field', 'index': 1, 'name': 'field placeholder', 'start': 7, 'end': 29 },
{ 'type': 'Bracket', 'open': False, 'context': 'expression', 'start': 29, 'end': 30 }
])
self.assertEqual(json_tokens('div{[}+a{}'), [
{ 'type': 'Literal', 'value': 'div', 'start': 0, 'end': 3 },
{ 'type': 'Bracket', 'open': True, 'context': 'expression', 'start': 3, 'end': 4 },
{ 'type': 'Literal', 'value': '[', 'start': 4, 'end': 5 },
{ 'type': 'Bracket', 'open': False, 'context': 'expression', 'start': 5, 'end': 6 },
{ 'type': 'Operator', 'operator': 'sibling', 'start': 6, 'end': 7 },
{ 'type': 'Literal', 'value': 'a', 'start': 7, 'end': 8 },
{ 'type': 'Bracket', 'open': True, 'context': 'expression', 'start': 8, 'end': 9 },
{ 'type': 'Bracket', 'open': False, 'context': 'expression', 'start': 9, 'end': 10 }
])
def test_repeater(self):
self.assertEqual(json_tokens('#sample*3'), [
{ 'type': 'Operator', 'operator': 'id', 'start': 0, 'end': 1 },
{ 'type': 'Literal', 'value': 'sample', 'start': 1, 'end': 7 },
{ 'type': 'Repeater', 'count': 3, 'value': 0, 'implicit': False, 'start': 7, 'end': 9 }
])
self.assertEqual(json_tokens('div[foo*3]'), [
{ 'type': 'Literal', 'value': 'div', 'start': 0, 'end': 3 },
{ 'type': 'Bracket', 'open': True, 'context': 'attribute', 'start': 3, 'end': 4 },
{ 'type': 'Literal', 'value': 'foo*3', 'start': 4, 'end': 9 },
{ 'type': 'Bracket', 'open': False, 'context': 'attribute', 'start': 9, 'end': 10 }
])
self.assertEqual(json_tokens('({a*2})*3'), [
{ 'type': 'Bracket', 'open': True, 'context': 'group', 'start': 0, 'end': 1 },
{ 'type': 'Bracket', 'open': True, 'context': 'expression', 'start': 1, 'end': 2 },
{ 'type': 'Literal', 'value': 'a*2', 'start': 2, 'end': 5 },
{ 'type': 'Bracket', 'open': False, 'context': 'expression', 'start': 5, 'end': 6 },
{ 'type': 'Bracket', 'open': False, 'context': 'group', 'start': 6, 'end': 7 },
{ 'type': 'Repeater', 'count': 3, 'value': 0, 'implicit': False, 'start': 7, 'end': 9 }
])
```
#### File: tests/action_utils/test_html.py
```python
import unittest
import sys
import os.path
sys.path.append('../../')
from emmet.action_utils import select_item_html, get_open_tag
def read_file(file: str):
dirname = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dirname, file), 'r') as f:
        return f.read()
sample = read_file('sample.html')
class TestHTMLActionUtils(unittest.TestCase):
def test_select_next_item(self):
# `<li class="item item_1">`: select tag name, full attribute, attribute
# value and class names
self.assertEqual(select_item_html(sample, 9).to_json(), {
'start': 9,
'end': 33,
'ranges': [
(10, 12),
(13, 32),
(20, 31),
(20, 24),
(25, 31)
]
})
# <a href="/sample" title={expr}>
self.assertEqual(select_item_html(sample, 33).to_json(), {
'start': 42,
'end': 74,
'ranges': [
(43, 44),
(45, 59),
(51, 58),
(61, 73),
(68, 72)
]
})
def test_select_previous_item(self):
# <a href="/sample" title={expr}>
self.assertEqual(select_item_html(sample, 80, True).to_json(), {
'start': 42,
'end': 74,
'ranges': [
(43, 44),
(45, 59),
(51, 58),
(61, 73),
(68, 72)
]
})
# <li class="item item_1">
self.assertEqual(select_item_html(sample, 42, True).to_json(), {
'start': 9,
'end': 33,
'ranges': [
(10, 12),
(13, 32),
(20, 31),
(20, 24),
(25, 31)
]
})
def test_get_open_tag(self):
self.assertEqual(get_open_tag(sample, 60).to_json(), {
'name': 'a',
'type': 1,
'start': 42,
'end': 74,
'attributes': [{
'name': 'href',
'name_start': 45,
'name_end': 49,
'value': '"/sample"',
'value_start': 50,
'value_end': 59
}, {
'name': 'title',
'name_start': 61,
'name_end': 66,
'value': '{expr}',
'value_start': 67,
'value_end': 73
}]
})
self.assertEqual(get_open_tag(sample, 15).to_json(), {
'name': 'li',
'type': 1,
'start': 9,
'end': 33,
'attributes': [{
'name': 'class',
'name_start': 13,
'name_end': 18,
'value': '"item item_1"',
'value_start': 19,
'value_end': 32
}]
})
self.assertEqual(get_open_tag(sample, 74), None)
```
#### File: tests/css_matcher/test_scan.py
```python
import unittest
import sys
sys.path.append('../../')
from emmet.css_matcher import scan
def tokens(source: str):
result = []
scan(source, lambda token_type, start, end, delimiter: result.append([source[start:end], token_type, start, end, delimiter]))
return result
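# Each scan callback hit is recorded as [matched text, token type, start, end, delimiter],
# which is the row layout asserted throughout the tests below.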
class TestCSSScanner(unittest.TestCase):
def test_selectors(self):
self.assertEqual(tokens('a {}'), [
['a', 'selector', 0, 1, 2],
['}', 'blockEnd', 3, 4, 3]
])
self.assertEqual(tokens('a { foo: bar; }'), [
['a', 'selector', 0, 1, 2],
['foo', 'propertyName', 4, 7, 7],
['bar', 'propertyValue', 9, 12, 12],
['}', 'blockEnd', 14, 15, 14]
])
self.assertEqual(tokens('a { b{} }'), [
['a', 'selector', 0, 1, 2],
['b', 'selector', 4, 5, 5],
['}', 'blockEnd', 6, 7, 6],
['}', 'blockEnd', 8, 9, 8]
])
self.assertEqual(tokens('a {:;}'), [
['a', 'selector', 0, 1, 2],
['}', 'blockEnd', 5, 6, 5]
])
self.assertEqual(tokens('a + b.class[attr="}"] { }'), [
['a + b.class[attr="}"]', 'selector', 0, 21, 22],
['}', 'blockEnd', 24, 25, 24]
])
self.assertEqual(tokens('a /* b */ { foo: bar; }'), [
['a', 'selector', 0, 1, 10],
['foo', 'propertyName', 12, 15, 15],
['bar', 'propertyValue', 17, 20, 20],
['}', 'blockEnd', 22, 23, 22]
])
def test_property(self):
self.assertEqual(tokens('a'), [
['a', 'propertyName', 0, 1, -1]
])
self.assertEqual(tokens('a:b'), [
['a', 'propertyName', 0, 1, 1],
['b', 'propertyValue', 2, 3, -1]
])
self.assertEqual(tokens('a:b;;'), [
['a', 'propertyName', 0, 1, 1],
['b', 'propertyValue', 2, 3, 3]
])
self.assertEqual(tokens('a { b: c; d: e; }'), [
['a', 'selector', 0, 1, 2],
['b', 'propertyName', 4, 5, 5],
['c', 'propertyValue', 7, 8, 8],
['d', 'propertyName', 10, 11, 11],
['e', 'propertyValue', 13, 14, 14],
['}', 'blockEnd', 16, 17, 16]
])
self.assertEqual(tokens('a { foo: bar "baz}" ; }'), [
['a', 'selector', 0, 1, 2],
['foo', 'propertyName', 4, 7, 7],
['bar "baz}"', 'propertyValue', 9, 19, 20],
['}', 'blockEnd', 22, 23, 22]
])
self.assertEqual(tokens('@media (min-width: 900px) {}'), [
['@media (min-width: 900px)', 'selector', 0, 25, 26],
['}', 'blockEnd', 27, 28, 27]
])
def test_pseudo_selectors(self):
self.assertEqual(tokens('\na:hover { foo: bar "baz}" ; }'), [
['a:hover', 'selector', 1, 8, 9],
['foo', 'propertyName', 11, 14, 14],
['bar "baz}"', 'propertyValue', 16, 26, 27],
['}', 'blockEnd', 29, 30, 29]
])
self.assertEqual(tokens('a:hover b[title=""] { padding: 10px; }'), [
['a:hover b[title=""]', 'selector', 0, 19, 20],
['padding', 'propertyName', 22, 29, 29],
['10px', 'propertyValue', 31, 35, 35],
['}', 'blockEnd', 37, 38, 37]
])
self.assertEqual(tokens('a::before {}'), [
['a::before', 'selector', 0, 9, 10],
['}', 'blockEnd', 11, 12, 11]
])
self.assertEqual(tokens('a { &::before { } }'), [
['a', 'selector', 0, 1, 2],
['&::before', 'selector', 4, 13, 14],
['}', 'blockEnd', 17, 18, 17],
['}', 'blockEnd', 19, 20, 19]
])
```
#### File: tests/html_matcher/test_balanced.py
```python
import unittest
import sys
import os.path
sys.path.append('../../')
from emmet.html_matcher import balanced_inward, balanced_outward
def inward(src: str, pos: int):
return [tag.to_json() for tag in balanced_inward(src, pos)]
def outward(src: str, pos: int):
return [tag.to_json() for tag in balanced_outward(src, pos)]
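# Each to_json() result is a dict with the tag 'name' and its 'open' (and, for paired
# tags, 'close') [start, end] ranges, as asserted in the expected values below.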
def read_file(file: str):
dirname = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dirname, file), 'r') as f:
        return f.read()
class TestHTMLBalancedModels(unittest.TestCase):
def test_outward(self):
doc = read_file('sample.html')
self.assertEqual(outward(doc, 0), [])
self.assertEqual(outward(doc, 1), [
{ 'name': 'ul', 'open': [0, 4], 'close': [179, 184] }
])
self.assertEqual(outward(doc, 73), [
{ 'name': 'li', 'open': [71, 75], 'close': [147, 152] },
{ 'name': 'ul', 'open': [0, 4], 'close': [179, 184] }
])
self.assertEqual(outward(doc, 114), [
{ 'name': 'br', 'open': [112, 118] },
{ 'name': 'div', 'open': [78, 83], 'close': [121, 127] },
{ 'name': 'li', 'open': [71, 75], 'close': [147, 152] },
{ 'name': 'ul', 'open': [0, 4], 'close': [179, 184] }
])
def test_inward(self):
doc = read_file('sample.html')
self.assertEqual(inward(doc, 0), [
{ 'name': 'ul', 'open': [0, 4], 'close': [179, 184] },
{ 'name': 'li', 'open': [6, 10], 'close': [25, 30] },
{ 'name': 'a', 'open': [10, 21], 'close': [21, 25] }
])
self.assertEqual(inward(doc, 1), [
{ 'name': 'ul', 'open': [0, 4], 'close': [179, 184] },
{ 'name': 'li', 'open': [6, 10], 'close': [25, 30] },
{ 'name': 'a', 'open': [10, 21], 'close': [21, 25] }
])
self.assertEqual(inward(doc, 73), [
{ 'name': 'li', 'open': [71, 75], 'close': [147, 152] },
{ 'name': 'div', 'open': [78, 83], 'close': [121, 127] },
{ 'name': 'img', 'open': [87, 108] }
])
self.assertEqual(inward(doc, 114), [
{ 'name': 'br', 'open': [112, 118] }
])
```
#### File: py-emmet/tests/test_format.py
```python
import unittest
import sys
sys.path.append('../')
from emmet.markup import parse, html, haml, pug, slim
from emmet.config import Config
def tabstops(index: int, placeholder: str, **kwargs):
if placeholder:
return '${%d:%s}' % (index, placeholder)
return '${%d}' % index
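# Example: tabstops(1, 'foo') -> '${1:foo}', tabstops(2, '') -> '${2}'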
default_config = Config()
field = Config({
'options': {
'output.field': tabstops
}
})
def create_profile(options: dict):
return Config({'options': options})
def output_html(abbr: str, config=default_config):
return html(parse(abbr, config), config)
class TestHTMLFormat(unittest.TestCase):
def test_basic(self):
self.assertEqual(output_html('div>p'), '<div>\n\t<p></p>\n</div>')
self.assertEqual(output_html('div>p*3'), '<div>\n\t<p></p>\n\t<p></p>\n\t<p></p>\n</div>')
self.assertEqual(output_html('div#a>p.b*2>span'), '<div id="a">\n\t<p class="b"><span></span></p>\n\t<p class="b"><span></span></p>\n</div>')
self.assertEqual(output_html('div>div>div'), '<div>\n\t<div>\n\t\t<div></div>\n\t</div>\n</div>')
self.assertEqual(output_html('table>tr*2>td{item}*2'),
'<table>\n\t<tr>\n\t\t<td>item</td>\n\t\t<td>item</td>\n\t</tr>\n\t<tr>\n\t\t<td>item</td>\n\t\t<td>item</td>\n\t</tr>\n</table>')
def test_inline_elements(self):
profile = create_profile({ 'output.inlineBreak': 3 })
break_inline = create_profile({ 'output.inlineBreak': 1 })
keep_inline = create_profile({ 'output.inlineBreak': 0 })
xhtml = create_profile({ 'output.selfClosingStyle': 'xhtml' })
self.assertEqual(output_html('div>a>b*3', xhtml), '<div>\n\t<a href="">\n\t\t<b></b>\n\t\t<b></b>\n\t\t<b></b>\n\t</a>\n</div>')
self.assertEqual(output_html('p>i', profile), '<p><i></i></p>')
self.assertEqual(output_html('p>i*2', profile), '<p><i></i><i></i></p>')
self.assertEqual(output_html('p>i*2', break_inline), '<p>\n\t<i></i>\n\t<i></i>\n</p>')
self.assertEqual(output_html('p>i*3', profile), '<p>\n\t<i></i>\n\t<i></i>\n\t<i></i>\n</p>')
self.assertEqual(output_html('p>i*3', keep_inline), '<p><i></i><i></i><i></i></p>')
self.assertEqual(output_html('i*2', profile), '<i></i><i></i>')
self.assertEqual(output_html('i*3', profile), '<i></i>\n<i></i>\n<i></i>')
self.assertEqual(output_html('i{a}+i{b}', profile), '<i>a</i><i>b</i>')
self.assertEqual(output_html('img[src]/+p', xhtml), '<img src="" alt="" />\n<p></p>')
self.assertEqual(output_html('div>img[src]/+p', xhtml), '<div>\n\t<img src="" alt="" />\n\t<p></p>\n</div>')
self.assertEqual(output_html('div>p+img[src]/', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" />\n</div>')
self.assertEqual(output_html('div>p+img[src]/+p', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" />\n\t<p></p>\n</div>')
self.assertEqual(output_html('div>p+img[src]/*2+p', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" /><img src="" alt="" />\n\t<p></p>\n</div>')
self.assertEqual(output_html('div>p+img[src]/*3+p', xhtml), '<div>\n\t<p></p>\n\t<img src="" alt="" />\n\t<img src="" alt="" />\n\t<img src="" alt="" />\n\t<p></p>\n</div>')
def test_generate_fields(self):
self.assertEqual(output_html('a[href]', field), '<a href="${1}">${2}</a>')
self.assertEqual(output_html('a[href]*2', field), '<a href="${1}">${2}</a><a href="${3}">${4}</a>')
self.assertEqual(output_html('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}\n${4} ${5:foo} ${6:bar}')
self.assertEqual(output_html('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar\n foo bar')
self.assertEqual(output_html('ul>li*2', field), '<ul>\n\t<li>${1}</li>\n\t<li>${2}</li>\n</ul>')
self.assertEqual(output_html('div>img[src]/', field), '<div><img src="${1}" alt="${2}"></div>')
def test_mixed_content(self):
self.assertEqual(output_html('div{foo}'), '<div>foo</div>')
self.assertEqual(output_html('div>{foo}'), '<div>foo</div>')
self.assertEqual(output_html('div>{foo}+{bar}'), '<div>\n\tfoo\n\tbar\n</div>')
self.assertEqual(output_html('div>{foo}+{bar}+p'), '<div>\n\tfoo\n\tbar\n\t<p></p>\n</div>')
self.assertEqual(output_html('div>{foo}+{bar}+p+{foo}+{bar}+p'), '<div>\n\tfoo\n\tbar\n\t<p></p>\n\tfoo\n\tbar\n\t<p></p>\n</div>')
self.assertEqual(output_html('div>{foo}+p+{bar}'), '<div>\n\tfoo\n\t<p></p>\n\tbar\n</div>')
self.assertEqual(output_html('div>{foo}>p'), '<div>\n\tfoo\n\t<p></p>\n</div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}'), '<div><!-- --></div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}+p'), '<div>\n\t<!-- -->\n\t<p></p>\n</div>')
self.assertEqual(output_html('div>p+{<!-- ${0} -->}'), '<div>\n\t<p></p>\n\t<!-- -->\n</div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}>p'), '<div>\n\t<!-- <p></p> -->\n</div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}*2>p'), '<div>\n\t<!-- <p></p> -->\n\t<!-- <p></p> -->\n</div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}>p*2'), '<div>\n\t<!-- \n\t<p></p>\n\t<p></p>\n\t-->\n</div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}*2>p*2'), '<div>\n\t<!-- \n\t<p></p>\n\t<p></p>\n\t-->\n\t<!-- \n\t<p></p>\n\t<p></p>\n\t-->\n</div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}>b'), '<div>\n\t<!-- <b></b> -->\n</div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}>b*2'), '<div>\n\t<!-- <b></b><b></b> -->\n</div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}>b*3'), '<div>\n\t<!-- \n\t<b></b>\n\t<b></b>\n\t<b></b>\n\t-->\n</div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}', field), '<div><!-- ${1} --></div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}>b', field), '<div>\n\t<!-- <b>${1}</b> -->\n</div>')
def test_self_closing(self):
xml_style = create_profile({ 'output.selfClosingStyle': 'xml' })
html_style = create_profile({ 'output.selfClosingStyle': 'html' })
xhtml_style = create_profile({ 'output.selfClosingStyle': 'xhtml' })
self.assertEqual(output_html('img[src]/', html_style), '<img src="" alt="">')
self.assertEqual(output_html('img[src]/', xhtml_style), '<img src="" alt="" />')
self.assertEqual(output_html('img[src]/', xml_style), '<img src="" alt=""/>')
self.assertEqual(output_html('div>img[src]/', xhtml_style), '<div><img src="" alt="" /></div>')
def test_boolean_attributes(self):
compact = create_profile({'output.compactBoolean': True})
no_compact = create_profile({'output.compactBoolean': False})
self.assertEqual(output_html('p[b.]', no_compact), '<p b="b"></p>')
self.assertEqual(output_html('p[b.]', compact), '<p b></p>')
self.assertEqual(output_html('p[contenteditable]', compact), '<p contenteditable></p>')
self.assertEqual(output_html('p[contenteditable]', no_compact), '<p contenteditable="contenteditable"></p>')
self.assertEqual(output_html('p[contenteditable=foo]', compact), '<p contenteditable="foo"></p>')
def test_no_formatting(self):
profile = create_profile({ 'output.format': False })
self.assertEqual(output_html('div>p', profile), '<div><p></p></div>')
self.assertEqual(output_html('div>{foo}+p+{bar}', profile), '<div>foo<p></p>bar</div>')
self.assertEqual(output_html('div>{foo}>p', profile), '<div>foo<p></p></div>')
self.assertEqual(output_html('div>{<!-- ${0} -->}>p', profile), '<div><!-- <p></p> --></div>')
def test_format_specific_nodes(self):
self.assertEqual(output_html('{<!DOCTYPE html>}+html>(head>meta[charset=${charset}]/+title{${1:Document}})+body', field),
'<!DOCTYPE html>\n<html>\n<head>\n\t<meta charset="UTF-8">\n\t<title>${2:Document}</title>\n</head>\n<body>\n\t${3}\n</body>\n</html>')
def test_comment(self):
opt = Config({ 'options': { 'comment.enabled': True } })
self.assertEqual(output_html('ul>li.item', opt), '<ul>\n\t<li class="item"></li>\n\t<!-- /.item -->\n</ul>')
self.assertEqual(output_html('div>ul>li.item#foo', opt), '<div>\n\t<ul>\n\t\t<li class="item" id="foo"></li>\n\t\t<!-- /#foo.item -->\n\t</ul>\n</div>')
opt.options['comment.after'] = ' { [%ID] }'
self.assertEqual(output_html('div>ul>li.item#foo', opt), '<div>\n\t<ul>\n\t\t<li class="item" id="foo"></li> { %foo }\n\t</ul>\n</div>')
def output_haml(abbr: str, config=default_config):
return haml(parse(abbr, config), config)
class TestHAMLFormat(unittest.TestCase):
def test_basic(self):
self.assertEqual(output_haml('div#header>ul.nav>li[title=test].nav-item*2'),
'#header\n\t%ul.nav\n\t\t%li.nav-item(title="test") \n\t\t%li.nav-item(title="test") ')
# https://github.com/emmetio/emmet/issues/446
self.assertEqual(output_haml('li>a'), '%li\n\t%a(href="") ')
self.assertEqual(output_haml('div#foo[data-n1=v1 title=test data-n2=v2].bar'),
'#foo.bar(data-n1="v1" title="test" data-n2="v2") ')
profile = create_profile({ 'output.compactBoolean': True })
self.assertEqual(output_haml('input[disabled. foo title=test]/', profile), '%input(type="text" disabled foo="" title="test")/')
profile = create_profile({ 'output.compactBoolean': False })
self.assertEqual(output_haml('input[disabled. foo title=test]/', profile), '%input(type="text" disabled=true foo="" title="test")/')
def test_nodes_with_text(self):
self.assertEqual(output_haml('{Text 1}'), 'Text 1')
self.assertEqual(output_haml('span{Text 1}'), '%span Text 1')
self.assertEqual(output_haml('span{Text 1}>b{Text 2}'), '%span Text 1\n\t%b Text 2')
self.assertEqual(output_haml('span{Text 1\nText 2}>b{Text 3}'), '%span\n\tText 1 |\n\tText 2 |\n\t%b Text 3')
self.assertEqual(output_haml('div>span{Text 1\nText 2\nText 123}>b{Text 3}'), '%div\n\t%span\n\t\tText 1 |\n\t\tText 2 |\n\t\tText 123 |\n\t\t%b Text 3')
def test_generate_fields(self):
self.assertEqual(output_haml('a[href]', field), '%a(href="${1}") ${2}')
self.assertEqual(output_haml('a[href]*2', field), '%a(href="${1}") ${2}\n%a(href="${3}") ${4}')
self.assertEqual(output_haml('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}${4} ${5:foo} ${6:bar}')
self.assertEqual(output_haml('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar foo bar')
self.assertEqual(output_haml('ul>li*2', field), '%ul\n\t%li ${1}\n\t%li ${2}')
self.assertEqual(output_haml('div>img[src]/', field), '%div\n\t%img(src="${1}" alt="${2}")/')
def output_pug(abbr: str, config=default_config):
return pug(parse(abbr, config), config)
class TestPUGFormat(unittest.TestCase):
def test_basic(self):
self.assertEqual(output_pug('div#header>ul.nav>li[title=test].nav-item*2'),
'#header\n\tul.nav\n\t\tli.nav-item(title="test") \n\t\tli.nav-item(title="test") ')
self.assertEqual(output_pug('div#foo[data-n1=v1 title=test data-n2=v2].bar'),
'#foo.bar(data-n1="v1", title="test", data-n2="v2") ')
self.assertEqual(output_pug('input[disabled. foo title=test]'), 'input(type="text", disabled, foo="", title="test")')
# Use closing slash for XML output format
        self.assertEqual(output_pug('input[disabled. foo title=test]', create_profile({ 'output.selfClosingStyle': 'xml' })), 'input(type="text", disabled, foo="", title="test")/')
    def test_nodes_with_text(self):
self.assertEqual(output_pug('{Text 1}'), 'Text 1')
self.assertEqual(output_pug('span{Text 1}'), 'span Text 1')
self.assertEqual(output_pug('span{Text 1}>b{Text 2}'), 'span Text 1\n\tb Text 2')
self.assertEqual(output_pug('span{Text 1\nText 2}>b{Text 3}'), 'span\n\t| Text 1\n\t| Text 2\n\tb Text 3')
self.assertEqual(output_pug('div>span{Text 1\nText 2}>b{Text 3}'), 'div\n\tspan\n\t\t| Text 1\n\t\t| Text 2\n\t\tb Text 3')
def test_generate_fields(self):
self.assertEqual(output_pug('a[href]', field), 'a(href="${1}") ${2}')
self.assertEqual(output_pug('a[href]*2', field), 'a(href="${1}") ${2}\na(href="${3}") ${4}')
self.assertEqual(output_pug('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}${4} ${5:foo} ${6:bar}')
self.assertEqual(output_pug('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar foo bar')
self.assertEqual(output_pug('ul>li*2', field), 'ul\n\tli ${1}\n\tli ${2}')
self.assertEqual(output_pug('div>img[src]/', field), 'div\n\timg(src="${1}", alt="${2}")')
def output_slim(abbr: str, config=default_config):
return slim(parse(abbr, config), config)
class TestSlimFormat(unittest.TestCase):
def test_basic(self):
self.assertEqual(output_slim('div#header>ul.nav>li[title=test].nav-item*2'),
'#header\n\tul.nav\n\t\tli.nav-item title="test" \n\t\tli.nav-item title="test" ')
self.assertEqual(output_slim('div#foo[data-n1=v1 title=test data-n2=v2].bar'),
'#foo.bar data-n1="v1" title="test" data-n2="v2" ')
def test_nodes_with_text(self):
self.assertEqual(output_slim('{Text 1}'), 'Text 1')
self.assertEqual(output_slim('span{Text 1}'), 'span Text 1')
self.assertEqual(output_slim('span{Text 1}>b{Text 2}'), 'span Text 1\n\tb Text 2')
self.assertEqual(output_slim('span{Text 1\nText 2}>b{Text 3}'), 'span\n\t| Text 1\n\t| Text 2\n\tb Text 3')
self.assertEqual(output_slim('div>span{Text 1\nText 2}>b{Text 3}'), 'div\n\tspan\n\t\t| Text 1\n\t\t| Text 2\n\t\tb Text 3')
def test_generate_fields(self):
self.assertEqual(output_slim('a[href]', field), 'a href="${1}" ${2}')
self.assertEqual(output_slim('a[href]*2', field), 'a href="${1}" ${2}\na href="${3}" ${4}')
self.assertEqual(output_slim('{${0} ${1:foo} ${2:bar}}*2', field), '${1} ${2:foo} ${3:bar}${4} ${5:foo} ${6:bar}')
self.assertEqual(output_slim('{${0} ${1:foo} ${2:bar}}*2'), ' foo bar foo bar')
self.assertEqual(output_slim('ul>li*2', field), 'ul\n\tli ${1}\n\tli ${2}')
self.assertEqual(output_slim('div>img[src]/', field), 'div\n\timg src="${1}" alt="${2}"/')
```
#### File: py-emmet/tests/test_output_stream.py
```python
import unittest
import sys
sys.path.append('../')
from emmet.output_stream import OutputStream, tag_name, attr_name, self_close, is_inline
from emmet.config import Config
class TestOutputStream(unittest.TestCase):
def test_stream(self):
conf = Config({'options': {'output.baseIndent': '>>'}})
out = OutputStream(conf.options)
out.push('aaa')
self.assertEqual(out.value, 'aaa')
self.assertEqual(out.line, 0)
self.assertEqual(out.column, 3)
self.assertEqual(out.offset, 3)
out.push_string('bbb')
self.assertEqual(out.value, 'aaabbb')
self.assertEqual(out.line, 0)
self.assertEqual(out.column, 6)
self.assertEqual(out.offset, 6)
# Add text with newlines
out.push_string('ccc\nddd')
self.assertEqual(out.value, 'aaabbbccc\n>>ddd')
self.assertEqual(out.line, 1)
self.assertEqual(out.column, 5)
self.assertEqual(out.offset, 15)
# Add newline with indent
out.level += 1
out.push_newline(True)
self.assertEqual(out.value, 'aaabbbccc\n>>ddd\n>>\t')
self.assertEqual(out.line, 2)
self.assertEqual(out.column, 3)
self.assertEqual(out.offset, 19)
def test_profile_tag_name(self):
as_is = Config({ 'options': { 'output.tagCase': '' } })
upper = Config({ 'options': { 'output.tagCase': 'upper' } })
lower = Config({ 'options': { 'output.tagCase': 'lower' } })
self.assertEqual(tag_name('Foo', as_is), 'Foo')
self.assertEqual(tag_name('bAr', as_is), 'bAr')
self.assertEqual(tag_name('Foo', upper), 'FOO')
self.assertEqual(tag_name('bAr', upper), 'BAR')
self.assertEqual(tag_name('Foo', lower), 'foo')
self.assertEqual(tag_name('bAr', lower), 'bar')
def test_attribute_name(self):
as_is = Config({ 'options': { 'output.attributeCase': '' } })
upper = Config({ 'options': { 'output.attributeCase': 'upper' } })
lower = Config({ 'options': { 'output.attributeCase': 'lower' } })
self.assertEqual(attr_name('Foo', as_is), 'Foo')
self.assertEqual(attr_name('bAr', as_is), 'bAr')
self.assertEqual(attr_name('Foo', upper), 'FOO')
self.assertEqual(attr_name('bAr', upper), 'BAR')
self.assertEqual(attr_name('Foo', lower), 'foo')
self.assertEqual(attr_name('bAr', lower), 'bar')
def test_self_close(self):
html = Config({ 'options': { 'output.selfClosingStyle': 'html' } })
xhtml = Config({ 'options': { 'output.selfClosingStyle': 'xhtml' } })
xml = Config({ 'options': { 'output.selfClosingStyle': 'xml' } })
self.assertEqual(self_close(html), '')
self.assertEqual(self_close(xhtml), ' /')
self.assertEqual(self_close(xml), '/')
def test_inline_elements(self):
config = Config()
self.assertEqual(is_inline('a', config), True)
self.assertEqual(is_inline('b', config), True)
self.assertEqual(is_inline('c', config), False)
``` |
{
"source": "JingyunLiang/FKP",
"score": 2
} |
#### File: FKP/DIPFKP/main.py
```python
import os
import argparse
import torch
import sys
import numpy as np
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from util import read_image, im2tensor01, map2tensor, tensor2im01, analytic_kernel, kernel_shift, evaluation_dataset
from config.configs import Config
from model.model import DIPFKP
# for nonblind SR
sys.path.append('../')
from NonblindSR.usrnet import USRNet
'''
# ------------------------------------------------
# main.py for DIP-KP
# ------------------------------------------------
'''
def train(conf, lr_image):
''' trainer for DIPFKP, etc.'''
model = DIPFKP(conf, lr_image)
kernel, sr = model.train()
return kernel, sr
def create_params(filename, args):
''' pass parameters to Config '''
params = ['--model', args.model,
'--input_image_path', os.path.join(args.input_dir, filename),
'--output_dir_path', os.path.abspath(args.output_dir),
'--path_KP', os.path.abspath(args.path_KP),
'--sf', args.sf]
if args.SR:
params.append('--SR')
if args.real:
params.append('--real')
return params
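# Illustrative example of the resulting list (paths shown are hypothetical placeholders):
# ['--model', 'DIPFKP', '--input_image_path', '<input_dir>/<filename>',
#  '--output_dir_path', '<abs output dir>', '--path_KP', '<abs FKP_x2.pt>', '--sf', '2']
# plus '--SR' and/or '--real' when those flags are set.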
def main():
# Parse the command line arguments
prog = argparse.ArgumentParser()
prog.add_argument('--model', type=str, default='DIPFKP', help='models: DIPFKP, DIPSoftmax, DoubleDIP.')
prog.add_argument('--dataset', '-d', type=str, default='Set5',
help='dataset, e.g., Set5.')
prog.add_argument('--sf', type=str, default='2', help='The wanted SR scale factor')
prog.add_argument('--path-nonblind', type=str, default='../data/pretrained_models/usrnet_tiny.pth',
help='path for trained nonblind model')
prog.add_argument('--SR', action='store_true', default=False, help='when activated - nonblind SR is performed')
prog.add_argument('--real', action='store_true', default=False, help='if the input is real image')
# to be overwritten automatically
prog.add_argument('--path-KP', type=str, default='../data/pretrained_models/FKP_x2.pt',
help='path for trained kernel prior')
prog.add_argument('--input-dir', '-i', type=str, default='../data/datasets/Set5/DIPFKP_lr_x2',
help='path to image input directory.')
prog.add_argument('--output-dir', '-o', type=str,
default='../data/log_KernelGANFKP/Set5_DIPFKP_lr_x2', help='path to image output directory')
args = prog.parse_args()
    # overwriting paths
args.path_KP = '../data/pretrained_models/FKP_x{}.pt'.format(args.sf)
args.input_dir = '../data/datasets/{}/DIPFKP_lr_x{}'.format(args.dataset, args.sf)
args.output_dir = '../data/log_DIPFKP/{}_{}_lr_x{}'.format(args.dataset, args.model, args.sf)
# load nonblind model
if args.SR:
netG = USRNet(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64],
nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose")
netG.load_state_dict(torch.load(args.path_nonblind), strict=True)
netG.eval()
for key, v in netG.named_parameters():
v.requires_grad = False
netG = netG.cuda()
filesource = os.listdir(os.path.abspath(args.input_dir))
filesource.sort()
for filename in filesource[:]:
print(filename)
# kernel estimation
conf = Config().parse(create_params(filename, args))
lr_image = im2tensor01(read_image(os.path.join(args.input_dir, filename))).unsqueeze(0)
# crop the image to 960x960 due to memory limit
if 'DIV2K' in args.input_dir:
crop = int(960 / 2 / conf.sf)
lr_image = lr_image[:, :, lr_image.shape[2] // 2 - crop: lr_image.shape[2] // 2 + crop,
lr_image.shape[3] // 2 - crop: lr_image.shape[3] // 2 + crop]
kernel, sr_dip = train(conf, lr_image)
plt.imsave(os.path.join(conf.output_dir_path, '%s.png' % conf.img_name), tensor2im01(sr_dip), vmin=0,
vmax=1., dpi=1)
# nonblind SR
if args.SR:
kernel = map2tensor(kernel)
sr = netG(lr_image, torch.flip(kernel, [2, 3]), int(args.sf),
(10 if args.real else 0) / 255 * torch.ones([1, 1, 1, 1]).cuda())
plt.imsave(os.path.join(conf.output_dir_path, '%s.png' % conf.img_name), tensor2im01(sr), vmin=0,
vmax=1., dpi=1)
if not conf.verbose:
evaluation_dataset(args.input_dir, conf)
prog.exit(0)
if __name__ == '__main__':
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = True
main()
``` |
{
"source": "jingyuyao/elitetraderoutes",
"score": 3
} |
#### File: elitetraderoutes/elitedata/models.py
```python
from django.db import models
from common.models import UUIDModel
# Create your models here.
class System(UUIDModel):
name = models.CharField(max_length=100)
needs_permit = models.NullBooleanField(default=False)
primary_economy = models.CharField(max_length=100, null=True)
population = models.BigIntegerField(default=0, null=True)
security = models.CharField(max_length=100, null=True)
allegiance = models.CharField(max_length=100, null=True)
government = models.CharField(max_length=100, null=True)
state = models.CharField(max_length=100, null=True)
faction = models.CharField(max_length=100, null=True)
power_control_faction = models.CharField(max_length=100, null=True)
x = models.FloatField()
y = models.FloatField()
z = models.FloatField()
updated_at = models.BigIntegerField()
def __str__(self):
return self.name
class Station(UUIDModel):
name = models.CharField(max_length=100)
system = models.ForeignKey(System, related_name="stations", editable=False)
type = models.CharField(max_length=100, null=True, blank=True)
max_landing_pad_size = models.CharField(max_length=100, null=True, blank=True) # L or M
distance_to_star = models.BigIntegerField(default=0, null=True, blank=True) # Might change depending on orbit???
allegiance = models.CharField(max_length=100, null=True, blank=True)
government = models.CharField(max_length=100, null=True, blank=True)
state = models.CharField(max_length=100, null=True, blank=True)
faction = models.CharField(max_length=100, null=True, blank=True)
has_repair = models.NullBooleanField(default=False, blank=True)
has_blackmarket = models.NullBooleanField(default=False, blank=True)
has_refuel = models.NullBooleanField(default=False, blank=True)
has_rearm = models.NullBooleanField(default=False, blank=True)
has_shipyard = models.NullBooleanField(default=False, blank=True)
has_outfitting = models.NullBooleanField(default=False, blank=True)
has_commodities = models.NullBooleanField(default=False, blank=True)
updated_at = models.BigIntegerField()
def __str__(self):
return self.name
class Commodity(UUIDModel):
name = models.CharField(max_length=100)
average_price = models.IntegerField(default=0, null=True, blank=True)
category_id = models.IntegerField(default=0, editable=False)
category_name = models.CharField(max_length=100, editable=False)
def __str__(self):
return self.name
class StationCommodity(UUIDModel):
commodity = models.ForeignKey(Commodity, related_name='station_commodities', editable=False)
station = models.ForeignKey(Station, related_name='station_commodities', editable=False)
buy_price = models.IntegerField(default=0)
supply = models.IntegerField(default=0)
supply_level = models.CharField(max_length=100, null=True, blank=True)
sell_price = models.IntegerField(default=0)
demand = models.IntegerField(default=0)
demand_level = models.CharField(max_length=100, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-created']
def __str__(self):
return '%s/%s(%i/%i)' % (str(self.station), str(self.commodity), int(self.buy_price), int(self.sell_price))
```
#### File: elitetraderoutes/elitedata/views.py
```python
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from common import permissions
from .models import System, Station, Commodity, StationCommodity
from .serializers import CommoditySerializer, StationSerializer, \
SystemSerializer, MinimizedSystemSerializer, StationCommoditySerializer
import django_filters
from common.views import WrappedModelViewSet, wrap_response
# Create your views here.
class SystemViewSet(WrappedModelViewSet):
permission_classes = (permissions.IsAdminOrReadOnly,)
queryset = System.objects.all()
serializer_class = SystemSerializer
search_fields = ('name',)
template_name = 'frontend/system/instance.html'
list_template_name = 'frontend/system/list.html'
@detail_route()
def stations(self, request, *args, **kwargs):
"""
A route to display only the stations this System contains.
:param request:
:param pk:
:return:
"""
system = self.get_object()
stations = Station.objects.filter(system=system)
serializer = StationSerializer(stations, context={'request': request}, many=True)
return wrap_response(Response({'results': serializer.data}, template_name='frontend/system/list_station.html'))
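    # With DRF's default router this detail route is exposed at
    # /<registered prefix>/<pk>/stations/ (the prefix itself is configured elsewhere).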
@detail_route()
def min(self, request, *args, **kwargs):
"""
A route to display the minimized System view.
:param request:
:param pk:
:return:
"""
serializer = MinimizedSystemSerializer(self.get_object(), context={'request': request})
data = serializer.data
data['min'] = True
return wrap_response(Response(data))
class StationViewSet(WrappedModelViewSet):
class StationFilter(django_filters.FilterSet):
distance_to_star = django_filters.NumberFilter(lookup_type='lt')
class Meta:
model = Station
fields = ('distance_to_star',)
permission_classes = (permissions.IsAdminOrReadOnly,)
queryset = Station.objects.all()
serializer_class = StationSerializer
filter_class = StationFilter
search_fields = ('name', )
template_name = 'frontend/station/instance.html'
list_template_name = 'frontend/station/list.html'
class CommodityViewSet(WrappedModelViewSet):
class CommodityFilter(django_filters.FilterSet):
average_price = django_filters.NumberFilter(lookup_type='lt')
name = django_filters.CharFilter(lookup_type='icontains')
class Meta:
model = Commodity
fields = ('average_price', 'name')
permission_classes = (permissions.IsAdminOrReadOnly,)
queryset = Commodity.objects.all()
serializer_class = CommoditySerializer
filter_class = CommodityFilter
search_fields = ('name',)
template_name = 'frontend/commodity/instance.html'
list_template_name = 'frontend/commodity/list.html'
class StationCommodityViewSet(WrappedModelViewSet):
class StationCommodityFilter(django_filters.FilterSet):
class Meta:
model = StationCommodity
fields = {
'station': ['exact'],
'commodity': ['exact'],
'supply_level': ['exact'],
'demand_level': ['exact'],
'buy_price': ['lt', 'gt'],
'sell_price': ['lt', 'gt'],
'supply': ['lt', 'gt'],
'demand': ['lt', 'gt'],
}
permission_classes = (permissions.IsAdminOrReadOnly,)
queryset = StationCommodity.objects.all()
serializer_class = StationCommoditySerializer
template_name = 'frontend/station_commodity/instance.html'
list_template_name = 'frontend/station_commodity/list.html'
filter_class = StationCommodityFilter
search_fields = ('commodity__name', 'station__name', 'commodity__category_name')
```
#### File: elitetraderoutes/traderoutes/serializers.py
```python
from rest_framework import serializers
from .models import Route, Connection
from elitedata.models import Station, System, Commodity
from elitedata.serializers import MinimizedSystemSerializer, StationSerializer, CommoditySerializer
from common.serializers import IDHyperlinkedModelSerializer
class BaseConnectionSerializer(IDHyperlinkedModelSerializer):
"""
Base serializer for Connection model.
"""
route = serializers.HyperlinkedRelatedField(view_name='route-detail', queryset=Route.objects.all())
class Meta:
model = Connection
class WriteConnectionSerializer(BaseConnectionSerializer):
"""
The write serializer for the Connection class.
    This serializer validates that the start and destination stations belong to the
    specified start and destination systems.
    Note:
        The querysets for the system and station fields cover all objects in their
        respective models. This makes the default API view extremely slow, since it loads
        every possible choice for selection in the view. Plain JSON calls are unaffected.
"""
start_system = serializers.HyperlinkedRelatedField(view_name='system-detail', queryset=System.objects.all())
start_station = serializers.HyperlinkedRelatedField(view_name='station-detail', queryset=Station.objects.all())
destination_system = serializers.HyperlinkedRelatedField(view_name='system-detail', queryset=System.objects.all())
destination_station = serializers.HyperlinkedRelatedField(view_name='station-detail', queryset=Station.objects.all())
commodity = serializers.HyperlinkedRelatedField(view_name='commodity-detail', queryset=Commodity.objects.all())
def validate(self, data):
"""
Make sure start and destination stations are in start and destination systems, respectively.
:param data:
:return:
"""
self._validate_station(data['start_system'], data['start_station'])
self._validate_station(data['destination_system'], data['destination_station'])
return data
@staticmethod
def _validate_station(system, station):
"""
Validates whether 'station' is in 'system'.
:param system:
:param station:
:return:
"""
if station not in Station.objects.filter(system=system):
raise serializers.ValidationError("Station(%s) not in system(%s)." % (str(station), str(system)))
class ReadConnectionSerializer(BaseConnectionSerializer):
"""
The read serializer for the Connection model.
This serializer shows the referenced models as nested objects.
"""
start_system = MinimizedSystemSerializer()
start_station = StationSerializer()
destination_system = MinimizedSystemSerializer()
destination_station = StationSerializer()
commodity = CommoditySerializer()
distance = serializers.SerializerMethodField() # Read only
@staticmethod
def get_distance(obj):
return obj.distance()
class BaseRouteSerializer(IDHyperlinkedModelSerializer):
"""
    Base serializer for Route. Subclasses need to provide a value for connections.
"""
owner_name = serializers.ReadOnlyField(source='owner.username')
@property
def connections(self):
"""
A serializer for Connection
"""
raise NotImplementedError
class Meta:
model = Route
class RouteSerializer(BaseRouteSerializer):
"""
Serializer for the Route.
Includes detailed information on all the Connection the Route has.
"""
connections = ReadConnectionSerializer(many=True, read_only=True)
class MinimizedRouteSerializer(BaseRouteSerializer):
"""
Minimized serializer for Route.
    Only contains a link to each Connection the Route has.
"""
connections = serializers.HyperlinkedRelatedField(view_name="connection-detail", many=True, read_only=True)
"""
Why not use primary key relationships?
- Avoids building URL
- No way to misrepresent information
"""
# class ConnectionSerializer(serializers.ModelSerializer):
# route = serializers.PrimaryKeyRelatedField(queryset=Route.objects.all())
# start_system = serializers.PrimaryKeyRelatedField(queryset=System.objects.all())
# start_station = serializers.PrimaryKeyRelatedField(queryset=Station.objects.all())
# destination_system = serializers.PrimaryKeyRelatedField(queryset=System.objects.all())
# destination_station = serializers.PrimaryKeyRelatedField(queryset=Station.objects.all())
# commodity = serializers.PrimaryKeyRelatedField(queryset=Commodity.objects.all())
#
# class Meta:
# model = Connection
# fields = ('pk', 'route',
# 'start_system', 'start_station',
# 'destination_system', 'destination_station',
# 'commodity', 'buy_price', 'sell_price', 'supply', 'demand')
``` |
{
"source": "Jingyuying/pc_compress",
"score": 2
} |
#### File: pc_compress/src/compress.py
```python
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
import os
import numpy as np
import tensorflow as tf
import argparse
import compression_model_512
import compression_model_1024
import compression_model_2048
import pc_io
import gzip
from tqdm import tqdm
import data_provider
import open3d as o3d
import glob
import fpzip
np.random.seed(42)
tf.set_random_seed(42)
################################################################################
### Script
################################################################################
TYPE = np.uint16
DTYPE = np.dtype(np.uint16)
SHAPE_LEN = 3
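# Each encoded patch is stored as a uint16 length prefix (2 bytes, native byte order)
# followed by the raw entropy-coded string; see compress() below and the writer in __main__.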
def compress(nn_output):
string = nn_output['string']
str_len = len(string)
byte_len = np.array(str_len, dtype=TYPE).tobytes()
representation = byte_len + string
return representation
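# Illustrative sketch (not part of the original pipeline): how the length-prefixed
# records written in __main__ below could be read back. It assumes the gzip stream
# starts with three uint16 patch counts followed by compress() records.
def read_representations_example(path):
    with gzip.open(path, 'rb') as f:
        blob = f.read()
    item = DTYPE.itemsize  # 2 bytes per uint16
    counts = np.frombuffer(blob[:3 * item], dtype=TYPE)  # patch counts for 512/1024/2048
    offset = 3 * item
    strings = []
    while offset < len(blob):
        (str_len,) = np.frombuffer(blob[offset:offset + item], dtype=TYPE)
        offset += item
        strings.append(blob[offset:offset + int(str_len)])
        offset += int(str_len)
    return counts, strings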
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='compress.py',
description='Compress a file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--input_dir',
default='../ply_data',
help='Input directory.')
parser.add_argument(
'--input_pattern',
default='*.ply',
help='Mesh detection pattern.')
parser.add_argument(
'--source_extension',
help='Mesh files extension',
default='.ply')
parser.add_argument(
'--output_dir',
default='../output',
help='Output directory.')
parser.add_argument(
'--checkpoint_dir_512',
default='/data/wenxuanzheng/mix_compress/512/00005',
help='Directory where to save/load model checkpoints.')
parser.add_argument(
'--checkpoint_dir_1024',
default='/data/wenxuanzheng/mix_compress/1024/00005',
help='Directory where to save/load model checkpoints.')
parser.add_argument(
'--checkpoint_dir_2048',
default='/data/wenxuanzheng/mix_compress/2048/00005',
help='Directory where to save/load model checkpoints.')
parser.add_argument(
'--batch_size', type=int, default=1,
help='Batch size.')
parser.add_argument(
'--knn',
type=int, help='k-nearest neighbors.', default=12)
parser.add_argument(
'--read_batch_size', type=int, default=1,
help='Batch size for parallel reading.')
parser.add_argument(
'--preprocess_threads', type=int, default=16,
help='Number of CPU threads to use for parallel decoding.')
args = parser.parse_args()
assert args.batch_size > 0, 'batch_size must be positive'
args.input_dir = os.path.normpath(args.input_dir)
len_input_dir = len(args.input_dir)
assert os.path.exists(args.input_dir), "Input directory not found"
input_glob = os.path.join(args.input_dir, args.input_pattern)
files = pc_io.get_files(input_glob)
assert len(files) > 0, "No input files found"
filenames = [x[len_input_dir+1:] for x in files]
output_files = [os.path.join(args.output_dir, x + '.bin') for x in filenames]
output_infs = [os.path.join(args.output_dir, x + '.inf.bin') for x in filenames]
pcQueue_512, pcQueue_1024, pcQueue_2048 = data_provider.load_data(
args.input_dir, args.source_extension)
centroid_list = []
furthest_distance_list = []
meta_matrix, nor_pc_512, nor_pc_1024, nor_pc_2048 = data_provider.gen_meta(centroid_list,furthest_distance_list,pcQueue_512,pcQueue_1024,pcQueue_2048)
compressed_bytes = fpzip.compress(meta_matrix.astype(np.float32), precision=0, order='C')
with open(output_infs[0], 'wb') as f:
f.write(compressed_bytes)
f.close()
def patch_compress(points,model,checkpoint):
estimator = tf.estimator.Estimator(
model_fn=model.model_fn,
model_dir=checkpoint,
params={
'checkpoint_dir': checkpoint,
'batch_size': args.batch_size,
'knn': args.knn,
})
result = estimator.predict(
input_fn=lambda: model.input_fn(points, args.batch_size, args.preprocess_threads, repeat=False, is_train = False),
predict_keys=['string', 'x_tilde', 'y_tilde'])
return result
output_list = []
output_list.append(patch_compress(nor_pc_512, compression_model_512, args.checkpoint_dir_512))
output_list.append(patch_compress(nor_pc_1024, compression_model_1024, args.checkpoint_dir_1024))
output_list.append(patch_compress(nor_pc_2048, compression_model_2048, args.checkpoint_dir_2048))
with gzip.open(output_files[0], "ab") as f:
patch_num_512 = len(nor_pc_512)
patch_num_1024 = len(nor_pc_1024)
patch_num_2048 = len(nor_pc_2048)
patch_512_byte = np.array(patch_num_512, dtype=TYPE).tobytes()
patch_1024_byte = np.array(patch_num_1024, dtype=TYPE).tobytes()
patch_2048_byte = np.array(patch_num_2048, dtype=TYPE).tobytes()
f.write(patch_512_byte + patch_1024_byte + patch_2048_byte)
for i in range(len(output_list)):
for ret in output_list[i]:
representation = compress(ret)
f.write(representation)
```
#### File: pc_compress/src/focal_loss.py
```python
import tensorflow as tf
import tensorflow.keras.backend as K
import numpy as np
import torch
import sparse
tf.enable_eager_execution()
def focal_loss(y_true, y_pred, gamma=2, alpha=0.95):
pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
pt_1 = K.clip(pt_1, 1e-3, .999)
pt_0 = K.clip(pt_0, 1e-3, .999)
return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.sum((1-alpha) * K.pow( pt_0, gamma) * K.log(1. - pt_0))
# return -(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - ((1-alpha) * K.pow( pt_0, gamma) * K.log(1. - pt_0)), -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.sum((1-alpha) * K.pow( pt_0, gamma) * K.log(1. - pt_0))
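# The function above computes the standard binary focal loss,
#   FL = -sum[ alpha * (1 - p)^gamma * log(p) ]        over positives
#        -sum[ (1 - alpha) * p^gamma * log(1 - p) ]    over negatives,
# where gamma down-weights easy examples and alpha balances the two classes.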
def pc_to_tf(points, dense_tensor_shape):
x = points
x = tf.pad(x, [[0, 0], [1, 0]])
print(x)
st = tf.sparse.SparseTensor(x, tf.ones_like(x[:, 0]), dense_tensor_shape)
print(st)
return st
def process_x(x, dense_tensor_shape):
x = tf.sparse.to_dense(x, default_value=0, validate_indices=False)
x.set_shape(dense_tensor_shape)
x = tf.cast(x, tf.float32)
return x
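# Together, pc_to_tf/process_x turn integer point coordinates into a dense binary
# occupancy grid of shape dense_tensor_shape (1 where a voxel holds at least one point).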
if __name__ == '__main__':
coords = np.random.randint(0, 64 - 1, size=(3, 4096))
dense_tensor_shape = np.array([1, 64, 64, 64])
points = tf.constant(coords,dtype=tf.int64)
points = tf.transpose(points,(1,0))
data = pc_to_tf(points,dense_tensor_shape)
print(data)
data = process_x(data,dense_tensor_shape)
data = data.numpy()
print(np.sum(data==1))
coords = torch.from_numpy(coords)
    coo_metrix = torch.sparse_coo_tensor(coords, torch.ones(coords.shape[1]), size=(64, 64, 64))
coo = coo_metrix.to_dense()
coo = coo.numpy()
print(np.sum(coo == 1))
# points = np.array([[0, 1, 2],
# [2, 1, 2],
# [0, 1, 0],
# [0, 1, 1],
# [2, 1, 0],
# [1, 1, 2]],dtype=np.int32)
# print(points.T.shape)
# x = sparse.COO(coords, 1, shape=((64,) * 3))
#
# y = sparse.tensordot(x, x, axes=((2, 0), (0, 1)))
#
# z = y.sum(axis=(0, 1, 2))
# print(z.todense())
#
# data = np.array(x.todense())
# print(np.sum(data == 1))
n = 64
ndims = 3
nnz = 1000
coords = np.random.randint(0, n - 1, size=(ndims, nnz))
s = sparse.SparseArray.fromdense(coords)
print(s)
# print(coords)
# data = np.random.random(nnz)
# print(data)
x = sparse.COO(coords, 1, shape=((n,) * ndims))
# y = sparse.tensordot(x, x, axes=((3, 0), (1, 2)))
# z = y.sum(axis=(0, 1, 2))
data = np.array(x.todense())
print(data.shape)
print(np.sum(data == 1))
# print(data)
# print(data.shape)
# x = tf.constant([[[[0.6, 0.3, 0.7],
# [0.6, 0.3, 0.7],
# [0.6, 0.3, 0.7]],
# [[0.6, 0.3, 0.7],
# [0.6, 0.3, 0.7],
# [0.6, 0.3, 0.7]],
# [[0.6, 0.3, 0.7],
# [0.6, 0.3, 0.7],
# [0.6, 0.3, 0.7]]]])
# print(x.shape)
# x = tf.random_normal((3,3,3))
# y = np.random.randint(0,2,size=(3,3,3))
# y = tf.constant([[[[1,0,1],
# [1,0,1],
# [1,0,1]],
# [[1,0,1],
# [1,0,1],
# [1,0,1]],
# [[1,0,1],
# [1,0,1],
# [1,0,1]]]])
# x = tf.expand_dims(x,axis=0)
# y = tf.expand_dims(y,axis=0)
# print(focal_loss(data,x))
```
#### File: pc_compress/src/pc_utils.py
```python
import tensorflow as tf
import scipy.sparse
from sklearn.neighbors import KDTree
import numpy as np
import math
import multiprocessing as multiproc
from functools import partial
def GridSampling(batch_size, meshgrid):
'''
output Grid points as a NxD matrix
params = {
'batch_size': 8
'meshgrid': [[-0.3,0.3,45],[-0.3,0.3,45]]
}
'''
ret = np.meshgrid(*[np.linspace(it[0], it[1], num=it[2]) for it in meshgrid])
ndim = len(meshgrid)
grid = np.zeros((np.prod([it[2] for it in meshgrid]), ndim), dtype=np.float32) # MxD 2025x2
for d in range(ndim):
grid[:, d] = np.reshape(ret[d], -1)
g = np.repeat(grid[np.newaxis, ...], repeats=batch_size, axis=0)
return g
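# GridSampling returns an array of shape (batch_size, prod(num_i), ndim): the same
# regularly spaced grid repeated for every element of the batch.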
def jitter_perturbation_point_cloud(batch_data, sigma=0.005, clip=0.02):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
assert (clip > 0)
jittered_data = tf.clip_by_value(sigma * tf.random_normal(tf.shape(batch_data)), -1 * clip, clip)
jittered_data = tf.concat([batch_data[:, :, :3] + jittered_data[:, :, :3], batch_data[:, :, 3:]], axis=-1)
return jittered_data
def rotate_point_cloud_and_gt(batch_data, batch_gt=None):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
# batch_data = tf.expand_dims(batch_data,axis=0)
batch_size, num_point, num_channels = batch_data.get_shape().as_list()
angles = tf.random_uniform((batch_size, 3), dtype=tf.float32) * 2 * np.pi
cos_x, cos_y, cos_z = tf.split(tf.cos(angles), 3, axis=-1) # 3*[B, 1]
sin_x, sin_y, sin_z = tf.split(tf.sin(angles), 3, axis=-1) # 3*[B, 1]
one = tf.ones_like(cos_x, dtype=tf.float32)
zero = tf.zeros_like(cos_x, dtype=tf.float32)
# [B, 3, 3]
Rx = tf.stack(
[tf.concat([one, zero, zero], axis=1),
tf.concat([zero, cos_x, sin_x], axis=1),
tf.concat([zero, -sin_x, cos_x], axis=1)], axis=1)
Ry = tf.stack(
[tf.concat([cos_y, zero, -sin_y], axis=1),
tf.concat([zero, one, zero], axis=1),
tf.concat([sin_y, zero, cos_y], axis=1)], axis=1)
Rz = tf.stack(
[tf.concat([cos_z, sin_z, zero], axis=1),
tf.concat([-sin_z, cos_z, zero], axis=1),
tf.concat([zero, zero, one], axis=1)], axis=1)
rotation_matrix = tf.matmul(Rz, tf.matmul(Ry, Rx))
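    # Compose the per-batch axis rotations into a single matrix R = Rz @ Ry @ Rx,
    # applied to xyz below (and to the normal channels 3:6 when present).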
if num_channels > 3:
batch_data = tf.concat(
[tf.matmul(batch_data[:, :, :3], rotation_matrix),
tf.matmul(batch_data[:, :, 3:], rotation_matrix),
batch_data[:, :, 6:]], axis=-1)
else:
batch_data = tf.matmul(batch_data, rotation_matrix)
if batch_gt is not None:
if num_channels > 3:
batch_gt = tf.concat(
[tf.matmul(batch_gt[:, :, :3], rotation_matrix),
tf.matmul(batch_gt[:, :, 3:], rotation_matrix),
batch_gt[:, :, 6:]], axis=-1)
else:
batch_gt = tf.matmul(batch_gt, rotation_matrix)
return batch_data
def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.03, angle_clip=0.09):
""" Randomly perturb the point clouds by small rotations
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
batch_size, num_point, num_channels = batch_data.get_shape().as_list()
angles = tf.clip_by_value(tf.random_normal((batch_size, 3)) * angle_sigma, -angle_clip, angle_clip)
cos_x, cos_y, cos_z = tf.split(tf.cos(angles), 3, axis=-1) # 3*[B, 1]
sin_x, sin_y, sin_z = tf.split(tf.sin(angles), 3, axis=-1) # 3*[B, 1]
one = tf.ones_like(cos_x, dtype=tf.float32)
zero = tf.zeros_like(cos_x, dtype=tf.float32)
# [B, 3, 3]
Rx = tf.stack(
[tf.concat([one, zero, zero], axis=1),
tf.concat([zero, cos_x, sin_x], axis=1),
tf.concat([zero, -sin_x, cos_x], axis=1)], axis=1)
Ry = tf.stack(
[tf.concat([cos_y, zero, -sin_y], axis=1),
tf.concat([zero, one, zero], axis=1),
tf.concat([sin_y, zero, cos_y], axis=1)], axis=1)
Rz = tf.stack(
[tf.concat([cos_z, sin_z, zero], axis=1),
tf.concat([-sin_z, cos_z, zero], axis=1),
tf.concat([zero, zero, one], axis=1)], axis=1)
rotation_matrix = tf.matmul(Rz, tf.matmul(Ry, Rx))
if num_channels > 3:
batch_data = tf.concat(
[tf.matmul(batch_data[:, :, :3], rotation_matrix),
tf.matmul(batch_data[:, :, 3:], rotation_matrix),
batch_data[:, :, 6:]], axis=-1)
else:
batch_data = tf.matmul(batch_data, rotation_matrix)
return batch_data
def random_scale_point_cloud_and_gt(batch_data, batch_gt=None, scale_low=0.5, scale_high=2):
""" Randomly scale the point cloud. Scale is per point cloud.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, scaled batch of point clouds
"""
B, N, C = batch_data.get_shape().as_list()
scales = tf.random_uniform((B, 1, 1), minval=scale_low, maxval=scale_high, dtype=tf.float32)
batch_data = tf.concat([batch_data[:, :, :3] * scales, batch_data[:, :, 3:]], axis=-1)
if batch_gt is not None:
batch_gt = tf.concat([batch_gt[:, :, :3] * scales, batch_gt[:, :, 3:]], axis=-1)
return batch_data
if __name__ == '__main__':
batch_size = 8
meshgrid = [[0, 1, 16], [0, 1, 16], [0, 1, 16]]
grid = GridSampling(batch_size, meshgrid)
```
#### File: tf_ops/devoxelization/test_decoxel.py
```python
import tensorflow as tf
from tensorflow.python.framework import ops
import sys
import os
import matplotlib.pyplot as plt
import random
from mpl_toolkits.mplot3d import Axes3D
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
devox_module = tf.load_op_library(os.path.join(BASE_DIR, 'tf_devoxelize.so'))
sys.path.append('../voxelization')
from tf_vox import group_voxel, voxelize, avg_voxel
sys.path.append('../../utils')
from tf_devox import trilinear_devoxelize
import tf_util
from plyfile import PlyData, PlyElement
sys.path.append(os.path.join(BASE_DIR, '..', 'tf_ops/pc_distance'))
import tf_nn_distance
tf.enable_eager_execution()
def get_cd_loss(pred, pc):
""" pred: BxNx3,
label: BxN, """
dists_forward,_,dists_backward,_ = tf_nn_distance.nn_distance(pred, pc)
# loss = tf.reduce_mean(dists_forward+dists_backward)
loss = (tf.reduce_mean(tf.sqrt(dists_forward)) + tf.reduce_mean(tf.sqrt(dists_backward)))/2
return loss
def write_ply(tensor, name):
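    # Test helper: dump the (1, 3, N) tensor as whitespace-separated points with
    # np.savetxt, then rewrite the file with an ASCII PLY header prepended.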
np.savetxt(name, np.squeeze(tensor.numpy().transpose(0, 2, 1)))
    num_points = tensor.numpy().shape[2]
file = os.path.join('.', name)
f = open(file, "r+")
lines = [line.lstrip().rstrip().replace(' ', ' ') for line in f]
vertex_nums, face_nums, _ = lines[1].split()
f = open(file, "w+")
f.seek(0)
head = "ply\nformat ascii 1.0\ncomment VCGLIB generated\nelement vertex " + '2048' + "\nproperty float x\nproperty float y\nproperty float z\nelement face " + str(0) + "\nproperty list uchar int vertex_indices\nend_header\n"
f.write(head)
for line in lines[:]:
f.write(line + "\n")
f.close()
if __name__ == '__main__':
import numpy as np
import time
for file in os.listdir('../../data'):
file = os.path.join('../../data',file)
plydata = PlyData.read(file)
a_data = []
for i in range(plydata['vertex'].count):
line = [plydata['vertex']['x'][i], plydata['vertex']['y'][i], plydata['vertex']['z'][i]]
a_data.append(line)
pc = np.array(a_data)
coords = pc[:, :3]
pts = tf.expand_dims(tf.constant(pc),axis=0)
# pts = tf.transpose(pts,perm=[0, 2, 1])
# pc_input = tf.placeholder(tf.float32, shape=(2, 2048, 3))
#
pts = np.tile(pts,reps=(1,1,1))
print(pts.shape) # (1, 2048, 3)
pts = tf.constant(pts)
voxel_feature = tf_util.pvcnn((pts,pts), scope='pvcnn_1',num_output_channels=3,
resolution=64, bn=True, is_training=True,
bn_decay=0.2)
print(voxel_feature.shape)
write_ply(tf.transpose(voxel_feature,perm=(0,2,1)), 'vox_pc.ply')
# # my_output = tf.multiply(pc_input, 0.01)
#
# loss = tf.Variable(tf.random_normal(shape=[1]))
# my_opt = tf.train.GradientDescentOptimizer(0.02)
# train_step = my_opt.minimize(loss)
#
#
# with tf.Session() as sess:
# tf.global_variables_initializer().run()
# for i in range(1000):
# sess.run(train_step, feed_dict={pc_input: pts})
# print('Loss = ' + str(sess.run(loss, feed_dict={pc_input: pts})))
exit(-1)
coords = tf.expand_dims(tf.constant(coords),axis=0)
coords = tf.transpose(coords, perm=[0, 2, 1])
vox_pc, norm_pc = voxelize(coords,8)
write_ply(norm_pc,'vox_orl.ply')
# np.savetxt('vox.ply',np.squeeze(vox_pc.numpy().transpose(0,2,1)))
# write_ply(vox_pc, 'vox_pc.ply')
vox_pc = tf.cast(vox_pc, tf.int32)
res = 8
# pts = tf.tile(pts,multiples=[4, 1, 1])
# vox_pc = tf.tile(vox_pc,multiples=[4, 1, 1])
indices, counts = group_voxel(res, pts, vox_pc)
print(indices)
print(counts)
pts = tf.transpose(pts,perm=(0,2,1))
out = avg_voxel(res,pts,indices,counts)
out = tf.transpose(out,perm=(0,2,1))
write_ply(out, 'vox_pc.ply')
print(out.shape)
print(norm_pc.shape)
# feature_list = []
outs, idn, wgts = trilinear_devoxelize(norm_pc, out, 8)
print(outs.shape)
print(idn)
write_ply(outs, 'out.ply')
```
#### File: tf_ops/devoxelization/tf_devoxel_op_test.py
```python
import tensorflow as tf
import numpy as np
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'voxelization'))
from tf_devox import trilinear_devoxelize
# tf.enable_eager_execution()
class GroupTriTest(tf.test.TestCase):
def test(self):
pass
def test_grad(self):
with self.test_session():
feature = np.random.random((8, 3, 512)).astype(np.float32)
pts = np.random.random((8, 3, 2048)).astype(np.float32)
pts = tf.constant(pts)
features = tf.constant(feature)
res = 8
out, inds, wgts = trilinear_devoxelize(pts, features, res)
# print(out)
# print(inds)
# print(wgts)
err = tf.test.compute_gradient_error(features, (8, 3, 512), out, (8, 3, 2048))
print(err)
if __name__=='__main__':
tf.test.main()
```
#### File: pc_compress/utils/pc_utils.py
```python
import tensorflow as tf
import scipy.sparse
from sklearn.neighbors import KDTree
import numpy as np
import math
import multiprocessing as multiproc
from functools import partial
tf.enable_eager_execution()
def edges2A(edges, n_nodes, mode='P', sparse_mat_type=scipy.sparse.csr_matrix):
'''
note: assume no (i,i)-like edge
edges: <2xE>
'''
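    # mode 'M' builds a plain binary adjacency matrix (used for max pooling);
    # mode 'P' weights edge (i, j) by 1/deg(i), i.e. a row-normalized propagation
    # matrix suitable for average pooling over neighbors.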
edges = np.array(edges).astype(int)
data_D = np.zeros(n_nodes, dtype=np.float32)
for d in range(n_nodes):
        data_D[d] = len(np.where(edges[0] == d)[0])  # number of neighbors of node d in the symmetric k-NN graph
if mode.upper() == 'M': # 'M' means max pooling, which use the same graph matrix as the adjacency matrix
data = np.ones(edges[0].shape[0], dtype=np.int32)
elif mode.upper() == 'P':
data = 1. / data_D[ edges[0] ]
else:
raise NotImplementedError("edges2A with unknown mode=" + mode)
return sparse_mat_type((data, edges), shape=(n_nodes, n_nodes))
def build_graph_core(batch_data):
try:
points = batch_data # 2048*3
n_points = points.shape[0]
edges, dis, cov, idx = knn_search(points)
edges_z = edges2A(edges, n_points, mode='M', sparse_mat_type=scipy.sparse.csr_matrix)
dis = np.asarray(dis)[:,1:]
dis = np.reshape(dis, -1)
return edges.T, edges_z, dis, cov, idx
except KeyboardInterrupt:
exit(-1)
def build_graph(point_cloud):
batch_size = point_cloud.shape[0]
num_point = point_cloud.shape[0]
point_dim = point_cloud.shape[1]
# point_cloud = point_cloud.eval()
batch_graph = []
Cov = np.zeros((batch_size, num_point, 9))
nn_idx = np.zeros((batch_size, num_point, 17))
    # pool = multiproc.Pool(2)  # process pool: keep at most two worker processes
# pool_func = partial(build_graph_core) # 先传一部分参数
# rets = pool.map(pool_func, point_cloud)
# pool.close()
rets = build_graph_core(point_cloud)
count = 0
for ret in rets:
point_graph, _, _, cov,graph_idx = ret
batch_graph.append(point_graph)
# Cov[count,:,:] = tf.convert_to_tensor(cov)
Cov[count,:,:] = cov
nn_idx[count,:,:] = graph_idx
count += 1
del rets
return batch_graph, Cov, nn_idx
def knn_search(point_cloud, knn=16, metric='minkowski', symmetric=True):
'''
Args:
:param point_cloud: Nx3
:param knn: k
    return:
        edges: 2xE symmetric edge index array of the k-NN graph
        dis: Nx(k+1) neighbor distances (self included)
        cov: Nx9 flattened local covariance matrix per point
        idx: Nx(k+1) neighbor indices (self included)
    '''
assert(knn > 0)
#num_point = point_cloud.get_shape()[0].value
num_point = point_cloud.shape[0]
kdt = KDTree(point_cloud, leaf_size=30, metric=metric)
dis, idx = kdt.query(point_cloud, k=knn+1, return_distance=True)
cov = np.zeros((num_point, 9))
# Adjacency Matrix
adjdict = dict()
for i in range(num_point):
nn_index = idx[i] # nearest point index
# compute local covariance matrix 3*3=>1*9
cov[i] = np.cov(np.transpose(point_cloud[nn_index[1:]])).reshape(-1)
for j in range(knn):
if symmetric:
adjdict[(i, nn_index[j+1])] = 1
adjdict[(nn_index[j+1], i)] = 1
else:
adjdict[(i, nn_index[j + 1])] = 1
edges = np.array(list(adjdict.keys()), dtype=int).T
return edges, dis, cov, idx
def GridSamplingLayer(batch_size, meshgrid):
'''
output Grid points as a NxD matrix
params = {
'batch_size': 8
'meshgrid': [[-0.3,0.3,45],[-0.3,0.3,45]]
}
'''
ret = np.meshgrid(*[np.linspace(it[0], it[1], num=it[2]) for it in meshgrid])
ndim = len(meshgrid)
grid = np.zeros((np.prod([it[2] for it in meshgrid]), ndim), dtype=np.float32) # MxD 2025x2
for d in range(ndim):
grid[:, d] = np.reshape(ret[d], -1)
g = np.repeat(grid[np.newaxis, ...], repeats=batch_size, axis=0)
return g
if __name__=='__main__':
# meshgrid = [[-0.3,0.3,45],[-0.5,0.5,45]]
# out = GridSamplingLayer(3, meshgrid)
# print('meshgrid; ', out)
pcd = np.random.random((2,2048,3))
batch_graph, Cov, idx = build_graph(pcd)
print(batch_graph, Cov, idx.shape)
# pcd2 = tf.Variable(tf.random_uniform([2,2048,1,3]))
# idx = tf.to_int32(idx)
# nn_point = tf.Variable(tf.zeros((2, 2048, 17,1 ,3)))
# for i in range(2):
# for j in range(2048):
# nn_point[i,j].assign(tf.gather(pcd2[i],idx[i, j, :]))
# print(tf.reduce_max(nn_point,axis=2))
# tf.enable_eager_execution()
# pcd = np.random.random((2, 2048, 3))
# batch_graph, Cov, idx = build_graph(pcd)
# pcd2 = np.random.randint(0, 100, (2, 2048, 64))
# idx = tf.to_int32(idx)
# nn_point = np.zeros((2, 2048, 17, 64))
# nn_point[0:2, 0:2048] = tf.gather(pcd2[0:2], idx[0:2, 0:2048, :]).numpy()
# print(tf.reduce_max(nn_point,axis=2))
# nn_point2 = np.zeros((2, 2048, 17, 64))
# for i in range(2):
# for j in range(2048):
# nn_point2[i:j] = tf.gather(pcd2[i],idx[i, j, :]).numpy()
# print(tf.reduce_max(nn_point2,axis=2))
#print(tf.cast(idx[0][0][1],dtype=tf.int32))
#print(pcd[tf.cast(idx[0][0],dtype=tf.float32)])
#print(batch_graph)
exit(-1)
indices=[]
values=[]
for n,seq in enumerate(batch_graph[0]):
indices.append(zip([n]*len(seq),range(len(seq))))
values.append(seq)
index = batch_graph[0].nonzero()
print(index)
#print(tf.contrib.layers.dense_to_sparse(batch_graph[1]))
nn_point = np.zeros((2048,16))
for i in range(3):
idx = index[0] == i
ele = index[1][idx]
# ele = index[1][False]
#print(ele)
rand_idx = np.random.choice(len(ele), 16, replace=False)
#print(rand_idx)
ele = ele[rand_idx]
nn_point[i, :] = ele
#print(nn_point.shape)
#print(nn_point)
nn_point = nn_point.astype(int)
pcd = pcd.astype(int)
nn_point = pcd[0][nn_point]
nn_point = np.expand_dims(nn_point,axis=0)
print('---------------')
print(nn_point)
print(nn_point.shape)
pcd = np.expand_dims(pcd,axis=2)
print(pcd)
print(pcd.shape)
nn_point = np.concatenate((nn_point, pcd), axis=2)
nn_point = tf.convert_to_tensor(nn_point)
nn_point = tf.reduce_max(nn_point,axis=1)
nn_point = tf.maximum(nn_point,tf.squeeze(pcd,axis=0))
print(nn_point)
#print(nn_point)
#print(pcd[0][nn_point[0][15]])
np.maximum(pcd[0][nn_point],pcd)
#ele = index[1][idx]
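# ----------------------------------------------------------------------
# Standalone usage sketch (illustration only, not part of the original
# script): the core of knn_search above is a KDTree query whose first
# result column is the query point itself, followed by a symmetric 2 x E
# edge array in the layout edges2A expects.  Duplicate edges are left in
# for brevity; the original dedupes them via a dict.
demo_points = np.random.random((512, 3))
demo_k = 16
demo_tree = KDTree(demo_points, leaf_size=30)
demo_dis, demo_idx = demo_tree.query(demo_points, k=demo_k + 1, return_distance=True)
demo_src = np.repeat(np.arange(demo_points.shape[0]), demo_k)   # each point repeated k times
demo_dst = demo_idx[:, 1:].reshape(-1)                          # its k neighbors (column 0 is the point itself)
demo_edges = np.vstack([np.concatenate([demo_src, demo_dst]), np.concatenate([demo_dst, demo_src])])  # both directions -> symmetric 2 x 2E
print(demo_edges.shape)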
``` |
{
"source": "jingzbu/InverseVITraffic",
"score": 2
} |
#### File: InverseVITraffic/03_OD_matrix_estimation_journal18/Apr_weekend_02_OD_matrix_estimation_GLS.py
```python
from util import *
from util_data_storage_and_load import *
import numpy as np
from numpy.linalg import inv
from scipy.sparse import csr_matrix, csc_matrix
import json
with open('../temp_files/new_route_dict_journal.json', 'r') as json_file:
new_route_dict = json.load(json_file)
number_of_routes = len(new_route_dict)
link_label_dict = zload('../temp_files/link_label_dict_journal.pkz')
number_of_links = len(link_label_dict)
# implement GLS method to estimate OD demand matrix
def GLS(x, A, L):
"""
x: sample matrix, each column is a link flow vector sample; number_of_links * K
A: path-link incidence matrix
L: dimension of xi
----------------
return: xi
----------------
"""
K = np.size(x, 1)
S = samp_cov(x)
#print("rank of S is: \n")
#print(matrix_rank(S))
#print("sizes of S are: \n")
#print(np.size(S, 0))
#print(np.size(S, 1))
inv_S = inv(S).real
A_t = A.transpose()
Q_ = A_t * inv_S * A
Q_ = Q_.real
#Q = adj_PSD(Q_).real # Ensure Q to be PSD
Q = Q_
#print("rank of Q is: \n")
#print(matrix_rank(Q))
#print("sizes of Q are: \n")
#print(np.size(Q, 0))
#print(np.size(Q, 1))
b = sum([A_t * inv_S * x[:, k] for k in range(K)])
# print(b[0])
# assert(1==2)
model = Model("OD_matrix_estimation")  # Gurobi model; Model appears to come in via `from util import *`
xi = []
for l in range(L):
xi.append(model.addVar(name='xi_' + str(l)))
model.update()
# Set objective: (K/2) xi' * Q * xi - b' * xi
obj = 0
for i in range(L):
for j in range(L):
obj += (1.0 / 2) * K * xi[i] * Q[i, j] * xi[j]
for l in range(L):
obj += - b[l] * xi[l]
model.setObjective(obj)
# Add constraint: xi >= 0
for l in range(L):
model.addConstr(xi[l] >= 0)
#model.addConstr(xi[l] <= 5000)
#fictitious_OD_list = zload('../temp_files/fictitious_OD_list')
#for l in fictitious_OD_list:
#model.addConstr(xi[l] == 0)
model.update()
#model.setParam('OutputFlag', False)
model.optimize()
xi_list = []
for v in model.getVars():
# print('%s %g' % (v.varName, v.x))
xi_list.append(v.x)
# print('Obj: %g' % obj.getValue())
return xi_list
# implement GLS method to estimate OD demand matrix
def GLS_with_known_P(x, A, P, L):
"""
x: sample matrix, each column is a link flow vector sample; number_of_links * K
A: path-link incidence matrix
P: logit route choice probability matrix
L: dimension of lam
----------------
return: lam
----------------
"""
K = np.size(x, 1)
S = samp_cov(x)
#print("rank of S is: \n")
#print(matrix_rank(S))
#print("sizes of S are: \n")
#print(np.size(S, 0))
#print(np.size(S, 1))
inv_S = inv(S).real
A_t = A.transpose()
P_t = P.transpose()
# PA'
PA_t = P * A_t
# AP_t
AP_t = PA_t.transpose()
Q_ = PA_t * inv_S * AP_t
Q_ = Q_.real
#Q = adj_PSD(Q_).real # Ensure Q to be PSD
Q = Q_
b = sum([PA_t * inv_S * x[:, k] for k in range(K)])
model = Model("OD_matrix_estimation")
lam = []
for l in range(L):
lam.append(model.addVar(name='lam_' + str(l)))
model.update()
# Set objective: (K/2) lam' * Q * lam - b' * lam
obj = 0
for i in range(L):
for j in range(L):
obj += (1.0 / 2) * K * lam[i] * Q[i, j] * lam[j]
for l in range(L):
obj += - b[l] * lam[l]
model.setObjective(obj)
# Add constraint: lam >= 0
for l in range(L):
model.addConstr(lam[l] >= 0)
model.update()
model.setParam('OutputFlag', False)
model.optimize()
lam_list = []
for v in model.getVars():
# print('%s %g' % (v.varName, v.x))
lam_list.append(v.x)
# print('Obj: %g' % obj.getValue())
return lam_list
# load link_route incidence matrix
A = zload('../temp_files/link_route_incidence_matrix_journal.pkz')
A = A.todense()
# load link counts data
with open('../temp_files/link_day_minute_Apr_dict_journal_JSON.json', 'r') as json_file:
link_day_minute_Apr_dict_JSON = json.load(json_file)
weekend_Apr_list = [1, 7, 8, 14, 15, 21, 22, 28, 29]
# weekend_Apr_list = [9, 10, 11, 12, 13]
feasible_link_dict = zload('../temp_files/feasible_link_dict_journal.pkz')
link_day_minute_Apr_list = []
for link_idx in [feasible_link_dict[idx] for idx in range(len(feasible_link_dict))]:
for day in weekend_Apr_list:
for minute_idx in range(120):
key = 'link_' + str(link_idx) + '_' + str(day)
link_day_minute_Apr_list.append(link_day_minute_Apr_dict_JSON[key]['PM_flow_minute'][minute_idx])
x = np.matrix(link_day_minute_Apr_list)
x = np.matrix.reshape(x, len(feasible_link_dict), 1080)
# x = np.matrix.reshape(x, len(feasible_link_dict), 600)
# print(np.size(x,0), np.size(x,1))
x = np.nan_to_num(x)
# print(np.size(x,0), np.size(x,1))
# y = np.array(np.transpose(x))
# y = y[np.all(y != 0, axis=1)]
# x = np.transpose(y)
# x = np.matrix(x)
# print(np.size(x,0), np.size(x,1))
# print(x[:,:2])
# print(np.size(A,0), np.size(A,1))
# load logit_route_choice_probability_matrix
P = zload('../temp_files/OD_pair_route_incidence_journal.pkz')
P = P.todense()
L = np.size(P, 1) # dimension of xi
assert(L == number_of_routes)
# xi_list = GLS(x, A, number_of_routes)
lam_list = GLS_with_known_P(x, A, P, number_of_routes)
```
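The quadratic program above relies on Gurobi's `Model`, which presumably reaches this script through `from util import *`. As a lighter-weight sketch of the same kind of objective, the non-negative minimizer of (K/2) * xi' Q xi - b' xi can also be obtained with SciPy's bounded L-BFGS-B; the Q and b below are small synthetic stand-ins, not the matrices built in this script.
```python
import numpy as np
from scipy.optimize import minimize

L, K = 5, 10
M = np.random.rand(L, L)
Q = M @ M.T + np.eye(L)                  # a PSD stand-in for A' * inv(S) * A
b = np.random.rand(L)

def objective(xi):
    return 0.5 * K * xi @ Q @ xi - b @ xi

def gradient(xi):
    return K * Q @ xi - b

result = minimize(objective, x0=np.zeros(L), jac=gradient,
                  bounds=[(0, None)] * L, method='L-BFGS-B')
print(result.x)                          # non-negative estimate of xi
```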
#### File: InverseVITraffic/08_develop_new_OD_demand_estimator_MA_Dijkstra_uni_class_Apr_PM/Compute_Jacobian_MA.py
```python
from util import *
import json
with open('./benchmark_data/MA_net.txt') as MA_flow:
MA_flow_lines = MA_flow.readlines()
MA_links = []
i = -8
for line in MA_flow_lines:
i += 1
if i > 0:
MA_links.append(line.split(' ')[1:3])
numLinks = i
link_list_js = [str(int(MA_links[i][0])) + ',' + str(int(MA_links[i][1])) for \
i in range(len(MA_links))]
link_list_pk = [str(int(MA_links[i][0])) + '->' + str(int(MA_links[i][1])) for \
i in range(len(MA_links))]
numNodes = max([int(MA_links[i][1]) for i in range(numLinks)])
from collections import defaultdict
node_neighbors_dict = defaultdict(list)
for node in range(numNodes):
for link in MA_links:
if node == int(link[0]):
node_neighbors_dict[str(node)].append(int(link[1]))
with open('./benchmark_data/MA_trips.txt') as MA_trips:
MA_trips_lines = MA_trips.readlines()
numZones = int(MA_trips_lines[0].split(' ')[3])
od_pairs = []
for i in range(numZones+1)[1:]:
for j in range(numZones+1)[1:]:
if i != j:
od_pairs.append([i, j])
numODpairs = len(od_pairs)
# create O-D pair labels
# create a dictionary mapping O-D pairs to labels
import json
OD_pair_label_dict = {}
OD_pair_label_dict_ = {}
label = 1
for i in range(numZones + 1)[1:]:
for j in range(numZones + 1)[1:]:
key = (i, j)
OD_pair_label_dict[str(key)] = label
OD_pair_label_dict_[str(label)] = key
label += 1
with open('../temp_files/od_pair_label_dict_MA.json', 'w') as json_file:
json.dump(OD_pair_label_dict, json_file)
with open('../temp_files/od_pair_label_dict__MA.json', 'w') as json_file:
json.dump(OD_pair_label_dict_, json_file)
OD_pair_label_dict_refined = {}
OD_pair_label_dict_refined_ = {}
label = 1
for i in range(numZones + 1)[1:]:
for j in range(numZones + 1)[1:]:
if i != j:
key = (i, j)
OD_pair_label_dict_refined[str(key)] = label
OD_pair_label_dict_refined_[str(label)] = key
label += 1
with open('../temp_files/od_pair_label_dict_MA_refined.json', 'w') as json_file:
json.dump(OD_pair_label_dict_refined, json_file)
with open('../temp_files/od_pair_label_dict__MA_refined.json', 'w') as json_file:
json.dump(OD_pair_label_dict_refined_, json_file)
# create link labels
# create a dictionary mapping directed links to labels
link_label_dict = {}
link_label_dict_ = {}
for i in range(numLinks):
link_label_dict[str(i)] = link_list_js[i]
for i in range(numLinks):
link_label_dict_[link_list_js[i]] = i
with open('../temp_files/link_label_dict_MA.json', 'w') as json_file:
json.dump(link_label_dict, json_file)
with open('../temp_files/link_label_dict_MA_.json', 'w') as json_file:
json.dump(link_label_dict_, json_file)
# create link labels
# create a dictionary mapping directed links to labels
link_label_dict = {}
link_label_dict_ = {}
for i in range(numLinks):
link_label_dict[str(i)] = link_list_pk[i]
for i in range(numLinks):
link_label_dict_[link_list_pk[i]] = i
zdump(link_label_dict, '../temp_files/link_label_dict_MA_network.pkz')
zdump(link_label_dict_, '../temp_files/link_label_dict_MA_network_.pkz')
link_length_list = []
with open('./benchmark_data/MA_net.txt', 'r') as f:
read_data = f.readlines()
flag = 0
for row in read_data:
if ';' in row:
flag += 1
if flag > 1:
link_length_list.append(float(row.split(' ')[4]))
link_label_dict = zload('../temp_files/link_label_dict_MA_network.pkz')
link_label_dict_ = zload('../temp_files/link_label_dict_MA_network_.pkz')
import networkx as nx
def jacobianSpiess(numNodes, numLinks, numODpairs, od_pairs, link_list_js, link_length_list):
MA = nx.DiGraph()
MA.add_nodes_from(range(numNodes+1)[1:])
MA_weighted_edges = [(int(link_list_js[i].split(',')[0]), int(link_list_js[i].split(',')[1]), \
link_length_list[i]) for i in range(len(link_list_js))]
MA.add_weighted_edges_from(MA_weighted_edges)
path = dict(nx.all_pairs_dijkstra_path(MA))  # materialize; all_pairs_dijkstra_path returns an iterator in networkx >= 2.0
od_route_dict = {}
for od in od_pairs:
origi = od[0]
desti = od[1]
key = OD_pair_label_dict_refined[str((origi, desti))]
route = str(path[origi][desti]).replace("[", "").replace(", ", "->").replace("]", "")
od_route_dict[key] = route
od_link_dict = {}
for idx in range(len(od_route_dict)):
od_link_list = []
od_node_list = od_route_dict[idx+1].split('->')
for i in range(len(od_node_list)):
if i < len(od_node_list) - 1:
od_link_list.append(link_label_dict_[od_node_list[i] + '->' + od_node_list[i+1]])
od_link_dict[idx] = od_link_list
jacob = np.zeros((numODpairs, numLinks))
for i in range(numODpairs):
for j in range(numLinks):
if j in od_link_dict[i]:
jacob[i, j] = 1
return jacob
jacob = jacobianSpiess(numNodes, numLinks, numODpairs, od_pairs, link_list_js, link_length_list)
```
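A toy version of the `jacobianSpiess` idea on a four-node graph; the graph, link order, and OD pairs below are illustrative. Note that `nx.all_pairs_dijkstra_path` returns an iterator in networkx 2.0 and later, so it is wrapped in `dict()` here, matching the fix above.
```python
import numpy as np
import networkx as nx

G = nx.DiGraph()
edges = [(1, 2, 1.0), (2, 3, 1.0), (1, 3, 5.0), (3, 4, 1.0)]
G.add_weighted_edges_from(edges)
link_index = {(u, v): i for i, (u, v, _) in enumerate(edges)}

paths = dict(nx.all_pairs_dijkstra_path(G))      # iterator in networkx >= 2.0, hence dict()
od_pairs = [(1, 3), (1, 4), (2, 4)]
jacob = np.zeros((len(od_pairs), len(edges)))
for row, (o, d) in enumerate(od_pairs):
    nodes = paths[o][d]                          # e.g. [1, 2, 3] for OD pair (1, 3)
    for u, v in zip(nodes[:-1], nodes[1:]):
        jacob[row, link_index[(u, v)]] = 1       # link lies on the shortest path of this OD pair
print(jacob)
```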
#### File: InverseVITraffic/Python_files/anomaly_detection_stationary_actual.py
```python
from __future__ import absolute_import, division
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from util import *
from util_ano_detec import *
from load_dicts import *
import pylab
from pylab import *
import json
def detec_stati(N, n):
"""
N: number of states in the original chain
n: num of samples per window
"""
traffic_data_with_anomaly = zload('../temp_files/traffic_data_with_anomaly_actual.pkz')
traffic_data_ref = zload('../temp_files/traffic_data_ref_actual.pkz')
tmc = '129+04292'
month = 7
day_list = [2, 3, 5, 9, 10, 11, 12, 16, 17, 18] # July 4 is a holiday; we do not include Fridays
traffic_data_ref_list = []
for hour in range(24):
for minute in range(60):
for day in day_list:
key = str(tmc) + '_' + str(month) + '_' + str(day) + '_' + str(hour) + '_' + str(minute)
traffic_data_ref_list.append(traffic_data_ref[key])
inf = min(traffic_data_ref_list)
sup = max(traffic_data_ref_list)
traffic_data_ref_list_quantized = [quantize(s, N, inf, sup) for s in traffic_data_ref_list]
mapping_dict = {}
for i in range(N):
for j in range(N):
mapping_dict[(i, j)] = i * N + j
traffic_data_ref_list_quantized_trans = [mapping_dict[(traffic_data_ref_list_quantized[i], \
traffic_data_ref_list_quantized[i+1])] \
for i in range(len(traffic_data_ref_list_quantized)-1)]
day = 19
traffic_data_with_anomaly_list = []
for hour in range(24):
for minute in range(60):
key = str(tmc) + '_' + str(month) + '_' + str(day) + '_' + str(hour) + '_' + str(minute)
traffic_data_with_anomaly_list.append(traffic_data_with_anomaly[key])
traffic_data_with_anomaly_list_quantized = [quantize(s, N, inf, sup) for s in traffic_data_with_anomaly_list]
traffic_data_with_anomaly_list_quantized_trans = \
[mapping_dict[(traffic_data_with_anomaly_list_quantized[i], \
traffic_data_with_anomaly_list_quantized[i+1])] \
for i in range(len(traffic_data_with_anomaly_list_quantized)-1)]
mu_1 = mu_est(traffic_data_ref_list_quantized_trans, N) # normal PL
mu_1 = mu_adjust(mu_1) # normal PL
mu_01, mu1, mu_11, P1, G_11, H_11, U_11 = ChainGen_(mu_1)
zdump([mu1, mu_11, P1, G_11, H_11, U_11], '../temp_files/Traffic_ano_detec_PLs_(%s_%s)_actual.pkz'%(N,n))
num_test_sample = 24 * 60 - n
beta = 0.001
eta_wc = {}
eta_Sanov = {}
# Get thresholds for Hoeffding's test corresponding to sample length n
key = str(n) + '_' + str(beta)
G_list = [G_11]
H_list = [H_11]
U_list = [U_11]
eta_1 = HoeffdingRuleMarkovRobust_(beta, G_list, H_list, U_list, n)
eta_2 = - log(beta) / n
eta_wc[key] = eta_1
eta_Sanov[key] = eta_2
zdump([eta_wc, eta_Sanov], '../temp_files/traffic_ano_detec_threshold_(%s_%s)_actual.pkz'%(N,n))
time_range = range(num_test_sample)
eta_wc_list = []
eta_Sanov_list = []
for idx in time_range:
eta_wc_list.append(np.array(eta_wc[key]).tolist())
eta_Sanov_list.append(np.array(eta_Sanov[key]).tolist())
test_sample = []
for idx in range(num_test_sample):
test_sample.append(traffic_data_with_anomaly_list_quantized_trans[idx : (idx+n)])
KL = []
key = str(n) + '_' + str(beta)
for idx in range(num_test_sample):
KL.append(KL_est(test_sample[idx], mu_11))
zdump(KL, '../temp_files/traffic_ano_detec_KL_(%s_%s)_actual.pkz'%(N,n))
# Output the time instances reporting an anomaly
alarm_instance_WC_list = []
for idx in range(num_test_sample):
if KL[idx] > eta_wc_list[idx]:
# print('(WC-stationary) The earliest time instance detecting the anomaly is: %s' %(idx + n))
# break
alarm_instance_WC_list.append(idx + n)
alarm_instance_Sanov_list = []
for idx in range(num_test_sample):
if KL[idx] > eta_Sanov_list[idx]:
# print('(Sanov-stationary) The earliest time instance detecting the anomaly is: %s' %(idx + n))
# break
alarm_instance_Sanov_list.append(idx + n)
alarm_instance_dict = {}
alarm_instance_dict['WC'] = alarm_instance_WC_list
alarm_instance_dict['Sanov'] = alarm_instance_Sanov_list
with open('../temp_files/alarm_instance_dict_stationary_(%s_%s).json'%(N,n), 'w') as json_file:
json.dump(alarm_instance_dict, json_file)
plot_points(time_range, KL, eta_wc_list)
plt.ylabel('divergence')
plt.xlabel('time (min)')
pylab.ylim(-0.01, max(KL)+0.1)
pylab.xlim(0, 24 * 60)
plt.savefig('../temp_files/detec_results_(%s_%s)_WC_actual.eps'%(N,n))
# plt.show()
plot_points(time_range, KL, eta_Sanov_list)
plt.ylabel('divergence')
plt.xlabel('time (min)')
pylab.ylim(-0.01, max(KL)+0.1)
pylab.xlim(0, 24 * 60)
plt.savefig('../temp_files/detec_results_(%s_%s)_Sanov_actual.eps'%(N,n))
# plt.show()
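# ----------------------------------------------------------------------
# Standalone numeric sketch (illustration only, not part of the original
# script): the empirical law of a quantized window, its KL divergence from
# a reference law, and the Sanov-style threshold eta = -log(beta) / n used
# above.  All values here are synthetic.
demo_reference = np.array([0.25, 0.25, 0.25, 0.25])    # reference PL over 4 symbols
demo_window = np.random.choice(4, size=200, p=[0.4, 0.3, 0.2, 0.1])
demo_empirical = np.bincount(demo_window, minlength=4) / float(len(demo_window))
demo_eps = 1e-12                                       # guard against log(0) for unseen symbols
demo_kl = np.sum((demo_empirical + demo_eps) * np.log((demo_empirical + demo_eps) / (demo_reference + demo_eps)))
demo_beta, demo_n = 0.001, len(demo_window)
demo_eta_sanov = -np.log(demo_beta) / demo_n
print(demo_kl, demo_eta_sanov)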
``` |
{
"source": "jingz/Flask-Boost",
"score": 2
} |
#### File: migrations/versions/20170110114140_fba2ed6ae503_create_init_user_management.py
```python
revision = 'fba<PASSWORD>'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('avatar', sa.String(length=200), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('confirm_at', sa.DateTime(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('auth_provider_id', sa.String(length=255), nullable=True),
sa.Column('auth_provider_user_id', sa.String(length=255), nullable=True),
sa.Column('auth_provider_profile_pic', sa.String(length=512), nullable=True),
sa.Column('auth_provider_access_token', sa.String(length=255), nullable=True),
sa.Column('auth_provider_secret', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('name')
)
op.create_table('users_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('users_roles')
op.drop_table('user')
op.drop_table('role')
```
#### File: application/extensions/json_validator.py
```python
from functools import wraps
from flask import request
import jsonschema as js
from .errors import error_response
def validate(schema):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
v = js.Draft4Validator(schema)
errors = list()
for error in sorted(v.iter_errors(request.json), key=str):
# TODO: support richer error message formats
errors.append(error.message)
if errors:
return error_response(errors)
return func(*args, **kwargs)
return wrapper
return decorator
# Schema Example
# http://json-schema.org/example1.html
```
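A minimal usage sketch for the `validate` decorator above. The app, route, and schema are illustrative, the import path is assumed from this project's layout, and `error_response` is the helper from `extensions/errors.py` that the decorator itself uses.
```python
from flask import Flask, jsonify, request

from application.extensions.json_validator import validate  # assumed import path for this project

app = Flask(__name__)

user_schema = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'email': {'type': 'string'},
    },
    'required': ['name', 'email'],
}

@app.route('/users', methods=['POST'])
@validate(user_schema)
def create_user():
    # Only reached when request.json satisfies user_schema;
    # otherwise the decorator returns error_response(errors).
    return jsonify(request.json), 201
```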
#### File: application/extensions/searchable_mixin.py
```python
from sqlalchemy import sql
import re
try:
basestring
except NameError:
basestring = str
# Enhance Front-End-Like search API for SQLAlchemy Model
class SearchableMixin(object):
@classmethod
def search(cls, di, sel=[]):
"""
SPECS
SQL Predicate
===== ===========
= eq
!= not_eq
LIKE like
NOT LIKE not_like
< lt
> gt
<= lte
>= gte
in in
not_in not_in
Example: given the dict argument di = { 'name_like': 'jing' },
the generated WHERE clause is: name like '%jing%'
e.g. User.search({'name_like': 'sarunyoo'}) returns a filtered query on User
return: a query scoped to the class (restricted to the columns in `sel` when given)
"""
filter_clause, filter_params = cls._build_search_clause(di)
sort_ = sql.text(cls._build_sort_clause(di))
filter_clause = sql.text(filter_clause)
if len(sel) == 0:
return cls.query.filter(filter_clause).\
params(**filter_params).order_by(sort_)
else:
return cls.query.session.query(*sel).filter(filter_clause).\
params(**filter_params).order_by(sort_)
SEARCH_OPT_REGEXP = r'(((?<!not)_eq)|(_not_eq)|((?<!not)_like)|(_not_like)|((?<!not)_contains)|(_not_contains)|(_lt)|(_gt)|(_lte)|(_gte)|((?<!not)_in)|(_not_in))$'
SEARCH_OPT_MAPPER = {
'eq': '=',
'not_eq': '<>',
'like': 'like',
'contains': 'like',
'not_like': 'not like',
'not_contains': 'not like',
'lt': '<',
'gt': '>',
'gte': '>=',
'lte': '<=',
'in': 'in',
'not_in': 'not in',
}
SEARCH_OPT_TEXT_MAPPER = {
'eq': 'equal to',
'not_eq': 'not equal to',
'like': 'contains',
'contains': 'contains',
'not_like': 'not contains',
'not_contains': 'not contains',
'lt': 'less than',
'gt': 'greater than',
'gte': 'greater than and equal to',
'lte': 'less than and equal to',
'in': 'in any',
'not_in': 'not in any',
}
@classmethod
def explain_search_clause(cls, di, transform_key=True):
text = []
for k, v in di.items():
col, opt = cls._grep_search_opt(k)
if col and opt:
read_pattern = u"{col} {text_opt} {v}"
to = cls.SEARCH_OPT_TEXT_MAPPER.get(opt)
_col = col
if transform_key:
_col = _col.title().replace('_', ' ')
text.append(read_pattern.format(col=_col, text_opt=to, v=v))
return u" and ".join(text)
@classmethod
def _build_search_clause(cls, di):
filter_clause = []
filter_params = {}
for k, v in di.items():
col, opt = cls._grep_search_opt(k)
if col and opt:
__where = "%s.{col} {opt} :{column_param}" % cls.__tablename__
sql_opt = cls.SEARCH_OPT_MAPPER.get(opt)
if opt in ('like', 'not_like', 'contains', 'not_contains'):
# wrap value with '%'
if isinstance(v, basestring):
if re.match('^%.*', v) or re.match('.*%$', v):
pass
else:
v = u"%{0}%".format(v)
if opt == 'in' or opt == 'not_in':
# in is a special case to
# serialize value and key into where statement
in_params_series = [] # :column_name_key_1 ...
n = len(v)
i = 1
while n != 0:
_k = ":%s_%d" % (k, i)
in_params_series.append(_k)
# set binding params
filter_params.setdefault(_k[1:], v[i-1])
i += 1
n -= 1
__where = u"{col} {opt} (%s)" % ", ".join(in_params_series)
filter_text = __where.format(col=col, opt=sql_opt)
filter_clause.append(filter_text)
continue
if sql_opt is not None:
filter_text = __where.format(col=col, opt=sql_opt, column_param=k)
filter_clause.append(filter_text)
filter_params.setdefault(k, v)
else:
continue
else:
continue
filter_clause = " and ".join(filter_clause)
return (filter_clause, filter_params)
@classmethod
def _grep_search_opt(cls, text):
match = re.search(cls.SEARCH_OPT_REGEXP, text)
if match is not None:
col, _ = text.split(match.group(0)) # client_code, ''
opt = match.group(0)[1:] # remove a leading _
# match but the matched column is not in mapper
if col not in [__col.name for __col in cls.__mapper__.c]:
col = opt = None
# could be just column name
elif text in [__col.name for __col in cls.__mapper__.c]:
col = text
opt = 'eq'
else:
col = opt = None
return (col, opt)
@classmethod
def _build_sort_clause(cls, di):
orders = []
for k, v in di.items():
col, direction, priority = cls._grep_sort_opt(k, v)
if col is not None:
orders.append({ 'column': col, 'direction': direction,
'priority': priority})
# sort order by priority
if len(orders) > 0:
proc = lambda k: k['priority']
order_pattern = u"%s.{column} {direction}" % cls.__tablename__
return ", ".join([order_pattern.format(**d) for d in sorted(orders, key=proc)])
else:
return ""
SORT_KEY_REGEXP = r'^sort_by_(.*)'
SORT_VALUE_REGEXP = r'(asc|desc)_?((?<=)\d+)?' # asc_1
@classmethod
def _grep_sort_opt(cls, key_text, value_text):
match_key = re.search(cls.SORT_KEY_REGEXP, key_text)
match_val = re.search(cls.SORT_VALUE_REGEXP, value_text, re.IGNORECASE)
if match_key is not None and match_val is not None:
col = match_key.group(1) # client_code
collate = False
if col.startswith('$'):
collate = True
col = col[1:]
direction, priority = match_val.group(1), match_val.group(2)
if direction is None:
direction = "ASC"
if collate:
direction = ("COLLATE NOCASE %s" % direction)
if priority is None:
priority = 99
else:
priority = int(priority)
# match but the matched column is not in mapper
if col not in [__col.name for __col in cls.__mapper__.c]:
col = direction = priority = None
else:
col = direction = priority = None
return (col, direction, priority)
```
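To see what the mixin's key grammar does without touching a database, the snippet below runs the same `SEARCH_OPT_REGEXP` over a few front-end-style keys. It is a standalone illustration of `_grep_search_opt`, not a replacement for it; the real method additionally checks that the column exists on the mapper.
```python
import re

SEARCH_OPT_REGEXP = r'(((?<!not)_eq)|(_not_eq)|((?<!not)_like)|(_not_like)|((?<!not)_contains)|(_not_contains)|(_lt)|(_gt)|(_lte)|(_gte)|((?<!not)_in)|(_not_in))$'

for key in ('name_like', 'age_gte', 'status_not_in', 'plain_column'):
    match = re.search(SEARCH_OPT_REGEXP, key)
    if match:
        col = key[:match.start()]          # e.g. 'name'
        opt = match.group(0)[1:]           # drop the leading underscore, e.g. 'like'
        print(key, '->', (col, opt))
    else:
        print(key, '-> no operator suffix; the mixin falls back to plain equality for real columns')
```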
#### File: application/utils/admin.py
```python
import datetime, re
from flask import g, abort, request
from functools import wraps
def convert_params_for_search_format(model):
params = request.args.to_dict()
# drop the page param if present
params.pop('page', None)
search_params = params.copy()
for k in params:
if not params[k]: search_params.pop(k)
for k, col_def in model.columns().items():
value = search_params.get(k)
if not value: continue
ptype = col_def.get('python_type')
if ptype is datetime.datetime or \
ptype is datetime.date:
dt_btw = value.split('to')
if len(dt_btw) == 2:
search_params.pop(k)
search_params[k+'_gte'] = dt_btw[0].strip()
search_params[k+'_lte'] = dt_btw[1].strip()
return search_params, params
rexp_sort_by = re.compile("^sort_by")
def make_filter_params():
params = request.args.to_dict()
params.pop('page', None)
filter_params = params.copy()
# for k in params:
# # remove empty value
# if not params[k]: filter_params.pop(k)
# # remove sort_by keys
# if rexp_sort_by.match(k):
# sort_factors = filter_params.get(k).split('_')
# direction = sort_factors[0]
# if len(sort_factors) == 2:
# priority = sort_factors[1]
# else:
# priority = '1'
# if direction == 'asc':
# filter_params[k] = 'desc_' + priority
# else:
# filter_params[k] = 'asc_' + priority
# current_app.logger.debug(filter_params)
return filter_params
def admin_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if g.user and g.user.is_admin:
return func(*args, **kwargs)
else:
return abort(403)
return wrapper
def pagination(q, page, page_size=20):
total = q.count()
page = int(page)
offset = page_size * (page - 1)
q = q.offset(offset).limit(page_size)
has_next = (page * page_size) < total
has_prev = page > 1
end = offset + page_size if has_next else total
return q, dict( page=page,
start=offset + 1,
end=end,
has_next=has_next,
next_page=(page + 1 if has_next else None),
has_prev=has_prev,
prev_page=(page - 1 if has_prev else None),
per_page=page_size,
total=total )
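# ----------------------------------------------------------------------
# Standalone usage sketch (illustration only, not part of the original
# module): a tiny stand-in implementing the three query methods that
# pagination() relies on (count, offset, limit), exercised on 45 fake rows.
class _DemoQuery(object):
    def __init__(self, rows):
        self.rows = list(rows)
    def count(self):
        return len(self.rows)
    def offset(self, n):
        return _DemoQuery(self.rows[n:])
    def limit(self, n):
        return _DemoQuery(self.rows[:n])
demo_q, demo_meta = pagination(_DemoQuery(range(45)), page=2, page_size=20)
print(demo_q.rows)    # items 20..39
print(demo_meta)      # {'page': 2, 'start': 21, 'end': 40, 'has_next': True, ...}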
``` |
{
"source": "JingZhang918/AI-for-trading",
"score": 2
} |
#### File: .Trash-0/files/helper.py
```python
import scipy.stats
from colour import Color
import numpy as np
import pandas as pd
import os
import tempfile
import zipfile
import glob
import quandl
import plotly as py
import plotly.graph_objs as go
import plotly.offline as offline_py
from sklearn.preprocessing import Normalizer
offline_py.init_notebook_mode(connected=True)
_color_scheme = {
'background_label': '#9dbdd5',
'low_value': '#B6B2CF',
'high_value': '#2D3ECF',
'y_axis_2_text_color': 'grey',
'shadow': 'rgba(0, 0, 0, 0.75)'}
def _generate_stock_trace(df):
return go.Candlestick(
x=df['date'],
open=df['adj_open'],
high=df['adj_high'],
low=df['adj_low'],
close=df['adj_close'],
showlegend=False)
def _generate_config():
return {'showLink': False, 'displayModeBar': False, 'showAxisRangeEntryBoxes': True}
def _generate_buy_annotations(df, signal_column):
return [{
'x': row['date'], 'y': row['adj_close'], 'text': 'Long', 'bgcolor': _color_scheme['background_label'],
'ayref': 'y', 'ax': 0, 'ay': 20}
for _, row in df[df[signal_column] == 1].iterrows()]
def _generate_sell_annotations(df, signal_column):
return [{
'x': row['date'], 'y': row['adj_close'], 'text': 'Short', 'bgcolor': _color_scheme['background_label'],
'ayref': 'y', 'ax': 0, 'ay': 160}
for _, row in df[df[signal_column] == -1].iterrows()]
def download_quandl_dataset(database, dataset, save_path, columns, tickers, start_date, end_date):
"""
Download a dataset from Quandl and save it to `save_path`.
Filter by columns, tickers, and date
:param database: The Quandl database to download from
:param dataset: The dataset to download
:param save_path: The path to save the dataset
:param columns: The columns to save
:param tickers: The tickers to save
:param start_date: The rows to save that are older than this date
:param end_date: The rows to save that are younger than this date
"""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_wiki_file = tmp_dir + 'tmp.zip'
quandl.bulkdownload(database, dataset_code=dataset, filename=tmp_wiki_file)
# Unzip downloaded data
zip_ref = zipfile.ZipFile(tmp_wiki_file, 'r')
zip_ref.extractall(tmp_dir)
zip_ref.close()
# Check if the zip file only contains one csv file
# We're assuming that Quandl will always give us the data in a single csv file.
# If it's different, we want to throw an error.
csv_files = glob.glob(os.path.join(tmp_dir, '*.csv'))
assert len(csv_files) == 1,\
'Bulk download of Quandl Wiki data failed. Wrong number of csv files found. Found {} file(s).'\
.format(len(csv_files))
tmp_csv_file = csv_files[0]
names = quandl.get_table('{}/{}'.format(database, dataset), ticker='EMPTY_RESULTS_TICKER').columns.values
tmp_df = pd.read_csv(tmp_csv_file, names=names)
tmp_df['date'] = pd.to_datetime(tmp_df['date'])
# Remove unused data and save
tmp_df = tmp_df[tmp_df['date'].isin(pd.date_range(start_date, end_date))] # Filter unused dates
tmp_df = tmp_df[tmp_df['ticker'].isin(tickers)] # Filter unused tickers
tmp_df.to_csv(save_path, columns=columns, index=False) # Filter unused columns and save
def _generate_second_tetration_stock(stock_symbol, dates):
"""
Generate stock that follows the second tetration curve
:param stock_symbol: Stock Symbol
:param dates: Dates for ticker
:return: Stock data
"""
n_stock_columns = 5
linear_line = np.linspace(1, 5, len(dates))
all_noise = ((np.random.rand(n_stock_columns, len(dates)) - 0.5) * 0.01)
sector_stock = pd.DataFrame({
'ticker': stock_symbol,
'date': dates,
'base_line': np.power(linear_line, linear_line)})
sector_stock['base_line'] = sector_stock['base_line'] + all_noise[0]*sector_stock['base_line']
sector_stock['adj_open'] = sector_stock['base_line'] + all_noise[1]*sector_stock['base_line']
sector_stock['adj_close'] = sector_stock['base_line'] + all_noise[2]*sector_stock['base_line']
sector_stock['adj_high'] = sector_stock['base_line'] + all_noise[3]*sector_stock['base_line']
sector_stock['adj_low'] = sector_stock['base_line'] + all_noise[4]*sector_stock['base_line']
sector_stock['adj_high'] = sector_stock[['adj_high', 'adj_open', 'adj_close']].max(axis=1)
sector_stock['adj_low'] = sector_stock[['adj_low', 'adj_open', 'adj_close']].min(axis=1)
return sector_stock.drop(columns='base_line')
def generate_tb_sector(dates):
"""
Generate TB sector of stocks
:param dates: Dates that stocks should have market data on
:return: TB sector stocks
"""
symbol_length = 6
stock_names = [
'kaufmanniana', 'clusiana', 'greigii', 'sylvestris', 'turkestanica', 'linifolia', 'gesneriana',
'humilis', 'tarda', 'saxatilis', 'dasystemon', 'orphanidea', 'kolpakowskiana', 'praestans',
'sprengeri', 'bakeri', 'pulchella', 'biflora', 'schrenkii', 'armena', 'vvedenskyi', 'agenensis',
'altaica', 'urumiensis']
return [
_generate_second_tetration_stock(stock_name[:symbol_length].upper(), dates)
for stock_name in stock_names]
def get_signal_return_pval(signal_return):
"""
Calculate p-value from signal returns
:param signal_return: Signal returns
:return: P-value
"""
signal_return_mean = signal_return.mean()
s_hat_5 = np.std(signal_return, ddof=1) / np.sqrt(len(signal_return))
t_5 = signal_return_mean / s_hat_5
return scipy.stats.t.sf(np.abs(t_5), len(signal_return) - 1)
def plot_stock(df, title):
config = _generate_config()
layout = go.Layout(title=title)
stock_trace = _generate_stock_trace(df)
offline_py.iplot({'data': [stock_trace], 'layout': layout}, config=config)
def plot_high_low(df, title):
config = _generate_config()
layout = go.Layout(title=title)
stock_trace = _generate_stock_trace(df)
high_trace = go.Scatter(
x=df['date'],
y=df['lookback_high'],
name='Column lookback_high',
line={'color': _color_scheme['high_value']})
low_trace = go.Scatter(
x=df['date'],
y=df['lookback_low'],
name='Column lookback_low',
line={'color': _color_scheme['low_value']})
offline_py.iplot({'data': [stock_trace, high_trace, low_trace], 'layout': layout}, config=config)
def plot_signal(df, title, signal_column):
config = _generate_config()
buy_annotations = _generate_buy_annotations(df, signal_column)
sell_annotations = _generate_sell_annotations(df, signal_column)
layout = go.Layout(
title=title,
annotations=buy_annotations + sell_annotations)
stock_trace = _generate_stock_trace(df)
offline_py.iplot({'data': [stock_trace], 'layout': layout}, config=config)
def plot_lookahead_prices(df, columns, title):
config = _generate_config()
layout = go.Layout(title=title)
colors = Color(_color_scheme['low_value']).range_to(Color(_color_scheme['high_value']), len(columns))
traces = [_generate_stock_trace(df)]
for column, color in zip(columns, colors):
traces.append(
go.Scatter(
x=df['date'],
y=df[column],
name='Column {}'.format(column),
line={'color': str(color)}))
offline_py.iplot({'data': traces, 'layout': layout}, config=config)
def plot_price_returns(df, columns, title):
config = _generate_config()
layout = go.Layout(
title=title,
yaxis2={
'title': 'Returns',
'titlefont': {'color': _color_scheme['y_axis_2_text_color']},
'tickfont': {'color': _color_scheme['y_axis_2_text_color']},
'overlaying': 'y',
'side': 'right'})
colors = Color(_color_scheme['low_value']).range_to(Color(_color_scheme['high_value']), len(columns))
traces = [_generate_stock_trace(df)]
for column, color in zip(columns, colors):
traces.append(
go.Scatter(
x=df['date'],
y=df[column],
name='Column {}'.format(column),
line={'color': str(color)},
yaxis='y2'))
offline_py.iplot({'data': traces, 'layout': layout}, config=config)
def plot_signal_returns(df, signal_return_columns, signal_columns, titles):
config = _generate_config()
layout = go.Layout(
yaxis2={
'title': 'Signal Returns',
'titlefont': {'color': _color_scheme['y_axis_2_text_color']},
'tickfont': {'color': _color_scheme['y_axis_2_text_color']},
'overlaying': 'y',
'side': 'right'})
colors = Color(_color_scheme['low_value']).range_to(Color(_color_scheme['high_value']), len(signal_return_columns))
stock_trace = _generate_stock_trace(df)
for signal_return_column, signal_column, color, title in zip(signal_return_columns, signal_columns, colors, titles):
non_zero_signals = df[df[signal_return_column] != 0]
signal_return_trace = go.Scatter(
x=non_zero_signals['date'],
y=non_zero_signals[signal_return_column],
name='Column {}'.format(signal_return_column),
line={'color': str(color)},
yaxis='y2')
buy_annotations = _generate_buy_annotations(df, signal_column)
sell_annotations = _generate_sell_annotations(df, signal_column)
layout['title'] = title
layout['annotations'] = buy_annotations + sell_annotations
offline_py.iplot({'data': [stock_trace, signal_return_trace], 'layout': layout}, config=config)
def plot_series_histograms(series_list, title, subplot_titles):
assert len(series_list) == len(subplot_titles)
all_values = pd.concat(series_list)
x_range = [all_values.min(), all_values.max()]
y_range = [0, 1500]
config = _generate_config()
colors = Color(_color_scheme['low_value']).range_to(Color(_color_scheme['high_value']), len(series_list))
fig = py.tools.make_subplots(rows=1, cols=len(series_list), subplot_titles=subplot_titles, print_grid=False)
fig['layout'].update(title=title, showlegend=False)
for series_i, (series, color) in enumerate(zip(series_list, colors), 1):
filtered_series = series[series != 0].dropna()
trace = go.Histogram(x=filtered_series, marker={'color': str(color)})
fig.append_trace(trace, 1, series_i)
fig['layout']['xaxis{}'.format(series_i)].update(range=x_range)
fig['layout']['yaxis{}'.format(series_i)].update(range=y_range)
offline_py.iplot(fig, config=config)
def plot_series_to_normal_histograms(series_list, title, subplot_titles):
assert len(series_list) == len(subplot_titles)
all_values = pd.concat(series_list)
x_range = [all_values.min(), all_values.max()]
y_range = [0, 1500]
config = _generate_config()
fig = py.tools.make_subplots(rows=1, cols=len(series_list), subplot_titles=subplot_titles, print_grid=False)
fig['layout'].update(title=title)
for series_i, series in enumerate(series_list, 1):
filtered_series = series[series != 0].dropna()
filtered_series_trace = go.Histogram(
x=filtered_series,
marker={'color': _color_scheme['low_value']},
name='Signal Return Distribution',
showlegend=False)
normal_trace = go.Histogram(
x=np.random.normal(np.mean(filtered_series), np.std(filtered_series), len(filtered_series)),
marker={'color': _color_scheme['shadow']},
name='Normal Distribution',
showlegend=False)
fig.append_trace(filtered_series_trace, 1, series_i)
fig.append_trace(normal_trace, 1, series_i)
fig['layout']['xaxis{}'.format(series_i)].update(range=x_range)
fig['layout']['yaxis{}'.format(series_i)].update(range=y_range)
# Show legend
fig['data'][0]['showlegend'] = True
fig['data'][1]['showlegend'] = True
offline_py.iplot(fig, config=config)
```
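The p-value computation in `get_signal_return_pval` is a one-sided t-test on the mean signal return. The standalone sketch below reproduces it on synthetic returns so the individual pieces (standard error, t-statistic, survival function of the t distribution) are visible on their own.
```python
import numpy as np
import scipy.stats

signal_return = np.random.normal(loc=0.001, scale=0.01, size=250)    # synthetic daily signal returns
mean_return = signal_return.mean()
std_err = np.std(signal_return, ddof=1) / np.sqrt(len(signal_return))
t_stat = mean_return / std_err
p_value = scipy.stats.t.sf(np.abs(t_stat), len(signal_return) - 1)   # one-sided p-value
print(t_stat, p_value)
```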
#### File: .Trash-0/files/project_helper.py
```python
import helper
import scipy.stats
from colour import Color
import numpy as np
import pandas as pd
import plotly as py
import plotly.graph_objs as go
import plotly.offline as offline_py
offline_py.init_notebook_mode(connected=True)
def _generate_stock_trace(prices):
return go.Scatter(
name='Index',
x=prices.index,
y=prices,
line={'color': helper.color_scheme['main_line']})
def _generate_buy_annotations(prices, signal):
return [{
'x': index, 'y': price, 'text': 'Long', 'bgcolor': helper.color_scheme['background_label'],
'ayref': 'y', 'ax': 0, 'ay': 20}
for index, price in prices[signal == 1].iteritems()]
def _generate_sell_annotations(prices, signal):
return [{
'x': index, 'y': price, 'text': 'Short', 'bgcolor': helper.color_scheme['background_label'],
'ayref': 'y', 'ax': 0, 'ay': 160}
for index, price in prices[signal == -1].iteritems()]
def _generate_second_tetration_stock(stock_symbol, dates):
"""
Generate stock that follows the second tetration curve
:param stock_symbol: Stock Symbol
:param dates: Dates for ticker
:return: Stock data
"""
n_stock_columns = 5
linear_line = np.linspace(1, 5, len(dates))
all_noise = ((np.random.rand(n_stock_columns, len(dates)) - 0.5) * 0.01)
sector_stock = pd.DataFrame({
'ticker': stock_symbol,
'date': dates,
'base_line': np.power(linear_line, linear_line)})
sector_stock['base_line'] = sector_stock['base_line'] + all_noise[0]*sector_stock['base_line']
sector_stock['adj_open'] = sector_stock['base_line'] + all_noise[1]*sector_stock['base_line']
sector_stock['adj_close'] = sector_stock['base_line'] + all_noise[2]*sector_stock['base_line']
sector_stock['adj_high'] = sector_stock['base_line'] + all_noise[3]*sector_stock['base_line']
sector_stock['adj_low'] = sector_stock['base_line'] + all_noise[4]*sector_stock['base_line']
sector_stock['adj_high'] = sector_stock[['adj_high', 'adj_open', 'adj_close']].max(axis=1)
sector_stock['adj_low'] = sector_stock[['adj_low', 'adj_open', 'adj_close']].min(axis=1)
return sector_stock.drop(columns='base_line')
def generate_tb_sector(dates):
"""
Generate TB sector of stocks
:param dates: Dates that stocks should have market data on
:return: TB sector stocks
"""
symbol_length = 6
stock_names = [
'kaufmanniana', 'clusiana', 'greigii', 'sylvestris', 'turkestanica', 'linifolia', 'gesneriana',
'humilis', 'tarda', 'saxatilis', 'dasystemon', 'orphanidea', 'kolpakowskiana', 'praestans',
'sprengeri', 'bakeri', 'pulchella', 'biflora', 'schrenkii', 'armena', 'vvedenskyi', 'agenensis',
'altaica', 'urumiensis']
return [
_generate_second_tetration_stock(stock_name[:symbol_length].upper(), dates)
for stock_name in stock_names]
def get_signal_return_pval(signal_return):
"""
Calculate p-value from signal returns
:param signal_return: Signal returns
:return: P-value
"""
signal_return_series = signal_return.stack()
signal_return_mean = signal_return_series.mean()
s_hat_5 = np.std(signal_return_series, ddof=1) / np.sqrt(len(signal_return_series))
t_5 = signal_return_mean / s_hat_5
return scipy.stats.t.sf(np.abs(t_5), len(signal_return_series) - 1)
def plot_stock(prices, title):
config = helper.generate_config()
layout = go.Layout(title=title)
stock_trace = _generate_stock_trace(prices)
offline_py.iplot({'data': [stock_trace], 'layout': layout}, config=config)
def plot_high_low(prices, lookback_high, lookback_low, title):
config = helper.generate_config()
layout = go.Layout(title=title)
stock_trace = _generate_stock_trace(prices)
high_trace = go.Scatter(
x=lookback_high.index,
y=lookback_high,
name='Column lookback_high',
line={'color': helper.color_scheme['major_line']})
low_trace = go.Scatter(
x=lookback_low.index,
y=lookback_low,
name='Column lookback_low',
line={'color': helper.color_scheme['minor_line']})
offline_py.iplot({'data': [stock_trace, high_trace, low_trace], 'layout': layout}, config=config)
def plot_signal(price, signal, title):
config = helper.generate_config()
buy_annotations = _generate_buy_annotations(price, signal)
sell_annotations = _generate_sell_annotations(price, signal)
layout = go.Layout(
title=title,
annotations=buy_annotations + sell_annotations)
stock_trace = _generate_stock_trace(price)
offline_py.iplot({'data': [stock_trace], 'layout': layout}, config=config)
def plot_lookahead_prices(prices, lookahead_price_list, title):
config = helper.generate_config()
layout = go.Layout(title=title)
colors = Color(helper.color_scheme['low_value'])\
.range_to(Color(helper.color_scheme['high_value']), len(lookahead_price_list))
traces = [_generate_stock_trace(prices)]
for (lookahead_prices, lookahead_days), color in zip(lookahead_price_list, colors):
traces.append(
go.Scatter(
x=lookahead_prices.index,
y=lookahead_prices,
name='{} Day Lookahead'.format(lookahead_days),
line={'color': str(color)}))
offline_py.iplot({'data': traces, 'layout': layout}, config=config)
def plot_price_returns(prices, lookahead_returns_list, title):
config = helper.generate_config()
layout = go.Layout(
title=title,
yaxis2={
'title': 'Returns',
'titlefont': {'color': helper.color_scheme['y_axis_2_text_color']},
'tickfont': {'color': helper.color_scheme['y_axis_2_text_color']},
'overlaying': 'y',
'side': 'right'})
colors = Color(helper.color_scheme['low_value'])\
.range_to(Color(helper.color_scheme['high_value']), len(lookahead_returns_list))
traces = [_generate_stock_trace(prices)]
for (lookahead_returns, lookahead_days), color in zip(lookahead_returns_list, colors):
traces.append(
go.Scatter(
x=lookahead_returns.index,
y=lookahead_returns,
name='{} Day Lookahead'.format(lookahead_days),
line={'color': str(color)},
yaxis='y2'))
offline_py.iplot({'data': traces, 'layout': layout}, config=config)
def plot_signal_returns(prices, signal_return_list, titles):
config = helper.generate_config()
layout = go.Layout(
yaxis2={
'title': 'Signal Returns',
'titlefont': {'color': helper.color_scheme['y_axis_2_text_color']},
'tickfont': {'color': helper.color_scheme['y_axis_2_text_color']},
'overlaying': 'y',
'side': 'right'})
colors = Color(helper.color_scheme['low_value'])\
.range_to(Color(helper.color_scheme['high_value']), len(signal_return_list))
stock_trace = _generate_stock_trace(prices)
for (signal_return, signal, lookahead_days), color, title in zip(signal_return_list, colors, titles):
non_zero_signals = signal_return[signal_return != 0]
signal_return_trace = go.Scatter(
x=non_zero_signals.index,
y=non_zero_signals,
name='{} Day Lookahead'.format(lookahead_days),
line={'color': str(color)},
yaxis='y2')
buy_annotations = _generate_buy_annotations(prices, signal)
sell_annotations = _generate_sell_annotations(prices, signal)
layout['title'] = title
layout['annotations'] = buy_annotations + sell_annotations
offline_py.iplot({'data': [stock_trace, signal_return_trace], 'layout': layout}, config=config)
def plot_signal_histograms(signal_list, title, subplot_titles):
assert len(signal_list) == len(subplot_titles)
signal_series_list = [signal.stack() for signal in signal_list]
all_values = pd.concat(signal_series_list)
x_range = [all_values.min(), all_values.max()]
y_range = [0, 1500]
config = helper.generate_config()
colors = Color(helper.color_scheme['low_value']).range_to(Color(helper.color_scheme['high_value']), len(signal_series_list))
fig = py.tools.make_subplots(rows=1, cols=len(signal_series_list), subplot_titles=subplot_titles, print_grid=False)
fig['layout'].update(title=title, showlegend=False)
for series_i, (signal_series, color) in enumerate(zip(signal_series_list, colors), 1):
filtered_series = signal_series[signal_series != 0].dropna()
trace = go.Histogram(x=filtered_series, marker={'color': str(color)})
fig.append_trace(trace, 1, series_i)
fig['layout']['xaxis{}'.format(series_i)].update(range=x_range)
fig['layout']['yaxis{}'.format(series_i)].update(range=y_range)
offline_py.iplot(fig, config=config)
def plot_signal_to_normal_histograms(signal_list, title, subplot_titles):
assert len(signal_list) == len(subplot_titles)
signal_series_list = [signal.stack() for signal in signal_list]
all_values = pd.concat(signal_series_list)
x_range = [all_values.min(), all_values.max()]
y_range = [0, 1500]
config = helper.generate_config()
fig = py.tools.make_subplots(rows=1, cols=len(signal_series_list), subplot_titles=subplot_titles, print_grid=False)
fig['layout'].update(title=title)
for series_i, signal_series in enumerate(signal_series_list, 1):
filtered_series = signal_series[signal_series != 0].dropna()
filtered_series_trace = go.Histogram(
x=filtered_series,
marker={'color': helper.color_scheme['low_value']},
name='Signal Return Distribution',
showlegend=False)
normal_trace = go.Histogram(
x=np.random.normal(np.mean(filtered_series), np.std(filtered_series), len(filtered_series)),
marker={'color': helper.color_scheme['shadow']},
name='Normal Distribution',
showlegend=False)
fig.append_trace(filtered_series_trace, 1, series_i)
fig.append_trace(normal_trace, 1, series_i)
fig['layout']['xaxis{}'.format(series_i)].update(range=x_range)
fig['layout']['yaxis{}'.format(series_i)].update(range=y_range)
# Show legend
fig['data'][0]['showlegend'] = True
fig['data'][1]['showlegend'] = True
offline_py.iplot(fig, config=config)
```
#### File: .Trash-0/files/helper 4.py
```python
import pandas as pd
import os
import tempfile
import zipfile
import glob
from tqdm import tqdm
import math
import requests
def download_quandl_dataset(quandl_api_key, database, dataset, save_path, columns, tickers, start_date, end_date):
"""
Download a dataset from Quandl and save it to `save_path`.
Filter by columns, tickers, and date
:param quandl_api_key: The Quandl API key
:param database: The Quandl database to download from
:param dataset: The dataset to download
:param save_path: The path to save the dataset
:param columns: The columns to save
:param tickers: The tickers to save
:param start_date: The rows to save that are older than this date
:param end_date: The rows to save that are younger than this date
"""
scrape_url = 'https://www.quandl.com/api/v3/datatables/{}/{}?qopts.export=true&api_key={}'\
.format(database, dataset, quandl_api_key)
scrape_request = requests.get(scrape_url)
bulk_download_url = scrape_request.json()['datatable_bulk_download']['file']['link']
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_wiki_file = tmp_dir + 'tmp.zip'
bulk_download_request = requests.get(bulk_download_url, stream=True, cookies=scrape_request.cookies)
total_size = int(bulk_download_request.headers.get('content-length', 0))
block_size = 1024 * 1024
with open(tmp_wiki_file, 'wb') as f:
for data in tqdm(
bulk_download_request.iter_content(block_size),
total=math.ceil(total_size / block_size),
unit='MB',
unit_scale=True,
desc='Downloading Data'):
f.write(data)
with tqdm(total=5, desc='Transforming Data', unit='Action') as pbar:
# Unzip downloaded data
zip_ref = zipfile.ZipFile(tmp_wiki_file, 'r')
zip_ref.extractall(tmp_dir)
zip_ref.close()
pbar.update(1)
# Check if the zip file only contains one csv file
# We're assuming that Quandl will always give us the data in a single csv file.
# If it's different, we want to throw an error.
csv_files = glob.glob(os.path.join(tmp_dir, '*.csv'))
assert len(csv_files) == 1,\
'Bulk download of Quandl Wiki data failed. Wrong number of csv files found. Found {} file(s).'\
.format(len(csv_files))
tmp_csv_file = csv_files[0]
tmp_df = pd.read_csv(tmp_csv_file)
pbar.update(1)
tmp_df['date'] = pd.to_datetime(tmp_df['date'])
pbar.update(1)
# Remove unused data and save
tmp_df = tmp_df[tmp_df['date'].isin(pd.date_range(start_date, end_date))] # Filter unused dates
tmp_df = tmp_df[tmp_df['ticker'].isin(tickers)] # Filter unused tickers
pbar.update(1)
tmp_df.to_csv(save_path, columns=columns, index=False) # Filter unused columns and save
pbar.update(1)
```
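The tail of the helper filters the bulk download by date range, ticker universe, and column list. A toy version of the date and ticker filtering, on made-up rows, looks like this:
```python
import pandas as pd

df = pd.DataFrame({
    'ticker': ['AAPL', 'AAPL', 'MSFT', 'IBM'],
    'date': pd.to_datetime(['2017-01-03', '2017-02-01', '2017-01-04', '2017-01-05']),
    'adj_close': [1.0, 2.0, 3.0, 4.0]})            # illustrative values only

tickers = ['AAPL', 'MSFT']
start_date, end_date = '2017-01-01', '2017-01-31'

df = df[df['date'].isin(pd.date_range(start_date, end_date))]   # drop dates outside the window
df = df[df['ticker'].isin(tickers)]                             # drop tickers outside the universe
print(df)
```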
#### File: .Trash-0/files/project_helper.py
```python
import numpy as np
from IPython.core.display import display, HTML
import plotly.graph_objs as go
from plotly import tools
import plotly.offline as offline_py
offline_py.init_notebook_mode(connected=True)
_color_scheme = {
'index': '#B6B2CF',
'etf': '#2D3ECF',
'tracking_error': '#6F91DE',
'df_header': 'silver',
'df_value': 'white',
'df_line': 'silver',
'heatmap_colorscale': [(0, '#6F91DE'), (0.5, 'grey'), (1, 'red')]}
def _generate_config():
return {'showLink': False, 'displayModeBar': False, 'showAxisRangeEntryBoxes': True}
def _generate_heatmap_trace(df, x_label, y_label, z_label, scale_min, scale_max):
x_hover_text_values = np.tile(df.columns, (len(df.index), 1))
y_hover_text_values = np.tile(df.index, (len(df.columns), 1))
padding_len = np.full(3, max(len(x_label), len(y_label), len(z_label))) -\
[len(x_label), len(y_label), len(z_label)]
# Additional padding added to ticker and date to align
hover_text = y_label + ': ' + padding_len[1] * ' ' + y_hover_text_values.T + '<br>' + \
x_label + ': ' + padding_len[0] * ' ' + x_hover_text_values + '<br>' + \
z_label + ': ' + padding_len[2] * ' ' + df.applymap('{:.3f}'.format)
return go.Heatmap(
x=df.columns,
y=df.index,
z=df.values,
zauto=False,
zmax=scale_max,
zmin=scale_min,
colorscale=_color_scheme['heatmap_colorscale'],
text=hover_text.values,
hoverinfo='text')
def _sanatize_string(string):
return ''.join([i for i in string if i.isalpha()])
def large_dollar_volume_stocks(df, price_column, volume_column, top_percent):
"""
Get the stocks with the largest dollar volume stocks.
Parameters
----------
df : DataFrame
Stock prices with dates and ticker symbols
price_column : str
The column with the price data in `df`
volume_column : str
The column with the volume in `df`
top_percent : float
The top x percent to consider largest in the stock universe
Returns
-------
large_dollar_volume_stocks_symbols : List of str
List of of large dollar volume stock symbols
"""
dollar_traded = df.groupby('ticker').apply(lambda row: sum(row[volume_column] * row[price_column]))
return dollar_traded.sort_values().tail(int(len(dollar_traded) * top_percent)).index.values.tolist()
def plot_benchmark_returns(index_data, etf_data, title):
config = _generate_config()
index_trace = go.Scatter(
name='Index',
x=index_data.index,
y=index_data,
line={'color': _color_scheme['index']})
etf_trace = go.Scatter(
name='ETF',
x=etf_data.index,
y=etf_data,
line={'color': _color_scheme['etf']})
layout = go.Layout(
title=title,
xaxis={'title': 'Date'},
yaxis={'title': 'Cumulative Returns', 'range': [0, 3]})
fig = go.Figure(data=[index_trace, etf_trace], layout=layout)
offline_py.iplot(fig, config=config)
def plot_tracking_error(tracking_error, title):
config = _generate_config()
trace = go.Scatter(
x=tracking_error.index,
y=tracking_error,
line={'color': _color_scheme['tracking_error']})
layout = go.Layout(
title=title,
xaxis={'title': 'Date'},
yaxis={'title': 'Error', 'range': [-1.5, 1.5]})
fig = go.Figure(data=[trace], layout=layout)
offline_py.iplot(fig, config=config)
def print_dataframe(df, n_rows=10, n_columns=3):
missing_val_str = '...'
config = _generate_config()
formatted_df = df.iloc[:n_rows, :n_columns]
formatted_df = formatted_df.applymap('{:.3f}'.format)
if len(df.columns) > n_columns:
formatted_df[missing_val_str] = [missing_val_str]*len(formatted_df.index)
if len(df.index) > n_rows:
formatted_df.loc[missing_val_str] = [missing_val_str]*len(formatted_df.columns)
trace = go.Table(
type='table',
columnwidth=[1, 3],
header={
'values': [''] + list(formatted_df.columns.values),
'line': {'color': _color_scheme['df_line']},
'fill': {'color': _color_scheme['df_header']},
'font': {'size': 13}},
cells={
'values': formatted_df.reset_index().values.T,
'line': {'color': _color_scheme['df_line']},
'fill': {'color': [_color_scheme['df_header'], _color_scheme['df_value']]},
'font': {'size': 13}})
offline_py.iplot([trace], config=config)
def plot_weights(weights, title):
config = _generate_config()
graph_path = 'graphs/{}.html'.format(_sanatize_string(title))
trace = _generate_heatmap_trace(weights, 'Date', 'Ticker', 'Weight', 0.0, 0.2)
layout = go.Layout(
title=title,
xaxis={'title': 'Dates'},
yaxis={'title': 'Tickers'})
fig = go.Figure(data=[trace], layout=layout)
offline_py.plot(fig, config=config, filename=graph_path, auto_open=False)
display(HTML('The graph for {} is too large. You can view it <a href="{}" target="_blank">here</a>.'
.format(title, graph_path)))
def plot_returns(returns, title):
config = _generate_config()
graph_path = 'graphs/{}.html'.format(_sanatize_string(title))
trace = _generate_heatmap_trace(returns, 'Date', 'Ticker', 'Weight', -0.3, 0.3)
layout = go.Layout(
title=title,
xaxis={'title': 'Dates'},
yaxis={'title': 'Tickers'})
fig = go.Figure(data=[trace], layout=layout)
offline_py.plot(fig, config=config, filename=graph_path, auto_open=False)
display(HTML('The graph for {} is too large. You can view it <a href="{}" target="_blank">here</a>.'
.format(title, graph_path)))
def plot_covariance(xty, xtx):
config = _generate_config()
xty_trace = go.Bar(
x=xty.index,
y=xty.values)
xtx_trace = _generate_heatmap_trace(xtx, 'Ticker 2', 'Ticker 1', 'Covariance', 0.0, 1.0)
fig = tools.make_subplots(rows=1, cols=2, subplot_titles=['XTY', 'XTX'], print_grid=False)
fig.append_trace(xty_trace, 1, 1)
fig.append_trace(xtx_trace, 1, 2)
fig['layout']['xaxis1'].update(title='Tickers')
fig['layout']['yaxis1'].update(title='Covariance')
fig['layout']['xaxis2'].update(title='Tickers')
fig['layout']['yaxis2'].update(title='Tickers')
offline_py.iplot(fig, config=config)
```
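A quick check of `large_dollar_volume_stocks` on toy data, assuming the module above is importable as `project_helper`: dollar volume is price times volume summed per ticker, and the top fraction of tickers by that total is returned.
```python
import pandas as pd
from project_helper import large_dollar_volume_stocks  # assumes the module above is on the path

df = pd.DataFrame({
    'ticker': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
    'adj_close': [1.0, 2.0, 10.0, 11.0, 5.0, 4.0, 20.0, 20.0],
    'adj_volume': [100, 100, 100, 100, 10, 10, 50, 50]})   # toy rows, two per ticker

top = large_dollar_volume_stocks(df, 'adj_close', 'adj_volume', top_percent=0.5)
print(top)   # ['D', 'B'] -- the two tickers with the largest summed dollar volume
```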
#### File: .Trash-0/files/project_tests 4.py
```python
from collections import OrderedDict
import pandas as pd
import numpy as np
from datetime import date, timedelta
pd.options.display.float_format = '{:.8f}'.format
def _generate_random_tickers(n_tickers=None):
min_ticker_len = 3
max_ticker_len = 5
tickers = []
if not n_tickers:
n_tickers = np.random.randint(8, 14)
ticker_symbol_random = np.random.randint(ord('A'), ord('Z')+1, (n_tickers, max_ticker_len))
ticker_symbol_lengths = np.random.randint(min_ticker_len, max_ticker_len, n_tickers)
for ticker_symbol_rand, ticker_symbol_length in zip(ticker_symbol_random, ticker_symbol_lengths):
ticker_symbol = ''.join([chr(c_id) for c_id in ticker_symbol_rand[:ticker_symbol_length]])
tickers.append(ticker_symbol)
return tickers
def _generate_random_dates(n_days=None):
if not n_days:
n_days = np.random.randint(14, 20)
start_year = np.random.randint(1999, 2017)
start_month = np.random.randint(1, 12)
start_day = np.random.randint(1, 29)
start_date = date(start_year, start_month, start_day)
dates = []
for i in range(n_days):
dates.append(start_date + timedelta(days=i))
return dates
def _generate_random_dfs(n_df, index, columns):
all_df_data = np.random.random((n_df, len(index), len(columns)))
return [pd.DataFrame(df_data, index, columns) for df_data in all_df_data]
def _generate_output_error_msg(fn_name, fn_inputs, fn_outputs, fn_expected_outputs):
formatted_inputs = []
formatted_outputs = []
formatted_expected_outputs = []
for input_name, input_value in fn_inputs.items():
formatted_inputs.append('INPUT {}:\n{}\n'.format(
input_name, str(input_value)))
for output_name, output_value in fn_outputs.items():
formatted_outputs.append('OUTPUT {}:\n{}\n'.format(
output_name, str(output_value)))
for expected_output_name, expected_output_value in fn_expected_outputs.items():
formatted_expected_outputs.append('EXPECTED OUTPUT FOR {}:\n{}\n'.format(
expected_output_name, str(expected_output_value)))
return 'Wrong value for {}.\n' \
'{}\n' \
'{}\n' \
'{}' \
.format(
fn_name,
'\n'.join(formatted_inputs),
'\n'.join(formatted_outputs),
'\n'.join(formatted_expected_outputs))
def _assert_output(fn, fn_inputs, fn_expected_outputs):
assert type(fn_expected_outputs) == OrderedDict
fn_outputs = OrderedDict()
fn_raw_out = fn(**fn_inputs)
if len(fn_expected_outputs) == 1:
fn_outputs[list(fn_expected_outputs)[0]] = fn_raw_out
elif len(fn_expected_outputs) > 1:
assert type(fn_raw_out) == tuple,\
'Expecting function to return tuple, got type {}'.format(type(fn_raw_out))
assert len(fn_raw_out) == len(fn_expected_outputs),\
'Expected {} outputs in tuple, only found {} outputs'.format(len(fn_expected_outputs), len(fn_raw_out))
for key_i, output_key in enumerate(fn_expected_outputs.keys()):
fn_outputs[output_key] = fn_raw_out[key_i]
err_message = _generate_output_error_msg(
fn.__name__,
fn_inputs,
fn_outputs,
fn_expected_outputs)
for fn_out, (out_name, expected_out) in zip(fn_outputs.values(), fn_expected_outputs.items()):
assert isinstance(fn_out, type(expected_out)),\
'Wrong type for output {}. Got {}, expected {}'.format(out_name, type(fn_out), type(expected_out))
if hasattr(expected_out, 'shape'):
assert fn_out.shape == expected_out.shape, \
'Wrong shape for output {}. Got {}, expected {}'.format(out_name, fn_out.shape, expected_out.shape)
if type(expected_out) == pd.DataFrame:
assert set(fn_out.columns) == set(expected_out.columns), \
'Incorrect columns for output {}\n' \
'COLUMNS: {}\n' \
'EXPECTED COLUMNS: {}'.format(out_name, sorted(fn_out.columns), sorted(expected_out.columns))
if type(expected_out) in {pd.DataFrame, pd.Series}:
assert set(fn_out.index) == set(expected_out.index), \
'Incorrect indices for output {}\n' \
'INDICES: {}\n' \
'EXPECTED INDICES: {}'.format(out_name, sorted(fn_out.index), sorted(expected_out.index))
out_is_close = np.isclose(fn_out, expected_out, equal_nan=True)
if not isinstance(out_is_close, bool):
out_is_close = out_is_close.all()
assert out_is_close, err_message
def project_test(func):
def func_wrapper(*args):
result = func(*args)
print('Tests Passed')
return result
return func_wrapper
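# Usage sketch (the solution function name below is illustrative, not from this file):
# each decorated test is called from the project notebook with the student's own
# implementation, e.g.
#   import project_tests
#   project_tests.test_generate_returns(generate_returns)
# where `generate_returns` is the notebook's solution function.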
@project_test
def test_generate_weighted_returns(fn):
tickers = _generate_random_tickers(3)
dates = _generate_random_dates(4)
fn_inputs = {
'returns': pd.DataFrame(
[
[np.nan, -0.0355852, -0.00461228, 0.00435667],
[np.nan, -0.0114943, -0.00106678, 0.016446],
[np.nan, -0.00326797, 0.00721311, 0.00537109]],
tickers, dates),
'weights': pd.DataFrame(
[
[0.0045101, 0.00761073, 0.0050893, 0.00593444],
[0.0980038, 0.0780279, 0.0742108, 0.0854871],
[0.0121753, 0.00943077, 0.0093783, 0.00886865]],
tickers, dates)}
fn_correct_outputs = OrderedDict([
(
'weighted_returns',
pd.DataFrame(
[
[np.nan, -0.000270829, -2.34733e-05, 2.58544e-05],
[np.nan, -0.000896876, -7.91666e-05, 0.00140592],
[np.nan, -3.08195e-05, 6.76467e-05, 4.76343e-05]],
tickers, dates))])
_assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_generate_returns(fn):
tickers = _generate_random_tickers(3)
dates = _generate_random_dates(4)
fn_inputs = {
'close': pd.DataFrame(
[
[35.4411, 34.1799, 34.0223, 34.1705],
[92.1131, 91.0543, 90.9572, 92.453],
[57.9708, 57.7814, 58.1982, 58.5107]],
tickers, dates)}
fn_correct_outputs = OrderedDict([
(
'returns',
pd.DataFrame(
[
[np.nan, -0.0355858, -0.0046109, 0.00435597],
[np.nan, -0.0114946, -0.0010664, 0.0164451],
[np.nan, -0.00326716, 0.00721339, 0.00536958]],
tickers, dates))])
_assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_generate_dollar_volume_weights(fn):
tickers = _generate_random_tickers(3)
dates = _generate_random_dates(4)
fn_inputs = {
'close': pd.DataFrame(
[
[35.4411, 34.1799, 34.0223, 34.1705],
[92.1131, 91.0543, 90.9572, 92.453],
[57.9708, 57.7814, 58.1982, 58.5107]],
tickers, dates),
'volume': pd.DataFrame(
[
[9.83683e+06, 1.78072e+07, 8.82982e+06, 1.06742e+07],
[8.22427e+07, 6.85315e+07, 4.81601e+07, 5.68313e+07],
[1.62348e+07, 1.30527e+07, 9.51201e+06, 9.31601e+06]],
tickers, dates)}
fn_correct_outputs = OrderedDict([
(
'dollar_volume_weights',
pd.DataFrame(
[
[0.0393246, 0.0800543, 0.0573905, 0.0591726],
[0.854516, 0.820747, 0.836853, 0.852398],
[0.106159, 0.0991989, 0.105756, 0.0884298]],
tickers, dates))])
_assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_calculate_cumulative_returns(fn):
tickers = _generate_random_tickers(3)
dates = _generate_random_dates(4)
fn_inputs = {
'returns': pd.DataFrame(
[
[np.nan, -0.000270829, -2.34733e-05, 2.58544e-05],
[np.nan, -0.000896873, -7.91666e-05, 0.00140592],
[np.nan, -3.08195e-05, 6.76468e-05, 4.76344e-05]],
tickers, dates)}
fn_correct_outputs = OrderedDict([
(
'cumulative_returns',
pd.Series(
[np.nan, 0.99880148, 0.99876653, 1.00024411],
dates))])
_assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_calculate_dividend_weights(fn):
tickers = _generate_random_tickers(3)
dates = _generate_random_dates(4)
fn_inputs = {
'ex_dividend': pd.DataFrame(
[
[0.0, 0.0, 0.1, 0.0],
[0.0, 0.0, 0.0, 0.2],
[0.0, 0.0, 0.0, 0.3]],
tickers, dates)}
fn_correct_outputs = OrderedDict([
(
'dividend_weights',
pd.DataFrame(
[
[np.nan, np.nan, 1.0, 0.16666666],
[np.nan, np.nan, 0.0, 0.33333333],
[np.nan, np.nan, 0.0, 0.5]],
tickers, dates))])
_assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_get_covariance(fn):
tickers = _generate_random_tickers(3)
dates = _generate_random_dates(4)
fn_inputs = {
'returns': pd.DataFrame(
[
[np.nan, -0.0355852, -0.00461228, 0.00435667],
[np.nan, -0.0114943, -0.00106678, 0.016446],
[np.nan, -0.00326797, 0.00721311, 0.00537109]],
tickers, dates),
'weighted_index_returns': pd.DataFrame(
[
[np.nan, -0.000270829, -2.34733e-05, 2.58544e-05],
[np.nan, -0.000896873, -7.91666e-05, 0.00140592],
[np.nan, -3.08195e-05, 6.76468e-05, 4.76344e-05]],
tickers, dates)}
fn_correct_outputs = OrderedDict([
(
'xtx',
np.array(
[
[0.00130656, 0.000485597, 0.000106423],
[0.000485597, 0.000403728, 0.000118201],
[0.000106423, 0.000118201, 9.15572e-05]])),
(
'xty',
np.array([4.92563e-05, 3.81439e-05, 1.16104e-05]))])
_assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_rebalance_portfolio(fn):
tickers = _generate_random_tickers(3)
dates = _generate_random_dates(11)
fn_inputs = {
'returns': pd.DataFrame(
[
[np.nan, -0.0355852, -0.00461228, 0.00435667, -0.0396183, -0.0121951,
0.00685871, -0.0027248, 0.0251973,-0.026947, -0.0465612],
[np.nan, -0.0114943, -0.00106678, 0.016446, -0.0104013, -0.0040344,
-0.00557701, 0.000754961, 0.00678952, -0.00974095, -0.0234569],
[np.nan, -0.00326797, 0.00721311, 0.00537109, -0.00501862, 0.0143183,
0.00272698, 0.019037, 0.000627943, -0.0163163, -0.00334928]],
tickers, dates),
'weighted_index_returns': pd.DataFrame(
[
[np.nan, -0.000270829, -2.34733e-05, 2.58544e-05, -0.000291808, -8.56712e-05,
5.10542e-05, -1.63907e-05, 0.000127297, -0.000126851, -0.000330526],
[np.nan, -0.000896873, -7.91666e-05, 0.00140592, -0.000653316, -0.000246364,
-0.000395049, 4.47478e-05, 0.000389117, -0.000449979, -0.00254699],
[np.nan, -3.08195e-05, 6.76468e-05, 4.76344e-05, -4.24937e-05, 0.000136497,
3.14274e-05, 0.000226068, 8.55098e-06, -0.000161634, -3.06379e-05]],
tickers, dates),
'shift_size': 3,
'chunk_size': 2}
fn_correct_outputs = OrderedDict([
(
'all_rebalance_weights',
[
np.array([0.00012205033508460705, 0.0003019915743383353, 0.999575958090577]),
np.array([1.305709815242165e-05, 8.112998801084706e-06, 0.9999788299030465]),
np.array([0.3917481750142896, 0.5607687848565064, 0.0474830401292039])])])
_assert_output(fn, fn_inputs, fn_correct_outputs)
@project_test
def test_get_rebalance_cost(fn):
fn_inputs = {
'all_rebalance_weights': [
np.array([0.00012205033508460705, 0.0003019915743383353, 0.999575958090577]),
np.array([1.305709815242165e-05, 8.112998801084706e-06, 0.9999788299030465]),
np.array([0.3917481750142896, 0.5607687848565064, 0.0474830401292039])],
'shift_size': 3,
'rebalance_count': 11}
fn_correct_outputs = OrderedDict([('rebalancing_cost', 0.51976290)])
_assert_output(fn, fn_inputs, fn_correct_outputs)
``` |
{
"source": "JingZhang918/master_thesis",
"score": 3
} |
#### File: master_thesis/DRL_Automated_Trading/data_process.py
```python
import pandas as pd
import config
import numpy as np
def get_revised_yearly_return(df_drl) -> tuple:
    # to deal with companies that have incomplete yearly data
df_drl = df_drl.set_index(["date"])
temp1 = \
df_drl.groupby(["ticker", df_drl.index.year]).apply(lambda x: pd.Series({
"count": x.asset.count(),
"first": x.asset[0],
"last": x.asset[-1],
}
))
temp1["days"] = [config.yearly_trading_days[y] for y in temp1.index.get_level_values(1)]
temp1["factor"] = temp1["days"].div(temp1["count"])
temp1["revised_return"] = (temp1["last"].div(temp1["first"])) ** temp1["factor"]
temp1["revised_first"] = temp1["last"].div(temp1["revised_return"])
temp2 = temp1.groupby(level=["date"])['revised_first', 'last'].apply(np.sum)
yearly_asset_return = temp2["last"].div(temp2["revised_first"]) - 1
yearly_reward_return = (df_drl.groupby([df_drl.index.year]).reward.apply(np.sum)).div(temp2["revised_first"])
return yearly_asset_return, yearly_reward_return
def get_revised_monthly_return(df_drl) -> tuple:
    # to deal with companies that have incomplete monthly data
df_drl = df_drl.set_index(["date"])
temp1 = \
df_drl.groupby(["ticker", df_drl.index.year, df_drl.index.month]).apply(lambda x: pd.Series({
"count": x.asset.count(),
"first": x.asset[0],
"last": x.asset[-1],
}
))
temp1.index.names = ["ticker", "year", "month"]
temp1["days"] = [config.monthly_trading_days[y][m] for y, m in
zip(temp1.index.get_level_values(1), temp1.index.get_level_values(2))]
temp1["factor"] = temp1["days"].div(temp1["count"])
temp1["revised_return"] = (temp1["last"].div(temp1["first"])) ** temp1["factor"]
temp1["revised_first"] = temp1["last"].div(temp1["revised_return"])
temp2 = temp1.groupby(level=["year", "month"])['revised_first', 'last'].apply(np.sum)
monthly_asset_return = temp2["last"].div(temp2["revised_first"]) - 1
monthly_reward_return = (df_drl.groupby([df_drl.index.year, df_drl.index.month]).reward.apply(np.sum)).div(temp2["revised_first"])
return monthly_asset_return, monthly_reward_return
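# Minimal usage sketch (column names are inferred from the code above, so treat them as
# assumptions): `df_drl` should carry at least 'date', 'ticker', 'asset' and 'reward'
# columns, and `config` must provide yearly_trading_days / monthly_trading_days lookups:
#   yearly_asset_ret, yearly_reward_ret = get_revised_yearly_return(df_drl)
#   monthly_asset_ret, monthly_reward_ret = get_revised_monthly_return(df_drl)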
``` |
{
"source": "JingzhaoZhang/transformerxl-noise",
"score": 2
} |
#### File: transformerxl-noise/pytorch/train.py
```python
import argparse
import time
import math
import os, sys
import itertools
import warnings
import traceback
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
log = file if hasattr(file,'write') else sys.stderr
traceback.print_stack(file=log)
log.write(warnings.formatwarning(message, category, filename, lineno, line))
warnings.showwarning = warn_with_traceback
import numpy as np
import errno
import torch
import torch.nn as nn
import torch.optim as optim
from data_utils import get_lm_corpus
from mem_transformer import MemTransformerLM
from utils.exp_utils import *
from utils.linalg import *
from utils.data_parallel import BalancedDataParallel
import warnings
from tqdm import tqdm
# warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')
parser.add_argument('--data', type=str, default='../data/wikitext-103',
help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='wt103',
choices=['wt103', 'lm1b', 'enwik8', 'text8'],
help='dataset name')
parser.add_argument('--n_layer', type=int, default=12,
help='number of total layers')
parser.add_argument('--n_head', type=int, default=10,
help='number of heads')
parser.add_argument('--d_head', type=int, default=50,
help='head dimension')
parser.add_argument('--d_embed', type=int, default=-1,
help='embedding dimension')
parser.add_argument('--d_model', type=int, default=500,
help='model dimension')
parser.add_argument('--d_inner', type=int, default=1000,
help='inner dimension in FF')
parser.add_argument('--dropout', type=float, default=0.0,
help='global dropout rate')
parser.add_argument('--dropatt', type=float, default=0.0,
help='attention probability dropout rate')
parser.add_argument('--init', default='normal', type=str,
help='parameter initializer to use.')
parser.add_argument('--emb_init', default='normal', type=str,
help='parameter initializer to use.')
parser.add_argument('--init_range', type=float, default=0.1,
help='parameters initialized by U(-init_range, init_range)')
parser.add_argument('--emb_init_range', type=float, default=0.01,
help='parameters initialized by U(-init_range, init_range)')
parser.add_argument('--init_std', type=float, default=0.02,
help='parameters initialized by N(0, init_std)')
parser.add_argument('--proj_init_std', type=float, default=0.01,
help='parameters initialized by N(0, init_std)')
parser.add_argument('--optim', default='adam', type=str,
choices=['adam', 'sgd', 'adagrad'],
help='optimizer to use.')
parser.add_argument('--lr', type=float, default=0.00025,
help='initial learning rate (0.00025|5 for adam|sgd)')
parser.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
parser.add_argument('--scheduler', default='cosine', type=str,
choices=['cosine', 'inv_sqrt', 'dev_perf', 'constant'],
help='lr scheduler to use.')
parser.add_argument('--warmup_step', type=int, default=0,
help='upper epoch limit')
parser.add_argument('--decay_rate', type=float, default=0.5,
help='decay factor when ReduceLROnPlateau is used')
parser.add_argument('--lr_min', type=float, default=0.0,
help='minimum learning rate during annealing')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--clip_nonemb', action='store_true',
help='only clip the gradient of non-embedding params')
parser.add_argument('--max_step', type=int, default=100000,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=60,
help='batch size')
parser.add_argument('--batch_chunk', type=int, default=1,
help='split batch into chunks to save memory')
parser.add_argument('--tgt_len', type=int, default=70,
help='number of tokens to predict')
parser.add_argument('--eval_tgt_len', type=int, default=50,
help='number of tokens to predict for evaluation')
parser.add_argument('--ext_len', type=int, default=0,
help='length of the extended context')
parser.add_argument('--mem_len', type=int, default=0,
help='length of the retained previous heads')
parser.add_argument('--not_tied', action='store_true',
help='do not tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--adaptive', action='store_true',
help='use adaptive softmax')
parser.add_argument('--div_val', type=int, default=1,
                    help='dividend value for adaptive input and softmax')
parser.add_argument('--pre_lnorm', action='store_true',
help='apply LayerNorm to the input instead of the output')
parser.add_argument('--varlen', action='store_true',
help='use variable length')
parser.add_argument('--multi_gpu', action='store_true',
help='use multiple GPU')
parser.add_argument('--log-interval', type=int, default=50,
help='report interval')
parser.add_argument('--eval-interval', type=int, default=4000,
help='evaluation interval')
parser.add_argument('--work_dir', default='LM-TFM', type=str,
help='experiment directory.')
parser.add_argument('--restart', action='store_true',
help='restart training from the saved checkpoint')
parser.add_argument('--restart_dir', type=str, default='',
help='restart dir')
parser.add_argument('--debug', action='store_true',
help='run in debug mode (do not create exp dir)')
parser.add_argument('--same_length', action='store_true',
help='use the same attn length for all tokens')
parser.add_argument('--attn_type', type=int, default=0,
                    help='attention type. 0 for ours, 1 for Shaw et al, '
                         '2 for Vaswani et al, 3 for Al Rfou et al.')
parser.add_argument('--clamp_len', type=int, default=-1,
help='use the same pos embeddings after clamp_len')
parser.add_argument('--eta_min', type=float, default=0.0,
help='min learning rate for cosine scheduler')
parser.add_argument('--gpu0_bsz', type=int, default=-1,
help='batch size on gpu 0')
parser.add_argument('--max_eval_steps', type=int, default=-1,
help='max eval steps')
parser.add_argument('--sample_softmax', type=int, default=-1,
help='number of samples in sampled softmax')
parser.add_argument('--patience', type=int, default=0,
help='patience')
parser.add_argument('--finetune_v2', action='store_true',
help='finetune v2')
parser.add_argument('--finetune_v3', action='store_true',
help='finetune v3')
parser.add_argument('--fp16', action='store_true',
help='Run in pseudo-fp16 mode (fp16 storage fp32 math).')
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale, positive power of 2 values can '
'improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument'
' supersedes --static-loss-scale.')
parser.add_argument('-save_noise', action='store_true')
parser.add_argument('-noise_per_iter', type=int, default=500)
parser.add_argument('--save-dir', type=str, default='default',
help='path to save the final model')
parser.add_argument('-noise_size', type=int, default=800)
parser.add_argument('-save_sharpness', action='store_true')
parser.add_argument('-sharpness_per_iter', type=int, default=500)
parser.add_argument('-sharpness_batches', type=int, default=20)
args = parser.parse_args()
args.tied = not args.not_tied
if args.d_embed < 0:
args.d_embed = args.d_model
assert args.ext_len >= 0, 'extended context length must be non-negative'
assert args.batch_size % args.batch_chunk == 0
args.work_dir = '{}-{}'.format(args.work_dir, args.dataset)
args.work_dir = os.path.join(args.work_dir, time.strftime('%Y%m%d-%H%M%S'))
logging = create_exp_dir(args.work_dir,
scripts_to_save=['train.py', 'mem_transformer.py'], debug=args.debug)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print('WARNING: You have a CUDA device, so you should probably run with --cuda')
else:
torch.cuda.manual_seed_all(args.seed)
# Validate `--fp16` option
if args.fp16:
if not args.cuda:
print('WARNING: --fp16 requires --cuda, ignoring --fp16 option')
args.fp16 = False
else:
try:
from apex.fp16_utils import FP16_Optimizer
except:
print('WARNING: apex not installed, ignoring --fp16 option')
args.fp16 = False
device = torch.device('cuda' if args.cuda else 'cpu')
###############################################################################
# Load data
###############################################################################
corpus = get_lm_corpus(args.data, args.dataset)
ntokens = len(corpus.vocab)
args.n_token = ntokens
eval_batch_size = 10
tr_iter = corpus.get_iterator('train', args.batch_size, args.tgt_len,
device=device, ext_len=args.ext_len)
va_iter = corpus.get_iterator('valid', eval_batch_size, args.eval_tgt_len,
device=device, ext_len=args.ext_len)
te_iter = corpus.get_iterator('test', eval_batch_size, args.eval_tgt_len,
device=device, ext_len=args.ext_len)
# adaptive softmax / embedding
cutoffs, tie_projs = [], [False]
if args.adaptive:
assert args.dataset in ['wt103', 'lm1b']
if args.dataset == 'wt103':
cutoffs = [20000, 40000, 200000]
tie_projs += [True] * len(cutoffs)
elif args.dataset == 'lm1b':
cutoffs = [60000, 100000, 640000]
tie_projs += [False] * len(cutoffs)
###############################################################################
# Build the model
###############################################################################
def init_weight(weight):
if args.init == 'uniform':
nn.init.uniform_(weight, -args.init_range, args.init_range)
elif args.init == 'normal':
nn.init.normal_(weight, 0.0, args.init_std)
def init_bias(bias):
nn.init.constant_(bias, 0.0)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
init_weight(m.weight)
if hasattr(m, 'bias') and m.bias is not None:
init_bias(m.bias)
elif classname.find('AdaptiveEmbedding') != -1:
if hasattr(m, 'emb_projs'):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, args.proj_init_std)
elif classname.find('Embedding') != -1:
if hasattr(m, 'weight'):
init_weight(m.weight)
elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
init_weight(m.cluster_weight)
if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
init_bias(m.cluster_bias)
if hasattr(m, 'out_projs'):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, args.proj_init_std)
elif classname.find('LayerNorm') != -1:
if hasattr(m, 'weight'):
nn.init.normal_(m.weight, 1.0, args.init_std)
if hasattr(m, 'bias') and m.bias is not None:
init_bias(m.bias)
elif classname.find('TransformerLM') != -1:
if hasattr(m, 'r_emb'):
init_weight(m.r_emb)
if hasattr(m, 'r_w_bias'):
init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
init_weight(m.r_r_bias)
if hasattr(m, 'r_bias'):
init_bias(m.r_bias)
def update_dropout(m):
classname = m.__class__.__name__
if classname.find('Dropout') != -1:
if hasattr(m, 'p'):
m.p = args.dropout
def update_dropatt(m):
if hasattr(m, 'dropatt'):
m.dropatt.p = args.dropatt
if args.restart:
with open(os.path.join(args.restart_dir, 'model.pt'), 'rb') as f:
model = torch.load(f)
if not args.fp16:
model = model.float()
model.apply(update_dropout)
model.apply(update_dropatt)
else:
model = MemTransformerLM(ntokens, args.n_layer, args.n_head, args.d_model,
args.d_head, args.d_inner, args.dropout, args.dropatt,
tie_weight=args.tied, d_embed=args.d_embed, div_val=args.div_val,
tie_projs=tie_projs, pre_lnorm=args.pre_lnorm, tgt_len=args.tgt_len,
ext_len=args.ext_len, mem_len=args.mem_len, cutoffs=cutoffs,
same_length=args.same_length, attn_type=args.attn_type,
clamp_len=args.clamp_len, sample_softmax=args.sample_softmax)
model.apply(weights_init)
model.word_emb.apply(weights_init) # ensure embedding init is not overridden by out_layer in case of weight sharing
args.n_all_param = sum([p.nelement() for p in model.parameters()])
args.n_nonemb_param = sum([p.nelement() for p in model.layers.parameters()])
if args.fp16:
model = model.half()
if args.multi_gpu:
model = model.to(device)
if args.gpu0_bsz >= 0:
para_model = BalancedDataParallel(args.gpu0_bsz // args.batch_chunk,
model, dim=1).to(device)
else:
para_model = nn.DataParallel(model, dim=1).to(device)
else:
para_model = model.to(device)
#### optimizer
if args.optim.lower() == 'sgd':
if args.sample_softmax > 0:
dense_params, sparse_params = [], []
for param in model.parameters():
if param.size() == model.word_emb.weight.size():
sparse_params.append(param)
else:
dense_params.append(param)
optimizer_sparse = optim.SGD(sparse_params, lr=args.lr * 2)
optimizer = optim.SGD(dense_params, lr=args.lr, momentum=args.mom)
else:
optimizer = optim.SGD(model.parameters(), lr=args.lr,
momentum=args.mom)
elif args.optim.lower() == 'adam':
if args.sample_softmax > 0:
dense_params, sparse_params = [], []
for param in model.parameters():
if param.size() == model.word_emb.weight.size():
sparse_params.append(param)
else:
dense_params.append(param)
optimizer_sparse = optim.SparseAdam(sparse_params, lr=args.lr)
optimizer = optim.Adam(dense_params, lr=args.lr)
else:
optimizer = optim.Adam(model.parameters(), lr=args.lr)
elif args.optim.lower() == 'adagrad':
optimizer = optim.Adagrad(model.parameters(), lr=args.lr)
#### scheduler
if args.scheduler == 'cosine':
# here we do not set eta_min to lr_min to be backward compatible
# because in previous versions eta_min is default to 0
# rather than the default value of lr_min 1e-6
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
args.max_step, eta_min=args.eta_min) # should use eta_min arg
if args.sample_softmax > 0:
scheduler_sparse = optim.lr_scheduler.CosineAnnealingLR(optimizer_sparse,
args.max_step, eta_min=args.eta_min) # should use eta_min arg
elif args.scheduler == 'inv_sqrt':
# originally used for Transformer (in Attention is all you need)
def lr_lambda(step):
# return a multiplier instead of a learning rate
if step == 0 and args.warmup_step == 0:
return 1.
else:
return 1. / (step ** 0.5) if step > args.warmup_step \
else step / (args.warmup_step ** 1.5)
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
elif args.scheduler == 'dev_perf':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
factor=args.decay_rate, patience=args.patience, min_lr=args.lr_min)
if args.sample_softmax > 0:
scheduler_sparse = optim.lr_scheduler.ReduceLROnPlateau(optimizer_sparse,
factor=args.decay_rate, patience=args.patience, min_lr=args.lr_min)
elif args.scheduler == 'constant':
pass
if args.cuda and args.fp16:
# If args.dynamic_loss_scale is False, static_loss_scale will be used.
# If args.dynamic_loss_scale is True, it will take precedence over static_loss_scale.
optimizer = FP16_Optimizer(optimizer,
static_loss_scale = args.static_loss_scale,
dynamic_loss_scale = args.dynamic_loss_scale,
dynamic_loss_args = {'init_scale': 2 ** 16})
if args.restart:
if os.path.exists(os.path.join(args.restart_dir, 'optimizer.pt')):
with open(os.path.join(args.restart_dir, 'optimizer.pt'), 'rb') as f:
opt_state_dict = torch.load(f)
optimizer.load_state_dict(opt_state_dict)
else:
print('Optimizer was not saved. Start from scratch.')
logging('=' * 100)
for k, v in args.__dict__.items():
logging(' - {} : {}'.format(k, v))
logging('=' * 100)
logging('#params = {}'.format(args.n_all_param))
logging('#non emb params = {}'.format(args.n_nonemb_param))
###############################################################################
# Training code
###############################################################################
def evaluate(eval_iter):
# Turn on evaluation mode which disables dropout.
model.eval()
# If the model does not use memory at all, make the ext_len longer.
# Otherwise, make the mem_len longer and keep the ext_len the same.
if args.mem_len == 0:
model.reset_length(args.eval_tgt_len,
args.ext_len+args.tgt_len-args.eval_tgt_len, args.mem_len)
else:
model.reset_length(args.eval_tgt_len,
args.ext_len, args.mem_len+args.tgt_len-args.eval_tgt_len)
# Evaluation
total_len, total_loss = 0, 0.
with torch.no_grad():
mems = tuple()
for i, (data, target, seq_len) in enumerate(eval_iter):
if args.max_eval_steps > 0 and i >= args.max_eval_steps:
break
ret = model(data, target, *mems)
loss, mems = ret[0], ret[1:]
loss = loss.mean()
total_loss += seq_len * loss.float().item()
total_len += seq_len
# Switch back to the training mode
model.reset_length(args.tgt_len, args.ext_len, args.mem_len)
model.train()
return total_loss / total_len
def compute_grad_epoch(mems, train_iter):
noise_sq = []
stograd_sq = []
# Turn on training mode which enables dropout.
global train_step, train_loss, best_val_loss, eval_start_time, log_start_time
model.train()
optimizer.zero_grad()
# if args.batch_chunk > 1:
# mems = [tuple() for _ in range(args.batch_chunk)]
# else:
# mems = tuple()
for batch in range(args.noise_size):
data, target, seq_len = train_iter.__next__()
if args.batch_chunk > 1:
data_chunks = torch.chunk(data, args.batch_chunk, 1)
target_chunks = torch.chunk(target, args.batch_chunk, 1)
for i in range(args.batch_chunk):
data_i = data_chunks[i].contiguous()
target_i = target_chunks[i].contiguous()
ret = para_model(data_i, target_i, *mems[i])
loss, mems[i] = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss) / args.batch_chunk / args.noise_size
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
train_loss += loss.float().item()
else:
ret = para_model(data, target, *mems)
loss, mems = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss) / args.noise_size
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
train_loss += loss.float().item()
if train_step == args.max_step or batch == args.noise_size - 1:
break
true_grad = {}
clone_grad(model, true_grad)
optimizer.zero_grad()
return true_grad, mems
def compute_sto_grad_norm(mems, train_iter):
noise_sq = []
stograd_sq = []
# Turn on training mode which enables dropout.
global train_step, train_loss, best_val_loss, eval_start_time, log_start_time
model.train()
true_grads, mems = compute_grad_epoch(mems, train_iter)
gradnorm_sq = compute_norm(true_grads)
# if args.batch_chunk > 1:
# mems = [tuple() for _ in range(args.batch_chunk)]
# else:
# mems = tuple()
# train_iter = tr_iter.get_varlen_iter() if args.varlen else tr_iter.get_fixlen_iter
for batch in range(args.noise_size):
data, target, seq_len = train_iter.__next__()
model.zero_grad()
if args.batch_chunk > 1:
data_chunks = torch.chunk(data, args.batch_chunk, 1)
target_chunks = torch.chunk(target, args.batch_chunk, 1)
for i in range(args.batch_chunk):
data_i = data_chunks[i].contiguous()
target_i = target_chunks[i].contiguous()
ret = para_model(data_i, target_i, *mems[i])
loss, mems[i] = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss) / args.batch_chunk
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
train_loss += loss.float().item()
else:
ret = para_model(data, target, *mems)
loss, mems = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss)
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
train_loss += loss.float().item()
sto_grads = {}
clone_grad(model, sto_grads)
instance_noisesq, instance_gradsq = compute_noise(sto_grads, true_grads)
noise_sq.append(instance_noisesq)
stograd_sq.append(instance_gradsq)
if train_step == args.max_step or batch == args.noise_size - 1:
break
optimizer.zero_grad()
return noise_sq, stograd_sq, gradnorm_sq, mems
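# Note on the logged quantities (the semantics of compute_noise/compute_norm are assumed
# from utils.linalg, which is not shown here): with g the full-batch gradient estimated by
# compute_grad_epoch and g_i the per-batch stochastic gradients,
#   gradnorm_sq  ~ ||g||^2
#   stograd_sq_i ~ ||g_i||^2
#   noise_sq_i   ~ ||g_i - g||^2
# so np.mean(noise_sq) in train() estimates the gradient-noise variance written to train.csv.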
def train(cur_loss):
# Turn on training mode which enables dropout.
global train_step, train_loss, best_val_loss, eval_start_time, log_start_time
model.train()
if args.batch_chunk > 1:
mems = [tuple() for _ in range(args.batch_chunk)]
else:
mems = tuple()
train_iter = tr_iter.get_varlen_iter() if args.varlen else tr_iter.get_fixlen_iter()
# cur_loss = 0
print("start training")
for batch, (data, target, seq_len) in enumerate(train_iter):
model.zero_grad()
if args.save_sharpness and (batch % args.sharpness_per_iter == 0):
logging("Saving sharpness")
sharpness, mems = eigen_hessian(para_model, train_iter, args.sharpness_batches, mems)
weight_names, weights = param_weights(para_model)
weights_str = ['%4.4f' % w for w in weights]
with open(log_valid_file, 'a') as log_vf:
log_vf.write('{epoch},{sharpness: 8.5f},'.format(epoch=train_step, sharpness=sharpness) + ','.join(weights_str) + '\n')
if args.save_noise and (batch % args.noise_per_iter == 0):
logging("Saving noise")
true_gradnorm, sto_grad_norm, sto_noise_norm = 0,0,0
noise_sq, stograd_sq, true_gradnorm, mems = compute_sto_grad_norm(mems, train_iter)
sto_grad_norm = np.mean(stograd_sq)
sto_noise_norm = np.mean(noise_sq)
with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:
log_tf.write('{epoch},{loss: 8.5f},{gradnorm:3.3f},{sto_grad_norm:3.3f},{noisenorm:3.3f}\n'.format(
epoch=train_step, loss=cur_loss,
gradnorm=true_gradnorm, sto_grad_norm=sto_grad_norm, noisenorm=sto_noise_norm))
if args.batch_chunk > 1:
data_chunks = torch.chunk(data, args.batch_chunk, 1)
target_chunks = torch.chunk(target, args.batch_chunk, 1)
for i in range(args.batch_chunk):
data_i = data_chunks[i].contiguous()
target_i = target_chunks[i].contiguous()
ret = para_model(data_i, target_i, *mems[i])
loss, mems[i] = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss) / args.batch_chunk
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
train_loss += loss.float().item()
else:
ret = para_model(data, target, *mems)
loss, mems = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss)
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
train_loss += loss.float().item()
if args.fp16:
optimizer.clip_master_grads(args.clip)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
if args.sample_softmax > 0:
optimizer_sparse.step()
# step-wise learning rate annealing
train_step += 1
if args.scheduler in ['cosine', 'constant', 'dev_perf']:
# linear warmup stage
if train_step < args.warmup_step:
curr_lr = args.lr * train_step / args.warmup_step
optimizer.param_groups[0]['lr'] = curr_lr
if args.sample_softmax > 0:
optimizer_sparse.param_groups[0]['lr'] = curr_lr * 2
else:
if args.scheduler == 'cosine':
scheduler.step(train_step)
if args.sample_softmax > 0:
scheduler_sparse.step(train_step)
elif args.scheduler == 'inv_sqrt':
scheduler.step(train_step)
if train_step % args.log_interval == 0:
cur_loss = train_loss / args.log_interval
elapsed = time.time() - log_start_time
log_str = '| epoch {:3d} step {:>8d} | {:>6d} batches | lr {:.3g} ' \
'| ms/batch {:5.2f} | loss {:5.2f}'.format(
epoch, train_step, batch+1, optimizer.param_groups[0]['lr'],
elapsed * 1000 / args.log_interval, cur_loss)
if args.dataset in ['enwik8', 'text8']:
log_str += ' | bpc {:9.5f}'.format(cur_loss / math.log(2))
else:
log_str += ' | ppl {:9.3f}'.format(math.exp(cur_loss))
logging(log_str)
train_loss = 0
log_start_time = time.time()
if train_step % args.eval_interval == 0:
val_loss = evaluate(va_iter)
logging('-' * 100)
log_str = '| Eval {:3d} at step {:>8d} | time: {:5.2f}s ' \
'| valid loss {:5.2f}'.format(
train_step // args.eval_interval, train_step,
(time.time() - eval_start_time), val_loss)
if args.dataset in ['enwik8', 'text8']:
log_str += ' | bpc {:9.5f}'.format(val_loss / math.log(2))
else:
log_str += ' | valid ppl {:9.3f}'.format(math.exp(val_loss))
logging(log_str)
logging('-' * 100)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
if not args.debug:
with open(os.path.join(args.work_dir, 'model.pt'), 'wb') as f:
torch.save(model, f)
with open(os.path.join(args.work_dir, 'optimizer.pt'), 'wb') as f:
torch.save(optimizer.state_dict(), f)
best_val_loss = val_loss
# dev-performance based learning rate annealing
if args.scheduler == 'dev_perf':
scheduler.step(val_loss)
if args.sample_softmax > 0:
scheduler_sparse.step(val_loss)
eval_start_time = time.time()
if train_step == args.max_step:
break
return train_loss/batch
# Loop over epochs.
train_step = 0
train_loss = 0
best_val_loss = None
log_start_time = time.time()
eval_start_time = time.time()
save_dir = './ckpt/' + args.save_dir + '/'
log_train_file = save_dir + 'train.csv'
log_valid_file = save_dir + 'sharpness.csv'
try:
os.makedirs(save_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
weight_names, weights = param_weights(para_model)
with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf:
log_tf.write('epoch,loss,gradnormsq,sto_grad_normsq,noisenormsq\n')
log_vf.write('epoch,sharpness,' +','.join(weight_names) + '\n')
# At any point you can hit Ctrl + C to break out of training early.
cur_loss = 0
for epoch in itertools.count(start=1):
cur_loss = train(cur_loss)
test_loss = evaluate(te_iter)
if train_step == args.max_step:
logging('-' * 100)
logging('End of training')
break
# Load the best saved model.
with open(os.path.join(args.work_dir, 'model.pt'), 'rb') as f:
model = torch.load(f)
para_model = model.to(device)
# Run on test data.
test_loss = evaluate(te_iter)
logging('=' * 100)
if args.dataset in ['enwik8', 'text8']:
logging('| End of training | test loss {:5.2f} | test bpc {:9.5f}'.format(
test_loss, test_loss / math.log(2)))
else:
logging('| End of training | test loss {:5.2f} | test ppl {:9.3f}'.format(
test_loss, math.exp(test_loss)))
logging('=' * 100)
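# Example invocation (illustrative only; the flags follow the argparse definitions above,
# while paths and hyper-parameters are placeholders):
#   python train.py --cuda --data ../data/wikitext-103 --dataset wt103 --adaptive \
#       --n_layer 12 --d_model 500 --batch_size 60 --max_step 100000 \
#       -save_noise -noise_per_iter 500 --save-dir noise_run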
``` |
{
"source": "JingzhaoZhang/why-clipping-accelerates",
"score": 3
} |
#### File: JingzhaoZhang/why-clipping-accelerates/utils.py
```python
import torch
import math
import os
import re
import csv
import sys
class CSVLogger(object):
def __init__(self, filename, args, keys):
self.filename = filename
self.args = args
self.keys = keys
self.values = {k:[] for k in keys}
self.init_file()
def init_file(self):
# This will overwrite previous file
if os.path.exists(self.filename):
return
directory = os.path.dirname(self.filename)
if not os.path.exists(directory):
os.makedirs(directory)
with open(self.filename, 'w') as logfile:
logwriter = csv.writer(logfile, delimiter=',')
# logwriter.writerow([str(self.args)])
logwriter.writerow(self.keys)
def write_row(self, values):
assert len(values) == len(self.keys)
if not os.path.exists(self.filename):
self.init_file()
with open(self.filename, 'a') as logfile:
logwriter = csv.writer(logfile, delimiter=',')
logwriter.writerow(values)
def repackage_hidden(h):
"""Wraps hidden states in new Tensors,
to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def batchify(data, bsz, args):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
if args.cuda:
data = data.cuda()
return data
def get_batch(source, i, args, seq_len=None, evaluation=False):
seq_len = min(seq_len if seq_len else args.bptt, len(source) - 1 - i)
data = source[i:i+seq_len]
target = source[i+1:i+1+seq_len].view(-1)
return data, target
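# Shape sketch: for a 1-D token tensor of length T and bsz=B, batchify returns a
# (T // B, B) tensor whose columns are contiguous slices of the corpus; get_batch then
# cuts length-`bptt` windows along dim 0, with the target shifted by one position and
# flattened for the loss.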
def get_model_grads(model):
return [p.grad.data for _, p in model.named_parameters() if \
hasattr(p, 'grad') and (p.grad is not None)]
def get_model_params(model):
return [p.data for _, p in model.named_parameters() if \
hasattr(p, 'grad') and (p.grad is not None)]
def norm_diff(list1, list2=None):
if not list2:
list2 = [0] * len(list1)
assert len(list1) == len(list2)
return math.sqrt(sum((list1[i]-list2[i]).norm()**2 for i in range(len(list1))))
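# Hedged usage sketch (the snapshot variable is hypothetical): norm_diff(get_model_grads(model))
# gives the global gradient norm, while norm_diff(get_model_params(model), params_before)
# measures how far the parameters have moved from an earlier copy `params_before`.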
``` |
{
"source": "jingzhehu/udacity_sdcnd",
"score": 2
} |
#### File: term1/P3_behavior_cloning/drive.py
```python
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
import cv2
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
tf.python.control_flow_ops = control_flow_ops
sio = socketio.Server()
app = Flask(__name__)
model = None
def preprocess_input(img):
''' Crop, resize and convert input image from RGB to HLS colorspace
:param img: np array of uint8
:return: preprocessed image in float32
'''
img = cv2.resize(img[60:140, 40:280], (200, 66))
return cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype("float32")/255.0 - 0.5
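# The result is a (66, 200, 3) float32 HLS image scaled to roughly [-0.5, 0.5],
# matching the input shape the steering model expects below.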
@sio.on('telemetry')
def telemetry(sid, data):
# current steering angle of the car
steering_angle = data["steering_angle"]
# current throttle of the car
throttle = data["throttle"]
# current speed of the car
speed = float(data["speed"])
# current image from the center camera of the car
img_string = data["image"]
img = Image.open(BytesIO(base64.b64decode(img_string)))
# preprocess image from center camera
img = np.array(img, dtype=np.uint8)
img = preprocess_input(img)
img = img[None, :, :, :]
# predict steering angle from preprocessed image
# model accepts (66, 200, 3, dtype=float32) as input
steering_angle = float(model.predict(img, batch_size=1))
throttle_max = 1.0
throttle_min = -1.0
steering_threshold = 3. / 25
# targets for speed controller
nominal_set_speed = 30
steering_set_speed = 30
K = 0.35 # proportional gain
# slow down for turns
if abs(steering_angle) > steering_threshold:
set_speed = steering_set_speed
else:
set_speed = nominal_set_speed
throttle = (set_speed - speed) * K
throttle = min(throttle_max, throttle)
throttle = max(throttle_min, throttle)
# else don't change from previous
print("steering angle {:6.3f}, throttle {:6.3f}".format(steering_angle, throttle))
send_control(steering_angle, throttle)
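# The speed control above is a clamped proportional controller:
#   throttle = clip(K * (set_speed - speed), throttle_min, throttle_max)
# with set_speed switched when the steering angle exceeds steering_threshold (here both
# targets are 30, so the set point is effectively constant).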
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit("steer",
data={'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()}
, skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
model = model_from_json(jfile.read())
model.compile("rmsprop", "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
```
#### File: term1/P4_advanced_lane_lines/P4_advanced_lane_finding.py
```python
import glob
import pickle
from collections import deque
from itertools import product
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from moviepy.editor import VideoFileClip
from natsort import natsorted
from sklearn.linear_model import ElasticNet
sns.set_style("whitegrid", {'axes.grid': False})
def get_obj_img_pts(img_cal_names, num_x=9, num_y=6):
'''Generate object/image points given filenames of calibration images
params:
num_x = the number of inner corner points along the x-axis of the test grid
num_y = the number of inner corner points along the y-axis of the test grid
'''
# generate object points
obj_pt = np.array(list(product(range(num_y), range(num_x), range(1))), np.float32)
obj_pt[:, [0, 1]] = obj_pt[:, [1, 0]]
obj_pts = []
img_pts = []
img_cals = []
img_cal_names_ret = []
for idx, img_cal_name in enumerate(img_cal_names):
img_cal = mpimg.imread(img_cal_name)
img_gray = cv2.cvtColor(img_cal, cv2.COLOR_RGB2GRAY)
ret, img_pt = cv2.findChessboardCorners(img_gray, (num_x, num_y), None)
if ret:
print('corners_found: {}'.format(img_cal_name))
obj_pts.append(obj_pt)
img_pts.append(img_pt)
# visualize the image points on calibration images
cv2.drawChessboardCorners(img_cal, (num_x, num_y), img_pt, ret)
img_cals.append(img_cal)
img_cal_names_ret.append(img_cal_name)
return obj_pts, img_pts, img_cals, img_cal_names_ret
def correct_dist(img, obj_pts, img_pts):
'''Undistort an image given object/image points
'''
img_size = (img.shape[1], img.shape[0])
ret, mtx, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(obj_pts, img_pts, img_size, None, None)
return cv2.undistort(img, mtx, dist_coeffs, None, mtx)
def img_subplots(imgs, img_names=None, f_size=(12, 10), f_cols=4):
'''Create subplots of images and return figure handle
'''
assert (len(imgs) == len(img_names))
f_rows = np.ceil(len(imgs) / f_cols).astype('int')
fig, f_axes = plt.subplots(f_rows, f_cols, figsize=f_size)
fig.set_tight_layout(True)
for idx, f_ax in enumerate(f_axes.reshape(-1)):
f_ax.axis("off")
if idx < len(imgs):
img = imgs[idx]
color_map = "gray" if len(img.shape) == 2 else None
f_ax.imshow(img, cmap=color_map)
if img_names is not None:
f_ax.set_title(img_names[idx])
return fig
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def region_of_interest(img, roi_vertex_scales):
"""Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
vertices shall have the shape (num_of_polygon, num_of_vertices, 2)
eg: vertices = np.array([[(wd*.45, ht*.53),(wd*.05, ht), (wd*.98, ht), (wd*.55, ht*.53)]], dtype=np.int32)
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
mask_color = (255,) * channel_count
ht, wd, _ = img.shape
else:
mask_color = 255
ht, wd = img.shape
vertices = np.int32([[(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in roi_vertex_scales]])
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, line_thickness=2, line_color=(0, 255, 0)):
"""Returns an image with hough lines drawn.
`img` should be the output of a Canny transform.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((*img.shape, 3), dtype=np.uint8)
draw_lines(line_img, lines, thickness=line_thickness, color=line_color)
return line_img
def draw_lines(img, lines, thickness=2, color=(255, 0, 0)):
""" Draw interpolated lanes on img
"""
lane_1st, lane_2nd = [], []
height, width, _ = img.shape
# separate the line segments based on slope and their position in the image
for line in lines:
for x1, y1, x2, y2 in line:
if ((x2 - x1) != 0) and ((y2 - y1) / (x2 - x1) < 0) and ((x1 + x2) / 2 / width < 0.55):
lane_1st.append(line)
elif ((x2 - x1) != 0) and ((y2 - y1) / (x2 - x1) > 0) and ((x1 + x2) / 2 / width > 0.55):
lane_2nd.append(line)
# fit the left and right lane separately with ElasticNet
x_pred = np.arange(img.shape[1]).reshape(-1, 1)
for lane in [np.array(lane_1st), np.array(lane_2nd)]:
lane = lane.reshape(lane.shape[0] * 2, 2)
X, y = lane[:, 0], lane[:, 1]
reg = ElasticNet().fit(X.reshape(-1, 1), y)
y_pred = np.hstack((x_pred, reg.predict(x_pred).reshape(-1, 1)))
cv2.polylines(img, np.int32([y_pred]), False, color, thickness)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def select_color(img, colors):
''' Return img with specified color selected
colors is a list of (color_lower, color_upper) tuples
'''
img_color_select = np.zeros_like(img)
for color_lower, color_upper in colors:
color_mask = cv2.inRange(img, color_lower, color_upper)
img_color_select += cv2.bitwise_and(img, img, mask=color_mask)
return img_color_select
def sobel_thresh(img, th=(30, 100), kernel_size=3, op_dirs=(1, 0), debug=False):
'''Absolute gradient thresholding
'''
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, *op_dirs, ksize=kernel_size))
img_sobel_scaled = np.uint8(255 * img_sobel / np.max(img_sobel))
img_bin = img2binary(img_sobel_scaled, th)
if debug:
return img_sobel_scaled, img_bin
else:
return img_bin
def mag_thresh(img, th=(30, 100), kernel_size=3, debug=False):
'''Gradient magnitude thresholding
'''
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_sobel_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=kernel_size)
img_sobel_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=kernel_size)
img_sobel_mag = np.sqrt(img_sobel_x ** 2 + img_sobel_y ** 2)
img_sobel_scaled = np.uint8(255 * img_sobel_mag / np.max(img_sobel_mag))
img_bin = img2binary(img_sobel_scaled, th)
if debug:
return img_sobel_scaled, img_bin
else:
return img_bin
def img2binary(img, th=(75, 225)):
'''Covert an image to a binary mask given thresholds
'''
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_bin = np.zeros_like(img)
img_bin[(img > th[0]) & (img <= th[1])] = 1
return img_bin
def threshold_multi(img, roi_vertex_scales, colors_rgb, colors_hls, sobel_th=(80, 150), debug=False):
img = gaussian_blur(img, kernel_size=3)
img_rgb = img
img_hls = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HLS)
img_yuv = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2YUV)
# color selection in RGB and HLS space
img_rgb_bin = img2binary(select_color(img_rgb, colors_rgb), th=[0, 255])
img_hls_bin = img2binary(select_color(img_hls, colors_hls), th=(0, 255))
img_color_bin = img_rgb_bin | img_hls_bin
# U abs gradient th (YUV)
img_channel = img_yuv[:, :, 1]
img_u_sobel = sobel_thresh(img_channel, th=sobel_th, kernel_size=9, op_dirs=[1, 0])
# combine thresholded binary images
img_bin_combined = img_color_bin | img_u_sobel
img_bin_combined_roi = region_of_interest(img_bin_combined, roi_vertex_scales)
if debug:
return img_color_bin, img_u_sobel, img_bin_combined, img_bin_combined_roi
else:
return img_bin_combined_roi
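# Hedged example of the colour lists threshold_multi expects (the bounds are illustrative,
# not taken from the project): each entry is a (lower, upper) pair passed to cv2.inRange,
# e.g.
#   colors_rgb = [(np.array([200, 200, 200]), np.array([255, 255, 255]))]  # near-white
#   colors_hls = [(np.array([15, 30, 115]), np.array([35, 205, 255]))]     # yellow-ish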
def get_perspective_matrix(img, src_scales, dst_scales):
if len(img.shape) == 3:
ht, wd, _ = img.shape
elif len(img.shape) == 2:
ht, wd = img.shape
else:
raise Exception("Only 2D images are supported.")
src = np.float32([(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in src_scales])
dst = np.float32([(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in dst_scales])
M = cv2.getPerspectiveTransform(src, dst)
inv_M = cv2.getPerspectiveTransform(dst, src)
return M, inv_M, src
def get_binary_lane(img_strips, window_scale, offset=0.10):
'''Return a segmented lane using the sliding window method'''
lane = []
    window = (np.array(window_scale) * img_strips[0].shape[1]).astype(int)
for img_strip in reversed(img_strips):
img_windowed = np.zeros_like(img_strip)
img_windowed[:, window[0]:window[1]] = img_strip[:, window[0]:window[1]]
lane_pts_x = np.where(np.sum(img_windowed, axis=0))
if len(lane_pts_x[0]) > 5:
lane.append(img_windowed)
lane_mean = np.mean(lane_pts_x)
lane_offset = offset * img_strip.shape[1]
window = [int(lane_mean - lane_offset), int(lane_mean + lane_offset)]
else:
lane.append(np.zeros_like(img_windowed))
return np.vstack(reversed(lane))
def fit_lane_pts(pts, y_fit_range=None, num_pts_y_fit=300):
'''Return fitted points or coefficeints of 2nd order fitting x = F(y).
params:
pts: tuple of x_array and y_array `(x_array, y_array)`
'''
pts_x, pts_y = reversed(pts)
coeffs = np.polyfit(pts_y, pts_x, 2)
if y_fit_range is not None:
pts_y_fit = np.linspace(0, y_fit_range, num=num_pts_y_fit)
pts_x_fit = np.polyval(coeffs, pts_y_fit)
return pts_x_fit, pts_y_fit
else:
return coeffs
# def fit_lane_pts_with_coeffs(coeffs, y_fit_range, num_pts_y_fit=300):
# '''Return fitted points given coefficients
#
# params:
# coeffs: 2nd order fitting coefficients with the highest order first
# '''
#
# pts_y_fit = np.linspace(0, y_fit_range, num=num_pts_y_fit)
# pts_x_fit = np.polyval(coeffs, pts_y_fit)
#
# return pts_x_fit, pts_y_fit
def calc_curvature(pts, xm_per_pix=3.7 / 700, ym_per_pix=30 / 720):
'''Calculate curvature given scales from pixel space to real physical space'''
pts = np.array(pts).T * np.array([ym_per_pix, xm_per_pix])
pts = (pts[:, 0], pts[:, 1])
coeffs = fit_lane_pts(pts)
y_eval = np.max(pts[1])
curve_radius = ((1 + (2 * coeffs[0] * y_eval + coeffs[1]) ** 2) ** 1.5) / np.absolute(2 * coeffs[0])
return curve_radius
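# Curvature formula used above: for a fit x = A*y^2 + B*y + C (in metres), the radius at
# y_eval is
#   R = (1 + (2*A*y_eval + B)**2) ** 1.5 / abs(2*A)
# with xm_per_pix / ym_per_pix converting pixel coordinates to metres before fitting.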
def lane_detection(img, ROI_vertex_scales,
src_scales, dst_scales,
colors_rgb,
colors_hls,
sobel_th=(80, 150),
num_bins=20,
window_L=(0.1, 0.45),
window_R=(0.6, 0.90),
draw_lane_color=(0, 255, 0),
debug=False):
img_corr = correct_dist(img, obj_pts, img_pts)
ht, wd, _ = img.shape
M, inv_M, pts_src = get_perspective_matrix(img, src_scales, dst_scales)
img_warped = cv2.warpPerspective(img_corr, M, (wd, ht), flags=cv2.INTER_LINEAR)
# thresholding corrected image and
# perspective transformation of the resulting binary image
img_bin = threshold_multi(img_corr, ROI_vertex_scales, colors_rgb=colors_rgb, colors_hls=colors_hls, sobel_th=sobel_th)
img_bin_warped = cv2.warpPerspective(img_bin, M, (wd, ht), flags=cv2.INTER_LINEAR)
# split perpective transformed binary image into multiple horizontal strips
# img_bin_blurred = gaussian_blur(img_bin_warped, kernel_size=blur_kernel_size)
img_bin_splits = np.vsplit(img_bin_warped, num_bins)
# isolate the left and right lane with sliding windows
lane_L = get_binary_lane(img_bin_splits, window_L)
lane_R = get_binary_lane(img_bin_splits, window_R)
pts_L = np.where(lane_L)
pts_R = np.where(lane_R)
if (len(pts_L[0]) < 3) | (len(pts_R[0]) < 3):
return img_corr
# calculate curvature for left/right lane
pts_fit_L = fit_lane_pts(pts_L, y_fit_range=img_bin.shape[0], num_pts_y_fit=300)
curve_radius_L = calc_curvature(pts_L)
pts_fit_R = fit_lane_pts(pts_R, y_fit_range=img_bin.shape[0], num_pts_y_fit=300)
curve_radius_R = calc_curvature(pts_R)
# [curve_radius_L, curve_radius_R]
# create an image to draw the lines on
lane_warped_color = np.zeros_like(img_corr, dtype=np.uint8)
# draw fitted points to a lane image
    pts_draw = np.hstack([pts_fit_L, np.fliplr(pts_fit_R)]).T.reshape(-1, 1, 2).astype(int)
cv2.fillPoly(lane_warped_color, [pts_draw], draw_lane_color)
# inverse perspective transform of lane image
lane_color = cv2.warpPerspective(lane_warped_color, inv_M, (wd, ht), flags=cv2.INTER_LINEAR)
# overlay detected lanes with the undistorted image
img_combined = cv2.addWeighted(img_corr, 1, lane_color, 0.3, 0)
if debug:
print("The left curvature is {:.1f} m".format(curve_radius_L))
print("The right curvature is {:.1f} m".format(curve_radius_R))
print("")
pts_warp_roi = np.int32(pts_src.reshape([-1, 1, 2]))
pts_roi = np.int32([[(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in ROI_vertex_scales]])
img_warp_roi = cv2.polylines(img_corr, [pts_warp_roi], True, (0, 255, 0), 5) # draw the warp region in green
img_warp_roi = cv2.polylines(img_warp_roi, [pts_roi], True, (0, 0, 255), 5) # draw the roi selection in blue
return img_warp_roi, img_warped, img_bin, img_bin_warped, lane_L, lane_R, lane_warped_color, img_combined
else:
return img_combined
# Define a class to receive the characteristics of each line detection
class LaneDetector:
def __init__(self, N=30, TOLERANCE_CURVATURE=2, TOLERANCE_PTS=100):
# append to history if the current curvature is less than TOLERANCE_CURVATURE
# compared to the average curvature over N confident frames
self.TOLERANCE_CURVATURE = TOLERANCE_CURVATURE
# proceed with lane curve fitting if detected points are greater than TOLERANCE_PTS
self.TOLERANCE_PTS = TOLERANCE_PTS
# x,y values and fitted polynomial coeffs of the last n fits
# assuming 30 frames per second
self.N = N
self.pts_fit_L_last_n = deque(maxlen=self.N)
self.pts_fit_R_last_n = deque(maxlen=self.N)
# average x,y values of the fitted lanes over the last n fit
self.pts_L_last = None
self.pts_R_last = None
# radius of curvature of the line in some units
self.curve_radius = 0
self.curve_radius_last_n = deque(maxlen=self.N)
self.curve_radius_avg = 0
self.curve_radius_diff = 0
# distance in meters of vehicle center from the line
self.vehicle_offset = None
self.vehicle_offset_last_n = deque(maxlen=self.N)
self.vehicle_offset_avg = None
# # difference in fit coefficients between last and new fits
# self.coeffs_L_last_n = deque(maxlen=self.N)
# self.coeffs_R_last_n = deque(maxlen=self.N)
# self.coeffs_L_avg = None
# self.coeffs_R_avg = None
# self.fit_coeffs_diffs = np.array([0, 0, 0], dtype='float')
# lane mask
self.lane_mask = None
self.lane_masks = []
# problematic frames
self.frame_N = 0
self.error_frame_N = 0
self.error_frames = []
def get_binary_lane(self, img_strips, window_scale, offset=0.10):
'''Return a segmented lane using the sliding window method'''
lane = []
img_window_masks = []
        window = (np.array(window_scale) * img_strips[0].shape[1]).astype(int)
for img_strip in reversed(img_strips):
img_windowed = np.zeros_like(img_strip)
img_windowed[:, window[0]:window[1]] = img_strip[:, window[0]:window[1]]
img_window_mask = np.zeros_like(img_strip)
img_window_mask[:, window[0]:window[1]] = 1
img_window_masks.append(img_window_mask)
lane_pts_x = np.where(np.sum(img_windowed, axis=0))
if len(lane_pts_x[0]) > 5:
lane.append(img_windowed)
lane_mean = np.mean(lane_pts_x)
lane_offset = offset * img_strip.shape[1]
window = [int(lane_mean - lane_offset), int(lane_mean + lane_offset)]
else:
lane.append(np.zeros_like(img_windowed))
return np.vstack(reversed(lane)), np.vstack(reversed(img_window_masks))
def calc_curvature(self, pts, xm_per_pix=3.7 / 700, ym_per_pix=30 / 720):
'''Calculate curvature given scales from pixel space to real physical space'''
pts = np.array(pts).T * np.array([ym_per_pix, xm_per_pix])
pts = (pts[:, 0], pts[:, 1])
coeffs = fit_lane_pts(pts)
y_eval = np.max(pts[1])
curve_radius = ((1 + (2 * coeffs[0] * y_eval + coeffs[1]) ** 2) ** 1.5) / np.absolute(2 * coeffs[0])
return curve_radius, coeffs
def lane_detection(self, img, ROI_vertex_scales,
src_scales, dst_scales,
colors_rgb,
colors_hls,
sobel_th=(80, 150),
num_bins=20,
window_L=(0.1, 0.45),
window_R=(0.6, 0.90),
draw_lane_color=(0, 255, 0),
debug=False):
img_corr = correct_dist(img, obj_pts, img_pts)
ht, wd, _ = img.shape
M, inv_M, pts_src = get_perspective_matrix(img, src_scales, dst_scales)
img_warped = cv2.warpPerspective(img_corr, M, (wd, ht), flags=cv2.INTER_LINEAR)
# thresholding corrected image and
# perspective transformation of the resulting binary image
img_bin = threshold_multi(img_corr, ROI_vertex_scales, colors_rgb=colors_rgb, colors_hls=colors_hls, sobel_th=sobel_th)
img_bin_warped = cv2.warpPerspective(img_bin, M, (wd, ht), flags=cv2.INTER_LINEAR)
        # split the perspective-transformed binary image into multiple horizontal strips
img_bin_splits = np.vsplit(img_bin_warped, num_bins)
# isolate the left and right lane with masks generated with sliding windows
# if lane_mask is not defined, search the lane lines from scratch
# else use the previous window for lane lines detection
if not self.lane_mask:
lane_L, mask_L = self.get_binary_lane(img_bin_splits, window_L)
lane_R, mask_R = self.get_binary_lane(img_bin_splits, window_R)
self.lane_mask = [mask_L, mask_R]
else:
mask_L, mask_R = self.lane_mask
lane_L = cv2.bitwise_and(img_bin_warped, mask_L)
lane_R = cv2.bitwise_and(img_bin_warped, mask_R)
# get (i,j) coordinates for the lane points
pts_L = np.where(lane_L)
pts_R = np.where(lane_R)
# if the number of lane points detected is less than TOLERANCE_PTS for either lane,
# use the detected points from the last and current frame for subsequent fitting
if (len(pts_L[0]) < self.TOLERANCE_PTS) | (len(pts_R[0]) < self.TOLERANCE_PTS):
self.lane_mask = None
self.error_frame_N += 1
self.error_frames.append(img)
if self.pts_L_last is not None:
# concatenate (i,j) coordinates of points detected for the last and current frame
                pts_L = [np.concatenate([pts_last, pts]) for (pts_last, pts) in zip(self.pts_L_last, pts_L)]
                pts_R = [np.concatenate([pts_last, pts]) for (pts_last, pts) in zip(self.pts_R_last, pts_R)]
else:
return img_corr
else:
self.pts_L_last = pts_L
self.pts_R_last = pts_R
# calculate curvature for left/right lane
# the curve radius is estimated as the mean of left/right lane, which is smoothed over the last n frames
pts_fit_L = fit_lane_pts(pts_L, y_fit_range=img_bin.shape[0], num_pts_y_fit=ht)
curve_radius_L, coeffs_L = self.calc_curvature(pts_L)
pts_fit_R = fit_lane_pts(pts_R, y_fit_range=img_bin.shape[0], num_pts_y_fit=ht)
curve_radius_R, coeffs_R = self.calc_curvature(pts_R)
self.curve_radius = np.mean([curve_radius_L, curve_radius_R])
self.curve_radius_diff = np.abs((self.curve_radius - self.curve_radius_avg) / self.curve_radius_avg)
# if the lane curve difference is less than TOLERANCE_CURVATURE or is the first frame
# append the current curvature and coefficients to their respective double ended queue
if (self.curve_radius_diff < self.TOLERANCE_CURVATURE) or (self.frame_N == 0):
self.curve_radius_last_n.append(self.curve_radius)
self.curve_radius_avg = np.mean(self.curve_radius_last_n)
# self.coeffs_L_last_n.append(coeffs_L)
# self.coeffs_R_last_n.append(coeffs_R)
# self.coeffs_L_avg = np.mean(self.coeffs_L_last_n, axis=0)
# self.coeffs_R_avg = np.mean(self.coeffs_R_last_n, axis=0)
else:
self.lane_mask = None
# estimate vehicle offset from the center of the road
# using the x coordinates of the last 10 points from the bottom of the frame
xm_per_pix = 3.7 / 700 # meters per pixel
# here a negative sign is needed to measure offsets with respect to the center of the road
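        # equivalently: offset = -((mean(x_L) + mean(x_R)) / 2 - wd / 2) * xm_per_pix,
        # where x_L / x_R are the x positions of the last 10 fitted points of each lane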
self.vehicle_offset = -xm_per_pix * (np.mean(pts_fit_L[0][-10:]) + np.mean(pts_fit_R[0][-10:]) - wd) / 2
self.vehicle_offset_last_n.append(self.vehicle_offset)
self.vehicle_offset_avg = np.mean(self.vehicle_offset_last_n)
# create an image to draw fitted points on
lane_warped_color = np.zeros_like(img_corr, dtype=np.uint8)
# draw fitted points to a lane image
        pts_draw = np.hstack([pts_fit_L, np.fliplr(pts_fit_R)]).T.reshape(-1, 1, 2).astype(int)
cv2.fillPoly(lane_warped_color, [pts_draw], draw_lane_color)
# inverse perspective transform of lane image
lane_color = cv2.warpPerspective(lane_warped_color, inv_M,
(wd, ht), flags=cv2.INTER_LINEAR)
lane_color = region_of_interest(lane_color, ROI_vertex_scales)
# overlay detected lanes with the undistorted image
img_combined = cv2.addWeighted(img_corr, 1, lane_color, 0.3, 0)
# draw text onto the image
img_txt = "Radius of curvature: {:7.1f}m Offset from road center: {:7.3f}m Errors: {:3.0f} /{:5.0f}".format(self.curve_radius_avg,
self.vehicle_offset_avg,
self.error_frame_N,
self.frame_N)
img_txt_offset = (int(wd * 0.01), int(ht * 0.04))
pts_txt_bounding_box = np.int32([(0, 0), (wd, 0), (wd, ht * 0.05), (0, ht * 0.05)]).reshape([-1, 1, 2])
img_combined = cv2.fillPoly(img_combined, [pts_txt_bounding_box], (43, 43, 43))
cv2.putText(img_combined,
img_txt,
img_txt_offset,
cv2.FONT_HERSHEY_COMPLEX, 0.8,
(250, 250, 250), 1)
self.frame_N += 1
if debug:
print("The left curvature is {:.1f} m".format(curve_radius_L))
print("The right curvature is {:.1f} m".format(curve_radius_R))
print("")
# draw perspective warp and ROI bounding box
pts_warp_roi = np.int32(pts_src.reshape([-1, 1, 2]))
pts_roi = np.int32([[(wd * wd_scale, ht * ht_scale) for (wd_scale, ht_scale) in ROI_vertex_scales]])
            img_warp_roi = cv2.polylines(img_corr, [pts_warp_roi], True, (0, 255, 0), 5)  # green for the perspective transform bounding box
            img_warp_roi = cv2.polylines(img_warp_roi, [pts_roi], True, (0, 0, 255), 5)  # blue for the ROI bounding box
return img_warp_roi, img_warped, img_bin, img_bin_warped, lane_L, lane_R, lane_warped_color, img_combined
else:
return img_combined
def process_image(img):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# the solution is detailed in the function lane_detection above
return lane_detector.lane_detection(img, ROI_vertex_scales, src_scales, dst_scales,
colors_rgb, colors_hls,
window_L=window_L, window_R=window_R, debug=False)
NUM_X = 9
NUM_Y = 6
LOAD_OBJ_IMG_PTS = True
img_cal_names = natsorted(glob.glob('camera_cal/*.jpg'))
if not LOAD_OBJ_IMG_PTS:
obj_pts, img_pts, img_cals, img_cal_names_ret = get_obj_img_pts(img_cal_names, num_x=NUM_X, num_y=NUM_Y)
with open(r"obj_img_pts", "wb") as file_output:
pickle.dump([obj_pts, img_pts], file_output)
else:
with open(r"obj_img_pts", "rb") as file_input:
obj_pts, img_pts = pickle.load(file_input)
ROI_vertex_scales = [(0.48, 0.59), (0.52, 0.59), (0.65, 0.65), (0.95, 1), (0.05, 1), (0.35, 0.65)]
src_x_top, src_x_bot = 0.42, 0.065 # counting from the left edge
src_y_top, src_y_bot = 0.67, 0
dst_x_top, dst_x_bot = 0.2, 0.2 # counting from the left edge
dst_y_top, dst_y_bot = 0.2, 0
src_scales = [(src_x_top, src_y_top), (1 - src_x_top, src_y_top),
(1 - src_x_bot, 1 - src_y_bot), (src_x_bot, 1 - src_y_bot)]
dst_scales = [(dst_x_top, dst_y_top), (1 - dst_x_top, dst_y_top),
(1 - dst_x_top, 1 - dst_y_top), (dst_x_top, 1 - dst_y_bot)]
colors_rgb = [(np.uint8([190, 190, 190]), np.uint8([255, 255, 255]))]
colors_hls = [(np.uint8([0, 120, 150]), np.uint8([75, 255, 255])),
(np.uint8([75, 180, 0]), np.uint8([120, 255, 35]))]
window_L = (0.1, 0.45)
window_R = (0.6, 0.90)
lane_detector = LaneDetector()
# img_test_names = natsorted(glob.glob("test_images/*.jpg"))
# for img_test_name in img_test_names[0:2]:
# print(img_test_name)
# img = mpimg.imread(img_test_name)
#
# imgs = lane_detector.lane_detection(img, ROI_vertex_scales, src_scales, dst_scales,
# colors_rgb, colors_hls,
# window_L=window_L, window_R=window_R, debug=True)
#
# img_names = ["undistorted", "perspective warped", "thresholded", "thresholded warped",
# "left lane", "right lane", "warped detected lanes", "combined with detected lanes"]
#
# fig = img_subplots(imgs, img_names, f_size=(10, 12), f_cols=2)
# fig.suptitle(img_test_name, y=1.05, fontsize=16)
# plt.axis("on")
#
# plt.show()
# clip_files = ["test.mp4", "challenge_video.mp4", "harder_challenge_video.mp4"]
clip_files = ["project_video.mp4", "challenge_video.mp4", "harder_challenge_video.mp4"]
for clip_file in clip_files[0:1]:
clip = VideoFileClip(clip_file)
clip_out = clip.fl_image(process_image)
clip_out.write_videofile("z_sol_" + clip_file, audio=False)
print("======================================================")
``` |
{
"source": "JingzhiYAN/taobao-traffic-bot",
"score": 2
} |
#### File: JingzhiYAN/taobao-traffic-bot/import requests.py
```python
import requests
def main():
headers = {
"charset":"utf-8",
"Accept-Encoding":"gzip",
"referer":"https://servicewechat.com/wxffc08ac7df482a27/117/page-frame.html",
"authorization":"5bda7657a4ce660001f7eed8",
"auth":"<KEY>",
"content-type":"application/json",
"auth-sign":"<KEY>",
"User-Agent":"Mozilla/5.0 (Linux; Android 7.1.2; MI 5X Build/N2G47H; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/64.0.3282.137 Mobile Safari/537.36 MicroMessenger/6.7.3.1360(0x26070336) NetType/WIFI Language/zh_CN Process/appbrand2",
"Host":"www.xiaohongshu.com",
"Connection":"Keep-Alive",
}
# url = "http://www.xiaohongshu.com/sapi/wx_mp_api/sns/v1/homefeed?oid=homefeed.cosmetics_v2&cursor_score=&sid=session.1540996623416187718"
url = "http://www.xiaohongshu.com/sapi/wx_mp_api/sns/v1/homefeed?oid=homefeed.cosmetics_v2&cursor_score=1541067389.9550&sid=session.1540996623416187718"
datas = requests.get(url= url, headers=headers ).json()
data = datas['data']
# print(data)
for i in data:
print(i)
# print(i['title'])
# print(i['share_link'])
title = '标题: ' + i['mini_program_info']['share_title']
print(title)
link_url = '链接: ' + i['share_link']
print(link_url)
b_picture = '封面图片: '+ i['mini_program_info']['thumb']
print(b_picture)
type = '类型: ' + i['type']
print(type)
level = '级别: ' + str(i['level'])
print(level)
h_picture = '用户头像: ' + i['user']['images']
print(h_picture)
username = '用户名: ' + i['user']['nickname']
print(username)
user_id = 'userid: ' + i['user']['userid']
print(user_id)
zan = '喜欢点心: ' + str(i['likes'])
print(zan)
        # Open the file in append mode: the file pointer starts at the end, so each write is appended.
with open('text', 'a', encoding='utf-8')as f:
f.write('\n'.join([title,link_url,b_picture,type,level,h_picture,username,user_id,zan]))
f.write('\n' + '=' * 100 + '\n')
if __name__ == "__main__":
main()
``` |
{
"source": "jingzhongxu/teether",
"score": 3
} |
#### File: teether/bin/asm.py
```python
import sys
import teether
def assemble(code):
instructions = []
refs = {}
for line in code.splitlines():
tokens = line.upper().split(':')
if len(tokens) == 2:
label = tokens[0].strip()
ins = tokens[1]
else:
label = None
ins = tokens[0]
tokens = ins.split()
if not tokens:
continue
if tokens[0] != 'PUSH':
if tokens[0] not in teether.cfg.opcodes.reverse_opcodes:
print('Unknown instruction "%s"' % tokens[0], file=sys.stderr)
continue
            instructions.append((bytes([teether.cfg.opcodes.reverse_opcodes[tokens[0]]]), b'', label))
elif tokens[0] == 'PUSH':
if tokens[1].startswith('@'):
ref_label = tokens[1][1:]
if ref_label not in refs:
refs[ref_label] = []
refs[ref_label].append(len(instructions))
instructions.append(('REFPUSH', ref_label, label))
else:
v = int(tokens[1], 16)
hex_v = '%x' % v
if len(hex_v) % 2 == 1:
hex_v = '0' + hex_v
                v_size = len(hex_v) // 2
instructions.append(
                    (bytes([teether.cfg.opcodes.reverse_opcodes['PUSH%d' % v_size]]), bytes.fromhex(hex_v), label))
refsize = 1
while sum(len(i) + len(a) if i != 'REFPUSH' else 1 + refsize for i, a, l in instructions) >= 256 ** refsize:
refsize += 1
mask = '%%0%dx' % (refsize * 2)
    code = b''
labels = {}
pc = 0
for i, a, l in instructions:
if l is not None:
labels[l] = pc
if i == 'REFPUSH':
pc += 1 + refsize
else:
pc += 1 + len(a)
for i, a, l in instructions:
if i == 'REFPUSH':
            i = bytes([teether.cfg.opcodes.reverse_opcodes['PUSH%d' % refsize]])
a = bytes.fromhex(mask % labels[a])
code += i + a
return code
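# A minimal sketch of the accepted input (illustrative only, not taken from the repo):
#
#   start: PUSH 0x60
#          PUSH @end
#          JUMP
#   end:   JUMPDEST
#          STOP
#
# `PUSH @label` entries are recorded as REFPUSH placeholders and later resolved to a
# PUSH<refsize> of the label's byte offset once all instruction sizes are known.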
if __name__ == '__main__':
import fileinput
print(assemble('\n'.join(fileinput.input())).hex())
``` |
{
"source": "jingzhou123/tieba-crawler",
"score": 2
} |
#### File: tieba-crawler/dirbot/pipelines.py
```python
from scrapy.exceptions import DropItem
from twisted.enterprise import adbapi
import logging
import _mysql_exceptions
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class TbBasePipeline(object):
"""docstring for TbBasePipeline"""
def __init__(self, dbpool):
self.dbpool = dbpool
def noop(self):
pass
def target_spider_name(self):
"""TODO: Docstring for target_spider_name.
:returns: TODO
"""
return None
def do_upsert(self, conn, item, spider):
"""跟数据库有关的操作.
:conn: TODO
:item: TODO
:spider: TODO
:returns: TODO
"""
return item
def process_item(self, item, spider):
"""TODO: Docstring for process_item.
:conn: TODO
:item: TODO
:spider: TODO
:returns: TODO
"""
        names_or_name_str = self.target_spider_name()
        if isinstance(names_or_name_str, str) and names_or_name_str != spider.name:
            return item
        if isinstance(names_or_name_str, list) and spider.name not in names_or_name_str:
            return item
# run db query in the thread pool
d = self.dbpool.runInteraction(self.do_upsert, item, spider)
d.addErrback(self._handle_error, item, spider)
# at the end return the item in case of success or failure
d.addBoth(lambda _: item)
# return the deferred instead the item. This makes the engine to
# process next item (according to CONCURRENT_ITEMS setting) after this
# operation (deferred) has finished.
return d
def _handle_error(self, failure, item, spider):
"""Handle occurred on db interaction."""
# do nothing, just log
logging.error(failure)
@classmethod
def from_settings(cls, settings):
dbargs = dict(
host=settings['MYSQL_HOST'],
db=settings['MYSQL_DBNAME'],
user=settings['MYSQL_USER'],
passwd=settings['MYSQL_PASSWD'],
charset='utf8',
use_unicode=True,
)
dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
return cls(dbpool)
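    # Settings expected in dirbot/settings.py (illustrative values only, assumed):
    #   MYSQL_HOST = 'localhost'
    #   MYSQL_DBNAME = 'tieba'
    #   MYSQL_USER = 'root'
    #   MYSQL_PASSWD = '...'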
class TiebaPipeline(TbBasePipeline):
"""A pipeline to store the item in a MySQL database.
This implementation uses Twisted's asynchronous database API.
code below is referred from
'https://github.com/rolando/dirbot-mysql/blob/master/dirbot/pipelines.py'
"""
def process_item(self, item, spider):
if spider.name != 'tieba':
d = self.dbpool.runInteraction(self.noop, item, spider)
d.addBoth(lambda _: item)
return d
logging.debug('processing tieba: %r' % (item))
# run db query in the thread pool
d = self.dbpool.runInteraction(self._do_upsert, item, spider)
d.addErrback(self._handle_error, item, spider)
# at the end return the item in case of success or failure
d.addBoth(lambda _: item)
# return the deferred instead the item. This makes the engine to
# process next item (according to CONCURRENT_ITEMS setting) after this
# operation (deferred) has finished.
return d
def _insert_tieba_admins(self, conn, item, spider):
"""TODO: INSERT IGNORE ...
:conn: TODO
:item: TODO
:spider: TODO
:returns: TODO
"""
for name in item['admin_names']:
#logging.debug("(%s, %s)" % (name, item['name'])); #right
#logging.debug('%s' % (type(name))) #unicode
conn.execute("""
INSERT user_manage_tieba VALUES(%s, %s)
ON DUPLICATE KEY UPDATE user_name=%s, tieba_name=%s
""", (name, item['name'], name, item['name']))
def _do_upsert(self, conn, item, spider):
        """Perform an insert or update."""
        logging.debug('processing item from posts..')
conn.execute("""SELECT EXISTS(
SELECT name FROM tieba WHERE name = %s
)""", (item['name'], ))
ret = conn.fetchone()[0]
if ret:
conn.execute("""
UPDATE tieba
SET followed_num=%s, belong_dir=%s, slogan=%s, posts_num=%s,admin_num=%s
WHERE name=%s
""", (item['members_num'], item['dir_name'], item['slogan'],
item['posts_num'], item['admin_num'], item['name'], ))
else:
conn.execute("""
INSERT INTO tieba VALUES (DEFAULT, %s, %s, %s, %s, %s, %s)
""", (
item['name'], item['members_num'], item['admin_num'],
item['posts_num'], item['slogan'], item['dir_name'],
))
self._insert_tieba_admins(conn, item, spider);
def _handle_error(self, failure, item, spider):
"""Handle occurred on db interaction."""
# do nothing, just log
logging.error(failure)
class PostPipeline(TbBasePipeline):
"""Docstring for PostPipeline. """
def process_item(self, item, spider):
"""TODO: Docstring for process_item.
:item: TODO
:returns: TODO
"""
if spider.name != 'post':
d = self.dbpool.runInteraction(self.noop, item, spider)
d.addBoth(lambda _: item)
return d
logging.debug('processing post: %r' % (item))
# run db query in the thread pool
d = self.dbpool.runInteraction(self._do_upsert, item, spider)
d.addErrback(self._handle_error, item, spider)
# at the end return the item in case of success or failure
d.addBoth(lambda _: item)
# return the deferred instead the item. This makes the engine to
# process next item (according to CONCURRENT_ITEMS setting) after this
# operation (deferred) has finished.
return d
def _fill_in_data(self, item):
"""TODO: Docstring for _fill_in_data.
:item: TODO
:returns: TODO
"""
item['id'] = int(item['id'])
item['reply_num'] = int(item['reply_num'])
item['post_time'] = '1970-1-1'# shim data
return item
def _do_upsert(self, conn, item, spider):
"""TODO: Docstring for _do_upsert.
:returns: TODO
"""
#logging.debug('filtering ads...')
#logging.debug('post: %r' % (item))
        if item['id'] == '-1':  # advertisement post
return
logging.debug('filtered ads...')
item = self._fill_in_data(item)
conn.execute( """
INSERT INTO post SET
author_name=%s, tieba_name=%s, title=%s,
body=%s, post_time=%s, reply_num=%s, id=%s
""",
(
item['author_name'],
item['tieba_name'],
item['title'],
item['body'],
item['post_time'],
item['reply_num'],
item['id']
)
)
def _handle_error(self, failure, item, spider):
"""Handle occurred on db interaction."""
# do nothing, just log
logging.error(failure)
class ReplyPipeline(TbBasePipeline):
"""Docstring for ReplyPipeline. """
def _do_upsert(self, conn, item, spider):
"""TODO: Docstring for _do_update.
:item: TODO
:spider: TODO
:returns: TODO
"""
if item['type'] == 'MAIN':
conn.execute("""
UPDATE post SET title=%s, body=%s, post_time=%s where id=%s
""", (
item['title'],
item['body'],
item['post_time'],
item['id'])
)
else:
logging.debug('item id: %s' % (item['id']))
conn.execute("""
INSERT INTO reply SET
author_name=%s, body=%s, id=%s, title = %s,
post_time=%s, post_id=%s, reply_num=%s
""", (
item['author_name'],
item['body'],
item['id'],
item['title'],
item['post_time'],
item['post_id'],
item['reply_num'],
))
def process_item(self, item, spider):
if spider.name != 'reply':
d = self.dbpool.runInteraction(self.noop, item, spider)
d.addBoth(lambda _: item)
return d
logging.debug('processing reply: %r' % (item))
d = self.dbpool.runInteraction(self._do_upsert, item, spider)
d.addErrback(self._handle_error, item, spider)
# at the end return the item in case of success or failure
d.addBoth(lambda _: item)
return d
class CommentPipeline(TbBasePipeline):
"""Docstring for CommentPipeline. """
def _do_upsert(self, conn, item, spider):
"""TODO: Docstring for _do_upsert.
:returns: TODO
"""
logging.debug('item inserted: %r' % (item))
conn.execute("""
INSERT comment VALUES(%s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE id=%s, reply_id=%s, author_name=%s, body=%s, post_time=%s
""", (
item['id'], item['reply_id'], item['author_name'], item['body'], item['post_time'],
item['id'], item['reply_id'], item['author_name'], item['body'], item['post_time']
))
def process_item(self, item, spider):
"""TODO: Docstring for process_item.
:item: TODO
:spider: TODO
:returns: TODO
"""
if spider.name != 'comment':
d = self.dbpool.runInteraction(self.noop, item, spider)
d.addBoth(lambda _: item)
return d
#logging.debug('processing comment: %r' % (item))
d = self.dbpool.runInteraction(self._do_upsert, item, spider)
d.addErrback(self._handle_error, item, spider)
# at the end return the item in case of success or failure
d.addBoth(lambda _: item)
return d
class MemberPipeline(TbBasePipeline):
def target_spider_name(self):
"""用户关注的贴吧.
:returns: TODO
"""
return 'member'
def do_upsert(self, conn, item, spider):
"""TODO: Docstring for do_upsert.
:conn: TODO
:item: TODO
:spider: TODO
:returns: TODO
"""
conn.execute("""INSERT INTO user_follow_tieba values(%s, %s)""", (item['user_name'], item['tieba_name']));
class UserAsFollowPipeline(TbBasePipeline):
"""Docstring for UserAsMemberPipeline. """
def target_spider_name(self):
return 'user_follow'
def do_upsert(self, conn, item, spider):
conn.execute("""
UPDATE follow SET
following_num=%s, followed_num=%s, tieba_age=%s, posts_num=%s, baidu_id=%s
where name=%s
""", (
item['following_num'],
item['followed_num'],
item['tieba_age'],
item['posts_num'],
item['baidu_id'],
item['name']
        ))  # tieba age: "x.x年" (years); post count: "x万" (tens of thousands) or a plain number like 1234
class UserAsFanPipeline(TbBasePipeline):
"""Docstring for UserAsMemberPipeline. """
def target_spider_name(self):
return 'user_fan'
def do_upsert(self, conn, item, spider):
conn.execute("""
UPDATE fan SET
following_num=%s, followed_num=%s, tieba_age=%s, posts_num=%s, baidu_id=%s
where name=%s
""", (
item['following_num'],
item['followed_num'],
item['tieba_age'],
item['posts_num'],
item['baidu_id'],
item['name']
        ))  # tieba age: "x.x年" (years); post count: "x万" (tens of thousands) or a plain number like 1234
class UserPipeline(TbBasePipeline):
"""Docstring for UserAsMemberPipeline. """
def target_spider_name(self):
"""TODO: Docstring for target_spider_name.
:returns: TODO
"""
return ['user_member', 'user_fan', 'user_follow']
def do_upsert(self, conn, item, spider):
conn.execute("""
INSERT INTO user SET
following_num=%s, followed_num=%s, tieba_age=%s, posts_num=%s, baidu_id=%s, name=%s
""", (
item['following_num'],
item['followed_num'],
item['tieba_age'],
item['posts_num'],
item['baidu_id'],
item['name']
))
class UserAsMemberPipeline(TbBasePipeline):
"""Docstring for UserAsMemberPipeline. """
def target_spider_name(self):
return 'user_member'
def do_upsert(self, conn, item, spider):
try:
conn.execute("""INSERT INTO user SET following_num=%s, followed_num=%s, tieba_age=%s, posts_num=%s, name=%s, baidu_id=%s""", (
item['following_num'], item['followed_num'], item['tieba_age'], item['posts_num'], item['name'], item['baidu_id']))#吧龄 (x)x.x年 发贴数: x万 or 1234
        except _mysql_exceptions.IntegrityError, e:  # duplicate entry: already crawled in an earlier run
pass
query_items = []
for tieba_name in item['following_tieba_name_array']:
logging.debug('user: %r' % (item))
query_items.append((item['name'], tieba_name))
try:
conn.executemany("""INSERT INTO user_follow_tieba VALUES (%s, %s)""", query_items)
        except _mysql_exceptions.IntegrityError, e:  # duplicate entry: the relation already exists (e.g. a tieba member)
            pass  # do nothing
class FanPipeline(TbBasePipeline):
"""Docstring for FanPipeline. """
def target_spider_name(self):
return 'fan'
def do_upsert(self, conn, item, spider):
try:
conn.execute("""
INSERT INTO fan SET name=%s, baidu_id=%s
""", (
item['name'],
item['baidu_id']
))
except Exception, e:
logging.debug(e)
conn.execute("""
INSERT INTO user_follow_user SET to_user_name=%s, from_user_name=%s
""", (
item['user_name_followed'],
item['name']
))
class FollowPipeline(TbBasePipeline):
"""Docstring for FanPipeline. """
def target_spider_name(self):
return 'follow'
def do_upsert(self, conn, item, spider):
conn.execute("""
INSERT INTO follow SET name=%s, baidu_id=%s
""", (
item['name'],
item['baidu_id']
))
conn.execute("""
INSERT INTO user_followed_user SET
to_user_name=%s, from_user_name=%s
""", (
item['user_name_following'],
item['name']
))
```
#### File: dirbot/spiders/post.py
```python
from scrapy import Request
from cookieSpider import CookieSpider as Spider
from scrapy.selector import Selector
from dirbot.settings import TIEBA_NAMES_LIST
from dirbot.items import Post
import logging
class PostSpider(Spider):
"""Docstring for PostSpider. """
name = 'post'
allowed_domains = ["baidu.com"]
def _extract_post_id(self, href):# href = /p/123456789
try:
return href.split('/')[-1]
except Exception, e:
            return -1  # posts without an ID are ads and get filtered out in the pipeline
def start_requests(self):
"""TODO: Docstring for start_requests.
:returns: TODO
"""
url_list = map(
lambda name: ("http://tieba.baidu.com/f?ie=utf-8&kw=" + name),
TIEBA_NAMES_LIST
)
for url in url_list:
yield Request(url, callback=self.parse)
def _parse_posts(self, response):
"""TODO: Docstring for _parse_posts.
:response: TODO
:returns: TODO
"""
#logging.debug('parsing a post..')
        tieba_name = Selector(response).css('.card_title_fname::text').extract_first().strip()[:-1]  # "XX吧" -> "XX" (drop the trailing 吧)
post_item_sels = Selector(response).css('#thread_list>li')
#logging.debug('posts total num: %s', len(post_item_sels))
for sel in post_item_sels:
item = Post()
item['id'] = self._extract_post_id(sel.css('.j_th_tit a::attr(href)').extract_first())
#logging.debug('post id: %s' % (sel.css('.j_th_tit a::attr(href)').extract_first()))
item['tieba_name'] = tieba_name
            item['title'] = sel.css('.j_th_tit a::text').extract_first()  # long titles may be truncated here; the reply spider re-crawls the full title
            item['reply_num'] = sel.css('.threadlist_rep_num::text').extract_first()  # may be "推广" (promoted) instead of a number; filtered again in the pipeline
item['author_name'] = sel.css('.tb_icon_author a::text').extract_first()
item['body'] = sel.css('.threadlist_detail .threadlist_abs_onlyline::text').extract_first()
            # the body can be missing for ads or other ID-less posts
if item['body'] is None:
item['body'] = ''
else:
                item['body'] = item['body'].strip()  # strip newlines and spaces
            #item['post_time'] = sel.css('')  # the post time is not available here, only the last reply time
            logging.debug('post: %r' % (item))
yield item
def should_stop(self, item):
"""stop crawl if possible, can be inheritted
:item: TODO
:returns: TODO
"""
return False
def parse(self, response):
"""TODO: Docstring for pass.
:response: TODO
:returns: TODO
"""
for item in self._parse_posts(response):
if not self.should_stop(item):
yield item
else:
return
if len(Selector(response).css('#frs_list_pager .next')):
            # some tieba pagination links are not absolute URLs
next_page_url = Selector(response).css('#frs_list_pager .next::attr(href)').extract_first()
logging.debug('next_page_url %s', next_page_url)
if -1 != next_page_url.find('http://tieba.baidu.com'):
yield Request(next_page_url, callback=self.parse)
else:
yield Request('http://tieba.baidu.com' + next_page_url, callback=self.parse)
```
#### File: dirbot/spiders/user_fan.py
```python
from dirbot.items import User
from user import UserSpider
from scrapy import Request, Selector
from urlparse import urlparse, parse_qs
import logging
import json
class UserFanSpider(UserSpider):
"""Docstring for UserSpider. """
    name = 'user_fan'  # naming rule: user_{channel the user name was obtained from}
def query_some_records(self, start_index = 0, num = 50):
"""TODO: Docstring for query_some_records.
:start_index: TODO
:num: TODO
:returns: TODO
"""
cursor = self.conn.cursor()
cursor.execute("""
SELECT name from fan limit %s, %s
""", (
start_index,
num
        ))  # deduplicate
return cursor.fetchall()
```
#### File: dirbot/spiders/user_relation.py
```python
from cookieSpider import CookieSpider
from dbSpider import DbSpider
from scrapy import Selector
from urlparse import urlparse, parse_qs
import logging
class UserRelationSpider(CookieSpider, DbSpider):
"""must provide name and request_url_tmpl"""
def __init__(self):
"""todo: to be defined1. """
CookieSpider.__init__(self)
DbSpider.__init__(self)
def query_some_records(self, start_index = 0, num = 50):
"""todo: docstring for query_some_records.
:start_index: todo
:num: todo
:returns: todo
"""
cursor = self.conn.cursor()
        # baidu_id: the user's 16-byte Baidu ID
cursor.execute("""
select baidu_id, name from user limit %s, %s
""", (
start_index,
num
        ))  # deduplicate
return cursor.fetchall()
def url_from_row(self, row):
"""todo: docstring for url_from_row.
:row: todo
:returns: todo
"""
return self.request_url_tmpl % (row[0]) # row only has user's baidu_id
def next_page(self, response):
"""todo: docstring for next_page.
:response: todo
:returns: todo
"""
href = Selector(response).css('.next::attr(href)').extract_first()
return 'http://tieba.baidu.com' + href if href else False
def parse_page(self, response):
"""must be implemented to parse a page.
:response: todo
:returns: todo
"""
pass
``` |
{
"source": "jingziyou/Peppa_Pig_Face_Engine",
"score": 3
} |
#### File: core/api/face_detector.py
```python
import numpy as np
import cv2
import tensorflow as tf
from config import config as cfg
from lib.logger.logger import logger
class FaceDetector:
def __init__(self):
"""
the model was constructed by the params in config.py
"""
self.model_path=cfg.DETECT.model_path
self.thres=cfg.DETECT.thres
self.input_shape=cfg.DETECT.input_shape
        logger.info('INIT THE FACE DETECTOR MODEL...')
self.model =tf.saved_model.load(cfg.DETECT.model_path)
def __call__(self, image):
"""Detect faces.
Arguments:
image: a numpy uint8 array with shape [height, width, 3],
that represents a RGB image.
Returns:
boxes: a float numpy array of shape [num_faces, 5].
"""
image, scale_x, scale_y = self.preprocess(image,
target_width=self.input_shape[1],
target_height=self.input_shape[0])
image = np.expand_dims(image, 0)
res = self.model.inference(image)
boxes = res['boxes'].numpy()
scores = res['scores'].numpy()
num_boxes = res['num_boxes'].numpy()
        ## squeeze the boxes
num_boxes = num_boxes[0]
boxes = boxes[0][:num_boxes]
scores = scores[0][:num_boxes]
to_keep = scores > self.thres
boxes = boxes[to_keep]
scores = scores[to_keep]
        ### recover boxes to raw-image coordinates
scaler = np.array([self.input_shape[0] / scale_y,
self.input_shape[1] / scale_x,
self.input_shape[0] / scale_y,
self.input_shape[1] / scale_x], dtype='float32')
boxes = boxes * scaler
scores = np.expand_dims(scores, 0).reshape([-1, 1])
        ##### tf NMS produces (ymin, xmin, ymax, xmax); swap to (xmin, ymin, xmax, ymax)
for i in range(boxes.shape[0]):
boxes[i] = np.array([boxes[i][1], boxes[i][0], boxes[i][3], boxes[i][2]])
return np.concatenate([boxes, scores], axis=1)
def preprocess(self, image, target_height, target_width, label=None):
###sometimes use in objs detects
h, w, c = image.shape
bimage = np.zeros(shape=[target_height, target_width, c], dtype=image.dtype) + np.array(cfg.DATA.pixel_means,
dtype=image.dtype)
long_side = max(h, w)
scale_x = scale_y = target_height / long_side
image_resized = cv2.resize(image, None, fx=scale_x, fy=scale_y)
h_resized, w_resized, _ = image_resized.shape
bimage[:h_resized, :w_resized, :] = image_resized
return bimage, scale_x, scale_y
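# Minimal usage sketch (assumed, not part of the original file):
#   detector = FaceDetector()
#   boxes = detector(rgb_image)  # float array of shape [num_faces, 5]:
#                                # x_min, y_min, x_max, y_max, score in raw-image pixels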
``` |
{
"source": "jinh574/python-bakerbird",
"score": 3
} |
#### File: python-bakerbird/aho_corasick/aho_corasick.py
```python
from collections import deque
from .node import Node
class AhoCorasick(object):
def __init__(self):
'''
        Initialize the Aho-Corasick automaton.
'''
self.head = Node()
self.head.fail = 0
self.pattern = set()
self.idx = 1
self.aho_corasick = {0: self.head}
def __call__(self, text):
'''
        Run the Aho-Corasick search over the previously built trie.
        :param text: the text to match against
        :return: matches as a list of (start, end, keyword) tuples [(INT start, INT end, STRING keyword), ...]
'''
current = self.head
ret = []
for idx, char in enumerate(text):
while True:
                if not current.goto(char) and current.idx != 0:
current = self.aho_corasick[current.fail]
else:
child = current.goto(char)
break
if child:
current = child
if child.output:
keyword = max(list(child.output), key=len)
start = idx - len(keyword) + 1
end = start + len(keyword)
ret.append((start, end, keyword))
return ret
def add_pattern(self, pattern):
'''
        Add a single pattern to the trie.
        :param pattern: the pattern to insert into the trie
:return: -
'''
self.pattern.add(pattern)
current = self.head
for char in pattern:
if char not in current.children.keys():
current.children[char] = Node(self.idx, char)
self.aho_corasick[self.idx] = current.children[char]
self.idx += 1
current = current.children[char]
current.output.add(pattern)
def add_patterns(self, patterns):
'''
        Add several patterns at once, given as a list (or a space-separated string).
        :param patterns: the patterns to insert into the trie
:return: -
'''
if type(patterns) is str:
patterns = patterns.split()
assert type(patterns) is list, "Please input list or str with space"
for pattern in patterns:
self.add_pattern(pattern)
def _compute_fail_func(self):
'''
        Compute the failure function over the inserted patterns (BFS over the trie).
:return: -
'''
queue = deque()
for node in self.head.children.values():
queue.append(node)
while queue:
target = queue.popleft()
for node in target.children.values():
queue.append(node)
idx = target.fail
char = node.char
current = self.aho_corasick[idx]
                while not current.goto(char) and current.idx != 0:
new_idx = current.fail
current = self.aho_corasick[new_idx]
if not current.goto(char):
node.fail = current.idx
else:
node.fail = current.goto(char).idx
node.set_output(self.aho_corasick[node.fail].output)
def build(self):
'''
        Trigger function to call once all patterns have been added.
:return: -
'''
self._compute_fail_func()
if __name__ == "__main__":
aho = AhoCorasick()
aho.add_pattern("hi")
aho.add_pattern("this")
aho.build()
aho("this is my first aho-corasick implemented. and")
```
#### File: jinh574/python-bakerbird/checker.py
```python
import argparse
from utils import load_input_data, load_output_data, save_output_data, save_check_data
# Parse command-line arguments
parser = argparse.ArgumentParser(description="Homework checker program")
parser.add_argument("input", type=str,
help="Input txt file path")
parser.add_argument("output", type=str,
help="Output txt file path")
parser.add_argument("check", type=str,
help="Check status txt file path")
args = parser.parse_args()
def checker_numpy(pattern_len, text_len, patterns, text):
'''
Checker program core code.
    Slide the pattern matrix over the text matrix one cell at a time and report
    a position whenever every character matches.
    :param pattern_len: size of the pattern matrix
    :param text_len: size of the text matrix
    :param patterns: 2-D pattern array
    :param text: 2-D text array
    :return: list of (row, col) tuples for every matching position in the text
'''
import numpy as np
ret = []
pattern_np, text_np = np.array([list(v) for v in patterns]), np.array([list(v) for v in text])
for i in range(text_len - pattern_len + 1):
for j in range(text_len - pattern_len + 1):
mask = text_np[i:i + pattern_len, j:j + pattern_len] == pattern_np
if mask.all():
ret.append((i + pattern_len - 1, j + pattern_len - 1))
return ret
def checker_naive(pattern_len, text_len, patterns, text):
'''
    Slide the pattern matrix over the text matrix one cell at a time and report
    a position whenever every character matches (pure-Python variant).
    :param pattern_len: size of the pattern matrix
    :param text_len: size of the text matrix
    :param patterns: 2-D pattern array
    :param text: 2-D text array
    :return: list of (row, col) tuples for every matching position in the text
'''
ret = []
pattern_arr, text_arr = [list(v) for v in patterns], [list(v) for v in text]
for i in range(text_len - pattern_len + 1):
for j in range(text_len - pattern_len + 1):
mask = [e[j:j + pattern_len]for e in text_arr[i:i + pattern_len]] == pattern_arr
if mask:
ret.append((i + pattern_len - 1, j + pattern_len - 1))
return ret
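# Both checkers report the bottom-right corner of each occurrence, i.e.
# (i + pattern_len - 1, j + pattern_len - 1), so the result can be compared
# directly with the Baker-Bird output loaded by load_output_data.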
if __name__ == "__main__":
data = load_input_data(args.input)
outputs = load_output_data(args.output)
check_result = checker_naive(*data)
print(check_result)
if check_result == outputs:
save_check_data(args.check, "yes")
else:
save_check_data(args.check, "no")
```
#### File: jinh574/python-bakerbird/utils.py
```python
import os
import pathlib
class Streamer(object):
def __init__(self, file_path):
self.f = open(file_path, "r")
self.pattern_byte = 0
def next(self):
if self.f.tell() == 0:
line = self.f.readline().replace("\n", "")
self.pattern_byte = self.f.tell()
else:
line = self.f.readline().replace("\n", "")
return line
def set_seek(self):
self.f.seek(self.pattern_byte)
def load_input_data_stream(path):
'''
    Provide the input data as a stream.
    :param path: path to the input file
    :return: pattern size, text size and a file stream object, in that order
'''
assert os.path.exists(path), "Please check input path"
stream = Streamer(path)
line = stream.next()
pattern_len, text_len = line.split()
pattern_len, text_len = int(pattern_len), int(text_len)
return pattern_len, text_len, stream
def load_input_data(path):
'''
    Parse the input file and return its contents as a tuple.
    :param path: path to the input file
    :return: pattern size, text size, the pattern rows and the text rows, in that order
'''
assert os.path.exists(path), "Please check input path"
with open(path, "r") as f:
lines = [line.replace("\n", "").strip() for line in f.readlines()]
pattern_len, text_len = lines[0].split()
pattern_len, text_len = int(pattern_len), int(text_len)
patterns = lines[1:1+pattern_len]
text = lines[1+pattern_len:1+pattern_len+text_len]
return pattern_len, text_len, patterns, text
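# Example input layout implied by the parser above (illustrative values):
#   2 4        <- pattern size, text size
#   ab         <- 2 pattern rows
#   ba
#   abab       <- 4 text rows
#   baba
#   abab
#   baba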
def load_output_data(path):
'''
    Parse the output file and return its contents.
    :param path: path to the output file
    :return: one (row, col) tuple per line of pattern-matching results
'''
assert os.path.exists(path), "Please check output path"
with open(path, "r") as f:
lines = [line.replace("\n","").strip().split() for line in f.readlines()]
return [(int(line[0]), int(line[1])) for line in lines]
def save_output_data(path, result):
'''
    Write the result tuples in the output file format.
    :param path: path to the output file
    :param result: list of position tuples for the matched patterns
:return: -
'''
dirname = os.path.dirname(path)
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
with open(path, "w") as f:
for i, j in result:
f.write(f'{i} {j}\n')
def save_check_data(path, result):
'''
    Write whether the implemented Baker-Bird algorithm agrees with the checker program, in the check file format.
    :param path: path to the check file
    :param result: checker result ("yes" or "no")
:return: -
'''
dirname = os.path.dirname(path)
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
with open(path, "w") as f:
f.write(result)
``` |
{
"source": "JinHai-CN/milvus",
"score": 2
} |
#### File: tests/milvus_benchmark/k8s_runner.py
```python
import os
import logging
import pdb
import string
import time
import re
import random
import traceback
import json
import csv
from multiprocessing import Process
import numpy as np
from yaml import full_load, dump
from concurrent import futures
from client import MilvusClient
import parser
from runner import Runner
from milvus_metrics.api import report
from milvus_metrics.models import Env, Hardware, Server, Metric
import utils
logger = logging.getLogger("milvus_benchmark.k8s_runner")
namespace = "milvus"
default_port = 19530
DELETE_INTERVAL_TIME = 5
# INSERT_INTERVAL = 100000
INSERT_INTERVAL = 50000
timestamp = int(time.time())
default_path = "/var/lib/milvus"
class K8sRunner(Runner):
def __init__(self):
"""
        Run in helm mode.
        Upload the test results after the tests finish.
"""
super(K8sRunner, self).__init__()
self.service_name = utils.get_unique_name()
self.host = None
self.port = default_port
self.hostname = None
self.env_value = None
def init_env(self, server_config, server_host, deploy_mode, image_type, image_tag):
"""
        Deploy and start the server using helm.
        Clean up the environment if deploy or start fails.
"""
logger.debug("Tests run on server host:")
logger.debug(server_host)
self.hostname = server_host
# update values
helm_path = os.path.join(os.getcwd(), "../milvus-helm/charts/milvus")
values_file_path = helm_path+"/values.yaml"
if not os.path.exists(values_file_path):
raise Exception("File %s not existed" % values_file_path)
if server_config:
utils.update_values(values_file_path, deploy_mode, server_host, server_config)
try:
logger.debug("Start install server")
self.host = utils.helm_install_server(helm_path, deploy_mode, image_tag, image_type, self.service_name, namespace)
except Exception as e:
logger.error("Helm install server failed: %s" % (str(e)))
logger.error(traceback.format_exc())
logger.debug(server_config)
self.clean_up()
return False
# for debugging
if not self.host:
logger.error("Helm install server failed")
self.clean_up()
return False
return True
def clean_up(self):
"""
        Stop the server using helm.
"""
logger.debug("Start clean up: %s" % self.service_name)
utils.helm_del_server(self.service_name, namespace)
def report_wrapper(self, milvus_instance, env_value, hostname, collection_info, index_info, search_params, run_params=None):
"""
        Build the metric object that is uploaded after each test.
"""
metric = Metric()
metric.set_run_id(timestamp)
metric.env = Env(env_value)
metric.env.OMP_NUM_THREADS = 0
metric.hardware = Hardware(name=hostname)
server_version = milvus_instance.get_server_version()
server_mode = milvus_instance.get_server_mode()
commit = milvus_instance.get_server_commit()
metric.server = Server(version=server_version, mode=server_mode, build_commit=commit)
metric.collection = collection_info
metric.index = index_info
metric.search = search_params
metric.run_params = run_params
return metric
def run(self, run_type, collection):
"""
override runner.run
"""
logger.debug(run_type)
logger.debug(collection)
collection_name = collection["collection_name"] if "collection_name" in collection else None
milvus_instance = MilvusClient(collection_name=collection_name, host=self.host)
self.env_value = milvus_instance.get_server_config()
        # ugly implementation
# remove some parts of result before uploading results
self.env_value.pop("logs")
if milvus_instance.get_server_mode() == "CPU":
if "gpu" in self.env_value:
self.env_value.pop("gpu")
elif "cache.enable" in self.env_value["gpu"]:
self.env_value["gpu"].pop("cache.enable")
self.env_value.pop("network")
if run_type == "insert_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
build_index = collection["build_index"]
if milvus_instance.exists_collection():
milvus_instance.drop()
time.sleep(10)
index_info = {}
search_params = {}
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
if build_index is True:
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info = {
"index_type": index_type,
"index_param": index_param
}
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
logger.info(res)
if "flush" in collection and collection["flush"] == "no":
logger.debug("No manual flush")
else:
milvus_instance.flush()
logger.debug(milvus_instance.count())
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": run_type,
"value": {
"total_time": res["total_time"],
"qps": res["qps"],
"ni_time": res["ni_time"]
}
}
report(metric)
if build_index is True:
logger.debug("Start build index for last file")
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
elif run_type == "insert_debug_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
if milvus_instance.exists_collection():
milvus_instance.drop()
time.sleep(10)
index_info = {}
search_params = {}
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(ni_per)]
start_time = time.time()
i = 0
while time.time() < start_time + 2 * 24 * 3600:
i = i + 1
logger.debug(i)
logger.debug("Row count: %d" % milvus_instance.count())
milvus_instance.insert(insert_vectors)
time.sleep(0.1)
elif run_type == "insert_performance_multi_collections":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
build_index = collection["build_index"]
if milvus_instance.exists_collection():
milvus_instance.drop()
time.sleep(10)
index_info = {}
search_params = {}
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
if build_index is True:
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info = {
"index_type": index_type,
"index_param": index_param
}
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
logger.info(res)
milvus_instance.flush()
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": run_type,
"value": {
"total_time": res["total_time"],
"qps": res["qps"],
"ni_time": res["ni_time"]
}
}
report(metric)
if build_index is True:
logger.debug("Start build index for last file")
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
elif run_type == "insert_flush_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
if milvus_instance.exists_collection():
milvus_instance.drop()
time.sleep(10)
index_info = {}
search_params = {}
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
logger.info(res)
logger.debug(milvus_instance.count())
start_time = time.time()
milvus_instance.flush()
end_time = time.time()
logger.debug(milvus_instance.count())
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": run_type,
"value": {
"flush_time": round(end_time - start_time, 1)
}
}
report(metric)
elif run_type == "build_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
index_type = collection["index_type"]
index_param = collection["index_param"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
index_info = {
"index_type": index_type,
"index_param": index_param
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
search_params = {}
start_time = time.time()
# drop index
logger.debug("Drop index")
milvus_instance.drop_index()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
logger.debug(milvus_instance.count())
end_time = time.time()
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": "build_performance",
"value": {
"build_time": round(end_time - start_time, 1),
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage
}
}
report(metric)
elif run_type == "delete_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
search_params = {}
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
length = milvus_instance.count()
logger.info(length)
index_info = milvus_instance.describe_index()
logger.info(index_info)
ids = [i for i in range(length)]
loops = int(length / ni_per)
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
start_time = time.time()
for i in range(loops):
delete_ids = ids[i*ni_per : i*ni_per+ni_per]
logger.debug("Delete %d - %d" % (delete_ids[0], delete_ids[-1]))
milvus_instance.delete(delete_ids)
# milvus_instance.flush()
logger.debug("Table row counts: %d" % milvus_instance.count())
logger.debug("Table row counts: %d" % milvus_instance.count())
milvus_instance.flush()
end_time = time.time()
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug("Table row counts: %d" % milvus_instance.count())
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": "delete_performance",
"value": {
"delete_time": round(end_time - start_time, 1),
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage
}
}
report(metric)
elif run_type == "get_ids_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ids_length_per_segment = collection["ids_length_per_segment"]
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
search_params = {}
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
for ids_num in ids_length_per_segment:
segment_num, get_ids = milvus_instance.get_rand_ids_each_segment(ids_num)
start_time = time.time()
_ = milvus_instance.get_entities(get_ids)
total_time = time.time() - start_time
avg_time = total_time / segment_num
run_params = {"ids_num": ids_num}
logger.info("Segment num: %d, ids num per segment: %d, run_time: %f" % (segment_num, ids_num, total_time))
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params, run_params=run_params)
metric.metrics = {
"type": run_type,
"value": {
"total_time": round(total_time, 1),
"avg_time": round(avg_time, 1)
}
}
report(metric)
elif run_type == "search_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
run_count = collection["run_count"]
top_ks = collection["top_ks"]
nqs = collection["nqs"]
search_params = collection["search_params"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
milvus_instance.preload_collection()
logger.info("Start warm up query")
res = self.do_query(milvus_instance, collection_name, [1], [1], 2, search_param=search_params[0])
logger.info("End warm up query")
for search_param in search_params:
logger.info("Search param: %s" % json.dumps(search_param))
res = self.do_query(milvus_instance, collection_name, top_ks, nqs, run_count, search_param)
headers = ["Nq/Top-k"]
headers.extend([str(top_k) for top_k in top_ks])
logger.info("Search param: %s" % json.dumps(search_param))
utils.print_table(headers, nqs, res)
for index_nq, nq in enumerate(nqs):
for index_top_k, top_k in enumerate(top_ks):
search_param_group = {
"nq": nq,
"topk": top_k,
"search_param": search_param
}
search_time = res[index_nq][index_top_k]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_param_group)
metric.metrics = {
"type": "search_performance",
"value": {
"search_time": search_time
}
}
report(metric)
elif run_type == "locust_search_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(
collection_name)
### clear db
### spawn locust requests
collection_num = collection["collection_num"]
task = collection["task"]
# . generate task code
task_file = utils.get_unique_name()
task_file_script = task_file + '.py'
task_file_csv = task_file + '_stats.csv'
task_type = task["type"]
connection_type = "single"
connection_num = task["connection_num"]
if connection_num > 1:
connection_type = "multi"
clients_num = task["clients_num"]
hatch_rate = task["hatch_rate"]
during_time = task["during_time"]
def_name = task_type
task_params = task["params"]
collection_names = []
for i in range(collection_num):
suffix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(5))
collection_names.append(collection_name + "_" + suffix)
# #####
ni_per = collection["ni_per"]
build_index = collection["build_index"]
# TODO: debug
for c_name in collection_names:
milvus_instance = MilvusClient(collection_name=c_name, host=self.host, port=self.port)
if milvus_instance.exists_collection(collection_name=c_name):
milvus_instance.drop(name=c_name)
time.sleep(10)
milvus_instance.create_collection(c_name, dimension, index_file_size, metric_type)
index_info = {
"build_index": build_index
}
if build_index is True:
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info.update({
"index_type": index_type,
"index_param": index_param
})
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
res = self.do_insert(milvus_instance, c_name, data_type, dimension, collection_size, ni_per)
logger.info(res)
if "flush" in collection and collection["flush"] == "no":
logger.debug("No manual flush")
else:
milvus_instance.flush()
logger.debug("Table row counts: %d" % milvus_instance.count(name=c_name))
if build_index is True:
logger.debug("Start build index for last file")
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
code_str = """
import random
import string
from locust import User, task, between
from locust_task import MilvusTask
from client import MilvusClient
host = '%s'
port = %s
dim = %s
connection_type = '%s'
collection_names = %s
m = MilvusClient(host=host, port=port)
def get_collection_name():
return random.choice(collection_names)
def get_client(collection_name):
if connection_type == 'single':
return MilvusTask(m=m)
elif connection_type == 'multi':
return MilvusTask(connection_type='multi', host=host, port=port, collection_name=collection_name)
class QueryTask(User):
wait_time = between(0.001, 0.002)
@task()
def %s(self):
top_k = %s
X = [[random.random() for i in range(dim)] for i in range(%s)]
search_param = %s
collection_name = get_collection_name()
client = get_client(collection_name)
client.query(X, top_k, search_param, collection_name=collection_name)
""" % (self.host, self.port, dimension, connection_type, collection_names, def_name, task_params["top_k"], task_params["nq"], task_params["search_param"])
with open(task_file_script, 'w+') as fd:
fd.write(code_str)
locust_cmd = "locust -f %s --headless --csv=%s -u %d -r %d -t %s" % (
task_file_script,
task_file,
clients_num,
hatch_rate,
during_time)
logger.info(locust_cmd)
try:
res = os.system(locust_cmd)
except Exception as e:
logger.error(str(e))
return
# . retrieve and collect test statistics
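            # locust's --csv=<prefix> option writes <prefix>_stats.csv; the row whose
            # Name is "Aggregated" summarises all requests and is what gets reported below.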
locust_stats = None
with open(task_file_csv, newline='') as fd:
dr = csv.DictReader(fd)
for row in dr:
if row["Name"] != "Aggregated":
continue
locust_stats = row
logger.info(locust_stats)
# clean up temp files
search_params = {
"top_k": task_params["top_k"],
"nq": task_params["nq"],
"nprobe": task_params["search_param"]["nprobe"]
}
run_params = {
"connection_num": connection_num,
"clients_num": clients_num,
"hatch_rate": hatch_rate,
"during_time": during_time
}
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params, run_params)
metric.metrics = {
"type": run_type,
"value": {
"during_time": during_time,
"request_count": int(locust_stats["Request Count"]),
"failure_count": int(locust_stats["Failure Count"]),
"qps": locust_stats["Requests/s"],
"min_response_time": int(locust_stats["Min Response Time"]),
"max_response_time": int(locust_stats["Max Response Time"]),
"median_response_time": int(locust_stats["Median Response Time"]),
"avg_response_time": int(locust_stats["Average Response Time"])
}
}
report(metric)
elif run_type == "search_ids_stability":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
during_time = collection["during_time"]
ids_length = collection["ids_length"]
ids = collection["ids"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
g_top_k = int(collection["top_ks"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
# g_id = int(ids.split("-")[1])
# l_id = int(ids.split("-")[0])
g_id_length = int(ids_length.split("-")[1])
l_id_length = int(ids_length.split("-")[0])
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug(start_mem_usage)
start_time = time.time()
while time.time() < start_time + during_time * 60:
search_param = {}
top_k = random.randint(l_top_k, g_top_k)
ids_num = random.randint(l_id_length, g_id_length)
ids_param = [random.randint(l_id_length, g_id_length) for _ in range(ids_num)]
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
logger.debug("Query top-k: %d, ids_num: %d, param: %s" % (top_k, ids_num, json.dumps(search_param)))
result = milvus_instance.query_ids(top_k, ids_param, search_param=search_param)
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, {})
metric.metrics = {
"type": "search_ids_stability",
"value": {
"during_time": during_time,
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage
}
}
report(metric)
# for sift/deep datasets
# TODO: enable
elif run_type == "accuracy":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
# mapping to search param list
search_params = self.generate_combinations(search_params)
top_ks = collection["top_ks"]
nqs = collection["nqs"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s does not exist" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
milvus_instance.preload_collection()
true_ids_all = self.get_groundtruth_ids(collection_size)
for search_param in search_params:
for top_k in top_ks:
for nq in nqs:
# total = 0
search_param_group = {
"nq": nq,
"topk": top_k,
"search_param": search_param
}
logger.info("Query params: %s" % json.dumps(search_param_group))
result_ids, _ = self.do_query_ids(milvus_instance, collection_name, top_k, nq, search_param=search_param)
acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids)
logger.info("Query accuracy: %s" % acc_value)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_param_group)
metric.metrics = {
"type": "accuracy",
"value": {
"acc": acc_value
}
}
report(metric)
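# ann_accuracy: rebuild the collection from an ann-benchmarks HDF5 file, then sweep
# index_file_size / index_type / index_param / search_param and report recall per combination.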
elif run_type == "ann_accuracy":
hdf5_source_file = collection["source_file"]
collection_name = collection["collection_name"]
index_file_sizes = collection["index_file_sizes"]
index_types = collection["index_types"]
index_params = collection["index_params"]
top_ks = collection["top_ks"]
nqs = collection["nqs"]
search_params = collection["search_params"]
# mapping to search param list
search_params = self.generate_combinations(search_params)
# mapping to index param list
index_params = self.generate_combinations(index_params)
data_type, dimension, metric_type = parser.parse_ann_collection_name(collection_name)
dataset = utils.get_dataset(hdf5_source_file)
true_ids = np.array(dataset["neighbors"])
for index_file_size in index_file_sizes:
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
if milvus_instance.exists_collection(collection_name):
logger.info("Re-create collection: %s" % collection_name)
milvus_instance.drop()
time.sleep(DELETE_INTERVAL_TIME)
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
logger.info(milvus_instance.describe())
insert_vectors = self.normalize(metric_type, np.array(dataset["train"]))
# Insert batch once
# milvus_instance.insert(insert_vectors)
loops = len(insert_vectors) // INSERT_INTERVAL + 1
for i in range(loops):
start = i*INSERT_INTERVAL
end = min((i+1)*INSERT_INTERVAL, len(insert_vectors))
tmp_vectors = insert_vectors[start:end]
if start < end:
if not isinstance(tmp_vectors, list):
milvus_instance.insert(tmp_vectors.tolist(), ids=[i for i in range(start, end)])
else:
milvus_instance.insert(tmp_vectors, ids=[i for i in range(start, end)])
milvus_instance.flush()
logger.info("Table: %s, row count: %s" % (collection_name, milvus_instance.count()))
if milvus_instance.count() != len(insert_vectors):
logger.error("Table row count does not match the number of inserted vectors")
return
for index_type in index_types:
for index_param in index_params:
logger.debug("Building index with param: %s" % json.dumps(index_param))
milvus_instance.create_index(index_type, index_param=index_param)
logger.info(milvus_instance.describe_index())
logger.info("Start preload collection: %s" % collection_name)
milvus_instance.preload_collection()
index_info = {
"index_type": index_type,
"index_param": index_param
}
logger.debug(index_info)
for search_param in search_params:
for nq in nqs:
query_vectors = self.normalize(metric_type, np.array(dataset["test"][:nq]))
for top_k in top_ks:
search_param_group = {
"nq": len(query_vectors),
"topk": top_k,
"search_param": search_param
}
logger.debug(search_param_group)
if not isinstance(query_vectors, list):
result = milvus_instance.query(query_vectors.tolist(), top_k, search_param=search_param)
else:
result = milvus_instance.query(query_vectors, top_k, search_param=search_param)
if len(result):
logger.debug(len(result))
logger.debug(len(result[0]))
result_ids = result.id_array
acc_value = self.get_recall_value(true_ids[:nq, :top_k].tolist(), result_ids)
logger.info("Query ann_accuracy: %s" % acc_value)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_param_group)
metric.metrics = {
"type": "ann_accuracy",
"value": {
"acc": acc_value
}
}
report(metric)
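# search_stability: issue random queries (random nq/top-k/params) for `during_time`
# minutes and report the memory-usage drift.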
elif run_type == "search_stability":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
during_time = collection["during_time"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s does not exist" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
g_top_k = int(collection["top_ks"].split("-")[1])
g_nq = int(collection["nqs"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
l_nq = int(collection["nqs"].split("-")[0])
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug(start_mem_usage)
start_row_count = milvus_instance.count()
logger.debug(milvus_instance.describe_index())
logger.info(start_row_count)
start_time = time.time()
while time.time() < start_time + during_time * 60:
search_param = {}
top_k = random.randint(l_top_k, g_top_k)
nq = random.randint(l_nq, g_nq)
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
logger.debug("Query nq: %d, top-k: %d, param: %s" % (nq, top_k, json.dumps(search_param)))
result = milvus_instance.query(query_vectors, top_k, search_param=search_param)
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, {})
metric.metrics = {
"type": "search_stability",
"value": {
"during_time": during_time,
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage
}
}
report(metric)
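# loop_stability: create several collections, then repeatedly run random tasks
# (insert/delete/query/flush/compact) against them, restarting the server every
# `pull_interval` minutes.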
elif run_type == "loop_stability":
# init data
milvus_instance.clean_db()
pull_interval = collection["pull_interval"]
collection_num = collection["collection_num"]
concurrent = collection["concurrent"] if "concurrent" in collection else False
concurrent_num = collection_num
dimension = collection["dimension"] if "dimension" in collection else 128
insert_xb = collection["insert_xb"] if "insert_xb" in collection else 100000
index_types = collection["index_types"] if "index_types" in collection else ['ivf_sq8']
index_param = {"nlist": 2048}
collection_names = []
milvus_instances_map = {}
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
for i in range(collection_num):
name = utils.get_unique_name(prefix="collection_")
collection_names.append(name)
metric_type = random.choice(["l2", "ip"])
index_file_size = random.randint(10, 20)
milvus_instance.create_collection(name, dimension, index_file_size, metric_type)
milvus_instance = MilvusClient(collection_name=name, host=self.host)
index_type = random.choice(index_types)
milvus_instance.create_index(index_type, index_param=index_param)
logger.info(milvus_instance.describe_index())
insert_vectors = utils.normalize(metric_type, insert_vectors)
milvus_instance.insert(insert_vectors)
milvus_instance.flush()
milvus_instances_map.update({name: milvus_instance})
logger.info(milvus_instance.describe_index())
logger.info(milvus_instance.describe())
# loop time unit: min -> s
pull_interval_seconds = pull_interval * 60
tasks = ["insert_rand", "delete_rand", "query_rand", "flush", "compact"]
i = 1
while True:
logger.info("Loop time: %d" % i)
start_time = time.time()
while time.time() - start_time < pull_interval_seconds:
if concurrent:
mp = []
for _ in range(concurrent_num):
tmp_collection_name = random.choice(collection_names)
task_name = random.choice(tasks)
mp.append((tmp_collection_name, task_name))
with futures.ThreadPoolExecutor(max_workers=concurrent_num) as executor:
future_results = {executor.submit(getattr(milvus_instances_map[mp[j][0]], mp[j][1])): j for j in range(concurrent_num)}
for future in futures.as_completed(future_results):
future.result()
else:
tmp_collection_name = random.choice(collection_names)
task_name = random.choice(tasks)
logger.info(tmp_collection_name)
logger.info(task_name)
task_run = getattr(milvus_instances_map[tmp_collection_name], task_name)
task_run()
logger.debug("Restart server")
utils.restart_server(self.service_name, namespace)
# new connection
for name in collection_names:
milvus_instance = MilvusClient(collection_name=name, host=self.host)
milvus_instances_map.update({name: milvus_instance})
i = i + 1
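# stability: interleave random queries with inserts/deletes/flush/compact for
# `during_time` minutes and report memory and row-count changes.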
elif run_type == "stability":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
insert_xb = collection["insert_xb"]
insert_interval = collection["insert_interval"]
delete_xb = collection["delete_xb"]
during_time = collection["during_time"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s does not exist" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
g_top_k = int(collection["top_ks"].split("-")[1])
g_nq = int(collection["nqs"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
l_nq = int(collection["nqs"].split("-")[0])
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
start_row_count = milvus_instance.count()
logger.debug(milvus_instance.describe_index())
logger.info(start_row_count)
start_time = time.time()
i = 0
ids = []
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
query_vectors = [[random.random() for _ in range(dimension)] for _ in range(10000)]
while time.time() < start_time + during_time * 60:
i = i + 1
for j in range(insert_interval):
top_k = random.randint(l_top_k, g_top_k)
nq = random.randint(l_nq, g_nq)
search_param = {}
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
logger.debug("Query nq: %d, top-k: %d, param: %s" % (nq, top_k, json.dumps(search_param)))
result = milvus_instance.query(query_vectors[0:nq], top_k, search_param=search_param)
count = milvus_instance.count()
insert_ids = [(count+x) for x in range(len(insert_vectors))]
ids.extend(insert_ids)
status, res = milvus_instance.insert(insert_vectors, ids=insert_ids)
logger.debug("%d, row_count: %d" % (i, milvus_instance.count()))
milvus_instance.delete(ids[-delete_xb:])
milvus_instance.flush()
milvus_instance.compact()
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
end_row_count = milvus_instance.count()
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, {})
metric.metrics = {
"type": "stability",
"value": {
"during_time": during_time,
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage,
"row_count_increments": end_row_count - start_row_count
}
}
report(metric)
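# locust_mix_performance: rebuild the collection, generate a locust script mixing
# weighted query/insert/delete/flush/compact tasks, run it headless, and report the
# aggregated locust statistics.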
elif run_type == "locust_mix_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(
collection_name)
ni_per = collection["ni_per"]
build_index = collection["build_index"]
# # TODO: debug
if milvus_instance.exists_collection():
milvus_instance.drop()
time.sleep(10)
index_info = {}
search_params = {}
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
if build_index is True:
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info = {
"index_tyoe": index_type,
"index_param": index_param
}
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
logger.info(res)
if "flush" in collection and collection["flush"] == "no":
logger.debug("No manual flush")
else:
milvus_instance.flush()
if build_index is True:
logger.debug("Start build index for last file")
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
### spawn locust requests
task = collection["tasks"]
# generate task code
task_file = utils.get_unique_name()
task_file_script = task_file + '.py'
task_file_csv = task_file + '_stats.csv'
task_types = task["types"]
connection_type = "single"
connection_num = task["connection_num"]
if connection_num > 1:
connection_type = "multi"
clients_num = task["clients_num"]
hatch_rate = task["hatch_rate"]
during_time = task["during_time"]
def_strs = ""
for task_type in task_types:
_type = task_type["type"]
weight = task_type["weight"]
if _type == "flush":
def_str = """
@task(%d)
def flush(self):
client = get_client(collection_name)
client.flush(collection_name=collection_name)
""" % weight
if _type == "compact":
def_str = """
@task(%d)
def compact(self):
client = get_client(collection_name)
client.compact(collection_name)
""" % weight
if _type == "query":
def_str = """
@task(%d)
def query(self):
client = get_client(collection_name)
params = %s
X = [[random.random() for i in range(dim)] for i in range(params["nq"])]
client.query(X, params["top_k"], params["search_param"], collection_name=collection_name)
""" % (weight, task_type["params"])
if _type == "insert":
def_str = """
@task(%d)
def insert(self):
client = get_client(collection_name)
params = %s
ids = [random.randint(10, 1000000) for i in range(params["nb"])]
X = [[random.random() for i in range(dim)] for i in range(params["nb"])]
client.insert(X,ids=ids, collection_name=collection_name)
""" % (weight, task_type["params"])
if _type == "delete":
def_str = """
@task(%d)
def delete(self):
client = get_client(collection_name)
ids = [random.randint(1, 1000000) for i in range(1)]
client.delete(ids, collection_name)
""" % weight
def_strs += def_str
code_str = """
import random
import json
from locust import User, task, between
from locust_task import MilvusTask
from client import MilvusClient
host = '%s'
port = %s
collection_name = '%s'
dim = %s
connection_type = '%s'
m = MilvusClient(host=host, port=port)
def get_client(collection_name):
if connection_type == 'single':
return MilvusTask(m=m)
elif connection_type == 'multi':
return MilvusTask(connection_type='multi', host=host, port=port, collection_name=collection_name)
class MixTask(User):
wait_time = between(0.001, 0.002)
%s
""" % (self.host, self.port, collection_name, dimension, connection_type, def_strs)
print(def_strs)
with open(task_file_script, "w+") as fd:
fd.write(code_str)
locust_cmd = "locust -f %s --headless --csv=%s -u %d -r %d -t %s" % (
task_file_script,
task_file,
clients_num,
hatch_rate,
during_time)
logger.info(locust_cmd)
try:
res = os.system(locust_cmd)
except Exception as e:
logger.error(str(e))
return
# retrieve and collect test statistics
locust_stats = None
with open(task_file_csv, newline='') as fd:
dr = csv.DictReader(fd)
for row in dr:
if row["Name"] != "Aggregated":
continue
locust_stats = row
logger.info(locust_stats)
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": run_type,
"value": {
"during_time": during_time,
"request_count": int(locust_stats["Request Count"]),
"failure_count": int(locust_stats["Failure Count"]),
"qps": locust_stats["Requests/s"],
"min_response_time": int(locust_stats["Min Response Time"]),
"max_response_time": int(locust_stats["Max Response Time"]),
"median_response_time": int(locust_stats["Median Response Time"]),
"avg_response_time": int(locust_stats["Average Response Time"])
}
}
report(metric)
else:
logger.warning("Run type: %s not defined" % run_type)
return
logger.debug("Test finished")
```
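The accuracy branches above call `self.get_recall_value`, which is defined elsewhere in the benchmark suite. As a rough sketch only (an assumption, not the repository's implementation), such a recall computation typically averages the per-query overlap between the returned ids and the ground-truth ids:

```python
def recall(true_ids, result_ids):
    """Hypothetical helper: mean fraction of ground-truth ids recovered per query."""
    hits = [len(set(t) & set(r)) / len(t) for t, r in zip(true_ids, result_ids)]
    return round(sum(hits) / len(hits), 3)
```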
#### File: tests/milvus_benchmark/locust_task.py
```python
import time
from locust import events
from client import MilvusClient
class MilvusTask(object):
def __init__(self, connection_type="single", **kwargs):
"""
Generate milvus client for locust.
To make sure we can use the same function name in client as task name in Taskset/User.
Params: connection_type, single/multi is optional
other args: host/port/collection_name
"""
self.request_type = "grpc"
if connection_type == "single":
self.m = kwargs.get("m")
elif connection_type == "multi":
host = kwargs.get("host")
port = kwargs.get("port")
collection_name = kwargs.get("collection_name")
self.m = MilvusClient(host=host, port=port, collection_name=collection_name)
def __getattr__(self, name):
"""
Register success and failure event with using locust.events.
Make sure the task function name in locust equals to te name of function in MilvusClient
"""
func = getattr(self.m, name)
def wrapper(*args, **kwargs):
start_time = time.time()
try:
_ = func(*args, **kwargs)
total_time = int((time.time() - start_time) * 1000)
events.request_success.fire(request_type=self.request_type, name=name, response_time=total_time,
response_length=0)
except Exception as e:
total_time = int((time.time() - start_time) * 1000)
events.request_failure.fire(request_type=self.request_type, name=name, response_time=total_time,
exception=e, response_length=0)
return wrapper
```
#### File: tests/milvus_benchmark/search_task.py
```python
import random, string
from locust import User, task, between
from locust_task import MilvusTask
from client import MilvusClient
connection_type = "single"
host = "192.168.1.29"
port = 19531
collection_name = "sift_128_euclidean"
dim = 128
m = MilvusClient(host=host, port=port, collection_name=collection_name)
class QueryTask(User):
wait_time = between(0.001, 0.002)
print("in query task")
if connection_type == "single":
client = MilvusTask(m=m)
else:
client = MilvusTask(host=host, port=port, collection_name=collection_name)
# @task()
# def query(self):
# top_k = 10
# X = [[random.random() for i in range(dim)] for i in range(1)]
# search_param = {"nprobe": 16}
# self.client.query(X, top_k, search_param)
@task(1)
def test_create(self):
tag_name = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
self.client.create_partition(tag_name)
@task(1)
def test_drop(self):
tags = m.list_partitions()
tag = random.choice(tags)
self.client.drop_partition(tag.tag)
```
#### File: milvus_python_test/entity/test_get_entity_by_id.py
```python
import time
import random
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import concurrent.futures
import pytest
from milvus import IndexType, MetricType
from utils import *
dim = 128
index_file_size = 10
collection_id = "get_entity_by_id"
DELETE_TIMEOUT = 60
nprobe = 1
tag = "1970-01-01"
top_k = 1
nb = 6000
tag = "tag"
class TestGetBase:
"""
******************************************************************
The following cases are used to test the `get_entity_by_id` function
******************************************************************
"""
def test_get_vector_A(self, connect, collection):
'''
target: test.get_entity_by_id
method: add vector, and get
expected: status ok, vector returned
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
status, res = connect.get_entity_by_id(collection, ids)
assert status.OK()
assert_equal_vector(res[0], vector[0])
def test_get_vector_B(self, connect, collection):
'''
target: test.get_entity_by_id
method: add vector, and get
expected: status ok, vector returned
'''
vectors = gen_vectors(nb, dim)
status, ids = connect.insert(collection, vectors)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
length = 100
status, res = connect.get_entity_by_id(collection, ids[:length])
assert status.OK()
for i in range(length):
assert_equal_vector(res[i], vectors[i])
def test_get_vector_C_limit(self, connect, collection, args):
'''
target: test.get_entity_by_id
method: add vector, and get, limit > 1000
expected: status ok, vector returned
'''
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
vectors = gen_vectors(nb, dim)
status, ids = connect.insert(collection, vectors)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
status, res = connect.get_entity_by_id(collection, ids)
assert not status.OK()
def test_get_vector_partition(self, connect, collection):
'''
target: test.get_entity_by_id
method: add vector, and get
expected: status ok, vector returned
'''
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
assert status.OK()
status, ids = connect.insert(collection, vectors, partition_tag=tag)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
length = 100
status, res = connect.get_entity_by_id(collection, ids[:length])
assert status.OK()
for i in range(length):
assert_equal_vector(res[i], vectors[i])
def test_get_vector_multi_same_ids(self, connect, collection):
'''
target: test.get_entity_by_id
method: add vectors, with the same id, get vector by the given id
expected: status ok, get one vector
'''
vectors = gen_vectors(nb, dim)
ids = [i for i in range(nb)]
ids[1] = 0; ids[-1] = 0
status, ids = connect.insert(collection, vectors, ids=ids)
status = connect.flush([collection])
assert status.OK()
status, res = connect.get_entity_by_id(collection, [0])
assert status.OK()
assert_equal_vector(res[0], vectors[0])
@pytest.fixture(
scope="function",
params=[
1,
10,
100,
1000,
-1
],
)
def get_id(self, request):
yield request.param
def test_get_vector_after_delete(self, connect, collection, get_id):
'''
target: test.get_entity_by_id
method: add vectors, and delete, get vector by the given id
expected: status ok, get one vector
'''
vectors = gen_vectors(nb, dim)
status, ids = connect.insert(collection, vectors)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
id = get_id
status = connect.delete_entity_by_id(collection, [ids[id]])
assert status.OK()
status = connect.flush([collection])
assert status.OK()
status, res = connect.get_entity_by_id(collection, [ids[id]])
assert status.OK()
assert not len(res[0])
def test_get_vector_after_delete_with_partition(self, connect, collection, get_id):
'''
target: test.get_entity_by_id
method: add vectors into partition, and delete, get vector by the given id
expected: status ok, get one vector
'''
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.insert(collection, vectors, partition_tag=tag)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
id = get_id
status = connect.delete_entity_by_id(collection, [ids[id]])
assert status.OK()
status = connect.flush([collection])
assert status.OK()
status, res = connect.get_entity_by_id(collection, [ids[id]])
assert status.OK()
assert not len(res[0])
def test_get_vector_id_not_existed(self, connect, collection):
'''
target: test get vector, params vector_id not existed
method: add vector and get
expected: status ok, empty result
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
status, res = connect.get_entity_by_id(collection, [1])
assert status.OK()
assert not len(res[0])
def test_get_vector_collection_not_existed(self, connect, collection):
'''
target: test get vector, params collection_name not existed
method: add vector and get
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.insert(collection, vector)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
collection_new = gen_unique_str()
status, res = connect.get_entity_by_id(collection_new, [1])
assert not status.OK()
def test_get_vector_by_id_multithreads(self, connect, collection):
vectors = gen_vectors(nb, dim)
status, ids = connect.insert(collection, vectors)
status = connect.flush([collection])
assert status.OK()
get_id = ids[100:200]
def get():
status, res = connect.get_entity_by_id(collection, get_id)
assert status.OK()
assert len(res) == len(get_id)
for i in range(len(res)):
assert_equal_vector(res[i], vectors[100+i])
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_results = {executor.submit(
get): i for i in range(10)}
for future in concurrent.futures.as_completed(future_results):
future.result()
# TODO: autoflush
def _test_get_vector_by_id_after_delete_no_flush(self, connect, collection):
vectors = gen_vectors(nb, dim)
status, ids = connect.insert(collection, vectors)
status = connect.flush([collection])
assert status.OK()
get_id = ids[100:200]
status = connect.delete_entity_by_id(collection, get_id)
assert status.OK()
status, res = connect.get_entity_by_id(collection, get_id)
assert status.OK()
assert len(res) == len(get_id)
for i in range(len(res)):
assert_equal_vector(res[i], vectors[100+i])
class TestGetIndexedVectors:
"""
******************************************************************
The following cases are used to test the `get_entity_by_id` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] not in [IndexType.IVF_SQ8, IndexType.IVFLAT, IndexType.FLAT, IndexType.IVF_PQ, IndexType.IVF_SQ8H]:
pytest.skip("Only support index_type: idmap/ivf")
elif str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] in [IndexType.IVF_SQ8H]:
pytest.skip("CPU not support index_type: ivf_sq8h")
return request.param
@pytest.fixture(
scope="function",
params=[
1,
10,
100,
1000,
-1
],
)
def get_id(self, request):
yield request.param
def test_get_vectors_after_index_created(self, connect, collection, get_simple_index, get_id):
'''
target: test get vector after index created
method: add vector, create index and get vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vectors = gen_vectors(nb, dim)
status, ids = connect.insert(collection, vectors)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
id = get_id
status, res = connect.get_entity_by_id(collection, [ids[id]])
assert status.OK()
assert_equal_vector(res[0], vectors[id])
def test_get_vector_after_delete(self, connect, collection, get_simple_index, get_id):
'''
target: test.get_entity_by_id
method: add vectors, and delete, get vector by the given id
expected: status ok, get one vector
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vectors = gen_vectors(nb, dim)
status, ids = connect.insert(collection, vectors)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
id = get_id
status = connect.delete_entity_by_id(collection, [ids[id]])
assert status.OK()
status = connect.flush([collection])
assert status.OK()
status, res = connect.get_entity_by_id(collection, [ids[id]])
assert status.OK()
assert not len(res[0])
def test_get_vector_partition(self, connect, collection, get_simple_index, get_id):
'''
target: test.get_entity_by_id
method: add vector, and get
expected: status ok, vector returned
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
ids = [i for i in range(nb)]
status, ids = connect.insert(collection, vectors, ids, partition_tag=tag)
assert status.OK()
status = connect.flush([collection])
assert status.OK()
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
id = get_id
status, res = connect.get_entity_by_id(collection, [ids[id]])
assert status.OK()
assert_equal_vector(res[0], vectors[id])
class TestGetBinary:
"""
******************************************************************
The following cases are used to test the `get_entity_by_id` function
******************************************************************
"""
def test_get_vector_A(self, connect, jac_collection):
'''
target: test.get_entity_by_id
method: add vector, and get
expected: status ok, vector returned
'''
tmp, vector = gen_binary_vectors(1, dim)
status, ids = connect.insert(jac_collection, vector)
assert status.OK()
status = connect.flush([jac_collection])
assert status.OK()
status, res = connect.get_entity_by_id(jac_collection, [ids[0]])
assert status.OK()
assert_equal_vector(res[0], vector[0])
def test_get_vector_B(self, connect, jac_collection):
'''
target: test.get_entity_by_id
method: add vector, and get
expected: status ok, vector returned
'''
tmp, vectors = gen_binary_vectors(nb, dim)
status, ids = connect.insert(jac_collection, vectors)
assert status.OK()
status = connect.flush([jac_collection])
assert status.OK()
status, res = connect.get_entity_by_id(jac_collection, [ids[0]])
assert status.OK()
assert_equal_vector(res[0], vectors[0])
def test_get_vector_multi_same_ids(self, connect, jac_collection):
'''
target: test.get_entity_by_id
method: add vectors, with the same id, get vector by the given id
expected: status ok, get one vector
'''
tmp, vectors = gen_binary_vectors(nb, dim)
ids = [i for i in range(nb)]
ids[0] = 0; ids[-1] = 0
status, ids = connect.insert(jac_collection, vectors, ids=ids)
status = connect.flush([jac_collection])
assert status.OK()
status, res = connect.get_entity_by_id(jac_collection, [0])
assert status.OK()
assert_equal_vector(res[0], vectors[0])
def test_get_vector_id_not_existed(self, connect, jac_collection):
'''
target: test get vector, params vector_id not existed
method: add vector and get
expected: status ok, empty result
'''
tmp, vector = gen_binary_vectors(1, dim)
status, ids = connect.insert(jac_collection, vector)
assert status.OK()
status = connect.flush([jac_collection])
assert status.OK()
status, res = connect.get_entity_by_id(jac_collection, [1])
assert status.OK()
assert not len(res[0])
def test_get_vector_collection_not_existed(self, connect, jac_collection):
'''
target: test get vector, params collection_name not existed
method: add vector and get
expected: status not ok
'''
tmp, vector = gen_binary_vectors(1, dim)
status, ids = connect.insert(jac_collection, vector)
assert status.OK()
status = connect.flush([jac_collection])
assert status.OK()
collection_new = gen_unique_str()
status, res = connect.get_entity_by_id(collection_new, [1])
assert not status.OK()
def test_get_vector_partition(self, connect, jac_collection):
'''
target: test.get_entity_by_id
method: add vector, and get
expected: status ok, vector returned
'''
tmp, vectors = gen_binary_vectors(nb, dim)
status = connect.create_partition(jac_collection, tag)
status, ids = connect.insert(jac_collection, vectors, partition_tag=tag)
assert status.OK()
status = connect.flush([jac_collection])
assert status.OK()
status, res = connect.get_entity_by_id(jac_collection, [ids[0]])
assert status.OK()
assert_equal_vector(res[0], vectors[0])
class TestGetVectorIdInvalid(object):
single_vector = gen_single_vector(dim)
"""
Test getting vectors by id with invalid ids
"""
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def gen_invalid_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_get_vector_id_invalid(self, connect, collection, gen_invalid_id):
invalid_id = gen_invalid_id
with pytest.raises(Exception) as e:
status = connect.get_entity_by_id(collection, [invalid_id])
class TestCollectionNameInvalid(object):
"""
Test getting vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_get_vectors_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
vectors = gen_vectors(1, dim)
status, result = connect.get_entity_by_id(collection_name, [1])
assert not status.OK()
``` |
{
"source": "JinHai-CN/phantoscope",
"score": 3
} |
#### File: search/common/error.py
```python
class Error(Exception):
def __init__(self, message, error):
self.message = message
self.error = error
@property
def code(self):
return 503
@property
def name(self):
return self.__class__.__name__
@property
def description(self):
return self.message
class OperatorImportError(Error):
pass
class OperatorRegistError(Error):
pass
class PipelineCheckError(Error):
pass
class Insert2SQLError(Error):
pass
class QueryFromSQLError(Error):
pass
class DeleteFromSQLError(Error):
pass
class UpdateFromSQLError(Error):
pass
class NotExistError(Error):
@property
def code(self):
return 404
class MilvusError(Error):
pass
class S3Error(Error):
pass
class DecodeError(Error):
@property
def code(self):
return 400
class DownloadFileError(Error):
@property
def code(self):
return 598
class PipelineIlegalError(Error):
@property
def code(self):
return 400
class RPCExecError(Error):
@property
def code(self):
return 503
class RequestError(Error):
@property
def code(self):
return 400
class NoneVectorError(Error):
@property
def code(self):
return 400
```
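Each error class above carries an HTTP-style `code`, a `name`, and a `description`. A minimal sketch of how these could be surfaced as JSON responses, assuming a Flask app and that the module is importable as `common.error` (both assumptions, not shown in this excerpt):

```python
# Hypothetical wiring, not part of the repository: maps the Error hierarchy to JSON responses.
from flask import Flask, jsonify
from common.error import Error, NotExistError  # assumed import path

app = Flask(__name__)

@app.errorhandler(Error)
def handle_error(e):
    # code/name/description come from the properties defined on Error and its subclasses
    return jsonify({"error": e.name, "message": e.description}), e.code

@app.route("/demo")
def demo():
    raise NotExistError("pipeline not found", None)  # handled above, returns HTTP 404
```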
#### File: search/operators/operators_api.py
```python
import json
from flask import Blueprint
from flask_restful import reqparse
from common.common import json_response
from operators.operator import all_operators
from operators.operator import regist_operators
from operators.operator import delete_operators
from operators.operator import operator_detail
from operators.operator import operator_health
from common.common import from_view_dict
operator = Blueprint('operator', __name__)
@operator.route("/")
@json_response
def operator_list_api():
return all_operators()
@operator.route("/regist", methods=['POST'])
@json_response
def operator_refresh_api():
args = reqparse.RequestParser(). \
add_argument("endpoint", type=str, required=True). \
add_argument("name", type=str, required=True). \
parse_args()
args = from_view_dict(args)
ed = args['endpoint']
name = args['name']
return regist_operators(ed, name)
@operator.route("/<name>", methods=['DELETE'])
@json_response
def delete_operator_api(name):
return delete_operators(name)
@operator.route("/<name>")
@json_response
def operator_detail_api(name):
return operator_detail(name)
@operator.route("/<name>/health")
@json_response
def operator_health_api(name):
return operator_health(name)
``` |
{
"source": "JinHai-CN/pymilvus",
"score": 2
} |
#### File: pymilvus/tests/TestClient.py
```python
import logging
import pytest
import mock
import faker
import random
import sys
from faker.providers import BaseProvider
from thrift.transport.TSocket import TSocket
from thrift.transport import TTransport
sys.path.append('.')
from milvus.client.Client import Milvus, Prepare, IndexType, Status, TopKQueryResult
from milvus.client.Exceptions import (
NotConnectError,
RepeatingConnectError,
DisconnectNotConnectedClientError)
from milvus.thrift import ttypes, MilvusService
LOGGER = logging.getLogger(__name__)
class FakerProvider(BaseProvider):
def table_name(self):
return 'table_name' + str(random.randint(1000, 9999))
def name(self):
return 'name' + str(random.randint(1000, 9999))
def dim(self):
return random.randint(0, 999)
fake = faker.Faker()
fake.add_provider(FakerProvider)
def range_factory():
param = {
'start': str(random.randint(1, 10)),
'end': str(random.randint(11, 20)),
}
return Prepare.range(**param)
def ranges_factory():
return [range_factory() for _ in range(5)]
def table_schema_factory():
param = {
'table_name': fake.table_name(),
'dimension': random.randint(0, 999),
'index_type': IndexType.IDMAP,
'store_raw_vector': False
}
return Prepare.table_schema(**param)
def records_factory(dimension):
return Prepare.records([[random.random() for _ in range(dimension)] for _ in range(20)])
class TestConnection:
param = {'host': 'localhost', 'port': '5000'}
@mock.patch.object(TSocket, 'open')
def test_true_connect(self, open):
open.return_value = None
cnn = Milvus()
cnn.connect(**self.param)
assert cnn.status.OK()
assert cnn.connected
with pytest.raises(RepeatingConnectError):
cnn.connect(**self.param)
cnn.connect()
def test_false_connect(self):
cnn = Milvus()
with pytest.raises(TTransport.TTransportException):
cnn.connect(**self.param)
LOGGER.error(cnn.status)
assert not cnn.status.OK()
@mock.patch.object(TSocket, 'open')
def test_uri(self, open):
open.return_value = None
cnn = Milvus()
cnn.connect(uri='tcp://127.0.0.1:9090')
assert cnn.status.OK()
def test_connect(self):
cnn = Milvus()
with pytest.raises(TTransport.TTransportException):
cnn.connect('127.0.0.2')
assert not cnn.status.OK()
cnn.connect('127.0.0.1', '9999')
assert not cnn.status.OK()
cnn.connect(port='9999')
assert not cnn.status.OK()
cnn.connect(uri='tcp://127.0.0.1:9090')
assert not cnn.status.OK()
@mock.patch.object(TSocket, 'open')
def test_uri_runtime_error(self, open):
open.return_value = None
cnn = Milvus()
with pytest.raises(RuntimeError):
cnn.connect(uri='http://127.0.0.1:9090')
cnn.connect()
assert cnn.status.OK()
@mock.patch.object(TTransport.TBufferedTransport, 'close')
@mock.patch.object(TSocket, 'open')
def test_disconnected(self, close, open):
close.return_value = None
open.return_value = None
cnn = Milvus()
cnn.connect(**self.param)
assert cnn.disconnect().OK()
def test_disconnected_error(self):
cnn = Milvus()
cnn.status = Status(Status.PERMISSION_DENIED)
with pytest.raises(DisconnectNotConnectedClientError):
cnn.disconnect()
class TestTable:
@pytest.fixture
@mock.patch.object(TSocket, 'open')
def client(self, open):
param = {'host': 'localhost', 'port': '5000'}
open.return_value = None
cnn = Milvus()
cnn.connect(**param)
return cnn
@mock.patch.object(MilvusService.Client, 'CreateTable')
def test_create_table(self, CreateTable, client):
CreateTable.return_value = None
param = table_schema_factory()
res = client.create_table(param)
assert res.OK()
def test_create_table_connect_failed_status(self, client):
param = table_schema_factory()
with pytest.raises(NotConnectError):
res = client.create_table(param)
assert res == Status.CONNECT_FAILED
@mock.patch.object(MilvusService.Client, 'DeleteTable')
def test_delete_table(self, DeleteTable, client):
DeleteTable.return_value = None
table_name = 'fake_table_name'
res = client.delete_table(table_name)
assert res.OK()
def test_false_delete_table(self, client):
table_name = 'fake_table_name'
with pytest.raises(NotConnectError):
res = client.delete_table(table_name)
LOGGER.info(res)
assert res == Status.CONNECT_FAILED
class TestVector:
@pytest.fixture
@mock.patch.object(TSocket, 'open')
def client(self, open):
param = {'host': 'localhost', 'port': '5000'}
open.return_value = None
cnn = Milvus()
cnn.connect(**param)
return cnn
@mock.patch.object(MilvusService.Client, 'AddVector')
def test_add_vector(self, AddVector, client):
AddVector.return_value = ['a','a']
param = {
'table_name': fake.table_name(),
'records': records_factory(256)
}
res, ids = client.add_vectors(**param)
assert res.OK()
assert isinstance(ids, list)
def test_false_add_vector(self, client):
param = {
'table_name': fake.table_name(),
'records': records_factory(256)
}
with pytest.raises(NotConnectError):
res, ids = client.add_vectors(**param)
assert res == Status.CONNECT_FAILED
@mock.patch.object(Milvus, 'search_vectors')
def test_search_vector(self, search_vectors, client):
search_vectors.return_value = Status(), [[ttypes.QueryResult(111,111)]]
param = {
'table_name': fake.table_name(),
'query_records': records_factory(256),
'top_k': random.randint(0, 10)
}
res, results = client.search_vectors(**param)
assert res.OK()
assert isinstance(results, (list, TopKQueryResult))
def test_false_vector(self, client):
param = {
'table_name': fake.table_name(),
'query_records': records_factory(256),
'query_ranges': ranges_factory(),
'top_k': random.randint(0, 10)
}
with pytest.raises(NotConnectError):
res, results = client.search_vectors(**param)
assert res == Status.CONNECT_FAILED
@mock.patch.object(Milvus, 'search_vectors_in_files')
def test_search_in_files(self, search_vectors_in_files, client):
search_vectors_in_files.return_value = Status(),[[ttypes.QueryResult(00,0.23)]]
param = {
'table_name': fake.table_name(),
'query_records': records_factory(256),
# 'query_ranges': ranges_factory(),
'file_ids': ['a'],
'top_k': random.randint(0,10)
}
sta, result = client.search_vectors_in_files(**param)
assert sta.OK()
def test_false_search_in_files(self, client):
param = {
'table_name': fake.table_name(),
'query_records': records_factory(256),
'query_ranges': ranges_factory(),
'file_ids': ['a'],
'top_k': random.randint(0,10)
}
with pytest.raises(NotConnectError):
sta, results = client.search_vectors_in_files(**param)
assert sta == Status.CONNECT_FAILED
@mock.patch.object(MilvusService.Client, 'DescribeTable')
def test_describe_table(self, DescribeTable, client):
DescribeTable.return_value = table_schema_factory()
table_name = fake.table_name()
res, table_schema = client.describe_table(table_name)
assert res.OK()
assert isinstance(table_schema, ttypes.TableSchema)
def test_false_describe_table(self, client):
table_name = fake.table_name()
with pytest.raises(NotConnectError):
res, table_schema = client.describe_table(table_name)
assert not res.OK()
assert not table_schema
@mock.patch.object(MilvusService.Client, 'ShowTables')
def test_show_tables(self, ShowTables, client):
ShowTables.return_value = [fake.table_name() for _ in range(10)]
res, tables = client.show_tables()
assert res.OK()
assert isinstance(tables, list)
def test_false_show_tables(self, client):
with pytest.raises(NotConnectError):
res, tables = client.show_tables()
assert not res.OK()
assert not tables
@mock.patch.object(MilvusService.Client, 'GetTableRowCount')
def test_get_table_row_count(self, GetTableRowCount, client):
GetTableRowCount.return_value = 22, None
res, count = client.get_table_row_count('fake_table')
assert res.OK()
def test_false_get_table_row_count(self, client):
with pytest.raises(NotConnectError):
res, count = client.get_table_row_count('fake_table')
assert not res.OK()
assert not count
def test_client_version(self, client):
res = client.client_version()
assert isinstance(res, str)
class TestPrepare:
def test_table_schema(self):
param = {
'table_name': fake.table_name(),
'dimension': random.randint(0, 999),
'index_type': IndexType.IDMAP,
'store_raw_vector': False
}
res = Prepare.table_schema(**param)
assert isinstance(res, ttypes.TableSchema)
def test_range(self):
param = {
'start': '200',
'end': '1000'
}
res = Prepare.range(**param)
assert isinstance(res, ttypes.Range)
assert res.start_value == '200'
assert res.end_value == '1000'
def test_row_record(self):
vec = [random.random() + random.randint(0, 9) for _ in range(256)]
res = Prepare.row_record(vec)
assert isinstance(res, ttypes.RowRecord)
assert isinstance(res.vector_data, bytes)
def test_records(self):
vecs = [[random.random() for _ in range(256)] for _ in range(20)]
res = Prepare.records(vecs)
assert isinstance(res, list)
assert isinstance(res[0], ttypes.RowRecord)
assert isinstance(res[0].vector_data, bytes)
``` |
{
"source": "JinhaiZ/TP-Lamport-RabbitMQ",
"score": 2
} |
#### File: JinhaiZ/TP-Lamport-RabbitMQ/publisher.py
```python
import pika
import logging
import sys
import time
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
class Publisher(object):
def __init__(self, exchange_name, queue_name):
self._exchange_name = exchange_name
self._queue_name = queue_name
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
self._connection = connection
channel = connection.channel()
channel.exchange_declare(exchange=exchange_name,
exchange_type='fanout')
self._channel = channel
# lamport
self._site_id = int(exchange_name[1:])
def send_REQUEST(self, time):
message = "{!s},{!s}".format(self._site_id,time)
self._channel.basic_publish(exchange=self._exchange_name,
routing_key='',
body=message,
properties=pika.BasicProperties(reply_to=self._queue_name,type="REQUEST"))
LOGGER.info('Broadcasted message : %s type REQUEST', message)
def close_connection(self):
self._connection.close()
def main(exchange_name, queue_name):
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
pub = Publisher(exchange_name, queue_name)
try:
pub.send_REQUEST(0)
# time.sleep(2)
# pub.send_RELEASE(3)
except KeyboardInterrupt:
pub.close_connection()
if __name__ == '__main__':
if len(sys.argv) != 3:
print("usage: python publisher.py its_exchange_name its_queue_name")
else:
main(sys.argv[1], sys.argv[2])
``` |
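The `Publisher` above broadcasts `site_id,time` messages on a fanout exchange and passes its own queue name in `reply_to`. The matching consumer is not shown here; the sketch below is an assumed counterpart (with `s1` as an example exchange name), binding a broker-named queue to the same exchange and printing incoming REQUEST messages:

```python
# Hypothetical consumer sketch (not the repository's subscriber).
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='s1', exchange_type='fanout')  # 's1' is an example name
result = channel.queue_declare(queue='', exclusive=True)         # broker-named, auto-deleted queue
channel.queue_bind(exchange='s1', queue=result.method.queue)

def on_message(ch, method, properties, body):
    site_id, clock = body.decode().split(',')
    print(properties.type, 'from site', site_id, 'clock', clock, 'reply to', properties.reply_to)

channel.basic_consume(queue=result.method.queue, on_message_callback=on_message, auto_ack=True)
channel.start_consuming()
```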
{
"source": "jinhan814/BOJ",
"score": 3
} |
#### File: BOJ/python/13705.py
```python
import sys
from decimal import *
input = sys.stdin.readline
getcontext().prec = 50
getcontext().rounding = ROUND_HALF_UP
A, B, C = map(Decimal, map(int, input().split()))
PI = Decimal('3.14159265358979323846264338327950288419716939937510')
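# Solve A*x + B*sin(x) = C by bisection on [(C-B)/A, (C+B)/A]; sin(x) is evaluated
# with a Decimal Taylor series so the answer stays accurate to 6 decimal places.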
def Sin(x):
x = x % (2 * PI)
getcontext().prec += 2
i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
getcontext().prec -= 2
return +s
lo, hi = (C - B) / A, (C + B) / A
while hi - lo > Decimal(1e-21):
mid = (lo + hi) / 2
if A * mid + B * Sin(mid) < C: lo = mid
else: hi = mid
print(round(lo, 6))
```
#### File: BOJ/python/8896.py
```python
import sys
input = lambda: sys.stdin.readline().rstrip()
def conv(c):
if c == 'R': return 0
if c == 'S': return 1
return 2
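# For each round, if exactly two distinct moves appear among the surviving players,
# eliminate the players holding the losing move; print the lone survivor (1-indexed) or 0.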
for _ in range(int(input())):
n = int(input())
v = [input() for _ in range(n)]
check = [0] * n
for i in range(len(v[0])):
cur = [0] * 3
for j in range(n):
if not check[j]:
cur[conv(v[j][i])] = 1
if sum(cur) != 2: continue
win = 0 if cur[0] and cur[1] else 1 if cur[1] and cur[2] else 2
for j in range(n):
if not check[j] and conv(v[j][i]) != win:
check[j] = 1
print(check.index(0) + 1 if sum(check) == n - 1 else 0)
```
#### File: BOJ/python/9202.py
```python
import sys
input = lambda: sys.stdin.readline().rstrip()
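# Boggle solver: insert the dictionary words into a trie, then DFS from every board
# cell following trie edges; matched word indices are collected in the set `v`.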
class TrieNode:
def __init__(self):
self.nxt = [0] * 26
self.idx = -1
class Trie:
def __init__(self):
self.node = [TrieNode()]
def Add(self, s, idx):
cur = 0
for c in s:
if self.node[cur].nxt[ord(c) - 65] == 0:
self.node[cur].nxt[ord(c) - 65] = len(self.node)
self.node.append(TrieNode())
cur = self.node[cur].nxt[ord(c) - 65]
self.node[cur].idx = idx
n = int(input())
s = [input() for _ in range(n)]; input()
T = Trie()
board, v = [], {}
visited = [[0] * 4 for _ in range(4)]
for i in range(n): T.Add(s[i], i)
def DFS(x, y, pos):
if T.node[pos].nxt[ord(board[x][y]) - 65] == 0: return
pos = T.node[pos].nxt[ord(board[x][y]) - 65]; visited[x][y] = 1
if T.node[pos].idx != -1: v.add(T.node[pos].idx)
for dx, dy in ((0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1)):
nx = x + dx; ny = y + dy
if 0 <= nx < 4 and 0 <= ny < 4 and not visited[nx][ny]: DFS(nx, ny, pos)
visited[x][y] = 0
def GetScore(n):
if 3 <= n <= 4: return 1
if n == 5: return 2
if n == 6: return 3
if n == 7: return 5
if n == 8: return 11
return 0
for _ in range(int(input())):
board = [input() for _ in range(4)];
v = set()
for i in range(4):
for j in range(4): DFS(i, j, 0)
score, cnt, mx = 0, 0, ''
for i in v:
score += GetScore(len(s[i]))
cnt += 1
if len(mx) < len(s[i]) or len(mx) == len(s[i]) and mx > s[i]: mx = s[i]
print(score, mx, cnt); input()
```
#### File: BOJ/python/9251.py
```python
import sys
input = lambda: sys.stdin.readline().rstrip()
sys.setrecursionlimit(int(1e5))
a = input()
b = input()
DP = [[-1] * len(b) for _ in range(len(a))]
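# LCS via memoized recursion: Sol(i, j) = Sol(i-1, j-1) + 1 if a[i] == b[j],
# otherwise max(Sol(i-1, j), Sol(i, j-1)).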
def Sol(i, j):
if i < 0 or j < 0: return 0
if DP[i][j] != -1: return DP[i][j]
DP[i][j] = 0
if a[i] == b[j]: DP[i][j] = Sol(i - 1, j - 1) + 1
else: DP[i][j] = max(Sol(i - 1, j), Sol(i, j - 1))
return DP[i][j]
print(Sol(len(a) - 1, len(b) - 1))
``` |
{
"source": "jinhan814/PyTorch-GAN-Study",
"score": 2
} |
#### File: PGGAN/model/custom_layer.py
```python
import torch
import torch.nn as nn
import math
from numpy import prod
class NormalizationLayer(nn.Module):
def __init__(self):
super(NormalizationLayer, self).__init__()
def forward(self, x, epsilon=1e-8):
return x * (((x**2).mean(dim=1, keepdim=True) + epsilon).rsqrt())
def Upsampling(x, factor=2):
# assert isinstance(factor, int) and factor >=1 ## wondering whether this check is needed
# if factor == 1 : ## is this early return needed as well?
# return x
s = x.size()
x = x.view(-1, s[1], s[2], 1, s[3], 1)
x = x.expand(-1, s[1], s[2], factor, s[3], factor)
x = x.contiguous().view(-1, s[1], s[2] * factor, s[3] * factor)
## why contiguous() is used here
'''
When a tensor is transformed with functions such as transpose or view, its size and
stride (ordering) can change while the elements keep their original positions in
memory; only the access indices change. So before applying further reshapes, the
memory needs to be reallocated (made contiguous).
'''
return x
def Downsampling(x):
return nn.functional.avg_pool2d(x, (2,2))
def getLayerNormalizationFactor(x):
size = x.weight.size()
fan_in = prod(size[1:])
return math.sqrt(2.0 / fan_in)
class ConstrainedLayer(nn.Module):
def __init__(self,
module,
equalized=True,
lrMul=1.0,
InitBiasToZero=True):
super(ConstrainedLayer,self).__init__()
self.module = module
self.equalized = equalized
self.module.weight.data.normal_(0, 1) ## normal_(mean, std): draw the initial weights from a normal distribution
self.module.weight.data /= lrMul
self.weight = getLayerNormalizationFactor(self.module) *lrMul
self.module.bias.data.fill_(0)
def forward(self, x):
x = self.module(x)
x *= self.weight
return x
class EqualizedConv2d(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
kernelSize,
padding=0,
bias=True,
**kwargs):
ConstrainedLayer.__init__(self,
nn.Conv2d(nChannelsPrevious,
nChannels,
kernelSize,
padding=padding,
bias=bias),
**kwargs
)
class EqualizedLinear(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
bias=True,
**kwargs):
ConstrainedLayer.__init__(self,
nn.Linear(nChannelsPrevious,
nChannels,
bias=bias),
**kwargs
)
def MiniBatchStddev(x,subGroupSize=4):
size = x.size()
subGroupSize = min(size[0], subGroupSize)
if size[0] % subGroupSize != 0:
subGroupSize = size[0]
G = int(size[0] / subGroupSize)
if subGroupSize > 1:
y = x.view(-1, subGroupSize, size[1], size[2], size[3])
y = torch.var(y, 1)
y = torch.sqrt(y + 1e-8)
y = y.view(G, -1)
y = torch.mean(y, 1).view(G, 1)
y = y.expand(G, size[2]*size[3]).view((G, 1, 1, size[2], size[3]))
y = y.expand(G, subGroupSize, -1, -1, -1)
y = y.contiguous().view((-1, 1, size[2], size[3]))
else:
y = torch.zeros(x.size(0), 1, x.size(2), x.size(3), device=x.device)
return torch.cat([x, y], dim=1)
```
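A quick smoke test for the layers above, assuming this file is importable as `model.custom_layer` (the path used elsewhere in the project) and that PyTorch is installed; the shapes in the comments follow from the code above rather than from any recorded output:

```python
import torch
from model.custom_layer import EqualizedConv2d, MiniBatchStddev, NormalizationLayer  # assumed path

x = torch.randn(8, 16, 32, 32)
conv = EqualizedConv2d(16, 32, 3, padding=1)  # weights rescaled at runtime (equalized learning rate)
y = NormalizationLayer()(conv(x))             # pixelwise feature-vector normalization
print(y.shape)                                # torch.Size([8, 32, 32, 32])
z = MiniBatchStddev(y, subGroupSize=4)        # appends one minibatch-stddev feature map
print(z.shape)                                # torch.Size([8, 33, 32, 32])
```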
#### File: PyTorch-GAN-Study/PGGAN/train.py
```python
import os
import json
from utils import resizing
import numpy as np
import random
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.model import PGGAN
from dataset import IdolDataset
from torch.utils.data import DataLoader
def SeedEverything(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
def Train(args):
SeedEverything(args['seed'])
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
print(f'device: {"GPU" if use_cuda else "CPU"}')
model = PGGAN(512, 3, 0.2)
batch_size = args['batch_size']
latent_vector_size = 512
criterion = model.criterion
train_dataset = IdolDataset(args['train_dir'])
train_loader = DataLoader(train_dataset, batch_size, shuffle=True)
optimizer_D = model.OptD
optimizer_G = model.OptG
label_real = torch.ones(batch_size, 1).to(device)
label_fake = torch.zeros(batch_size, 1).to(device)
model.train()
for scale in range(args['scales']):
model.ToDevice(device)
for epoch in range(args['epochs'][scale]):
print(f'epoch:{epoch}/{args["epochs"][scale]}')
alpha = 1-epoch/args['epochs'][scale] if scale else 0
model.SetAlpha(alpha)
loss_D_per_epoch = 0
loss_G_per_epoch = 0
for batch_idx, img in enumerate(tqdm(train_loader)):
img = resizing(img, args['img_size'][scale])
img = img.to(device)
z = torch.randn(batch_size, latent_vector_size).to(device)
loss_D = criterion(model.DNet(img), label_real) + criterion(model.DNet(model.GNet(z)), label_fake)
model.DNet.zero_grad()
loss_D.backward()
optimizer_D.step()
# z = torch.randn(batch_size, latent_vector_size).to(device)  # check whether z should be re-sampled here!
loss_G = criterion(model.DNet(model.GNet(z)), label_real)
model.GNet.zero_grad()
loss_G.backward()
optimizer_G.step()
loss_D_per_epoch += loss_D.item()
loss_G_per_epoch += loss_G.item()
loss_D_per_epoch = loss_D_per_epoch / (batch_idx+1)
loss_G_per_epoch = loss_G_per_epoch / (batch_idx+1)
print(f'Epoch: {epoch+1}/{args["epochs"][scale]}\t Loss_D: {loss_D_per_epoch:.6f}\t Loss_G: {loss_G_per_epoch:.6f}\t')
model.AddScale(args['channels'][scale])
if __name__ == "__main__":
json_path = "/content/PyTorch-GAN-Study/PGGAN/config.json"
with open(json_path) as f:
config_json = json.load(f)
Train(config_json)
```
#### File: PyTorch-GAN-Study/PGGAN/utils.py
```python
import numpy
import torch
import torch.nn.functional as F
def resizing(img,s):
img = F.interpolate(img,size=(s,s))
return img
``` |
{
"source": "jinhang/fcn",
"score": 2
} |
#### File: fcn/fcn/setup.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import fcn
def _get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
data_dir = osp.realpath(osp.join(this_dir, '_data'))
if osp.exists(data_dir):
return data_dir
return ''
data_dir = _get_data_dir()
def download_vgg16_chainermodel():
path = osp.join(data_dir, 'vgg16.chainermodel')
fcn.util.download_data(
pkg_name='fcn',
path=path,
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vSlFjQlJFQjM5TEk',
md5='292e6472062392f5de02ef431bba4a48',
)
return path
def download_fcn8s_caffemodel():
caffemodel_dir = osp.join(data_dir, 'fcn.berkeleyvision.org/voc-fcn8s')
caffemodel = osp.join(caffemodel_dir, 'fcn8s-heavy-pascal.caffemodel')
url_file = osp.join(caffemodel_dir, 'caffemodel-url')
fcn.util.download_data(
pkg_name='fcn',
path=caffemodel,
url=open(url_file).read().strip(),
md5 = 'c03b2953ebd846c270da1a8e8f200c09',
)
return caffemodel
class FCN8sFromCaffeChainerModel(object):
path = osp.join(data_dir, 'fcn8s_from_caffe.chainermodel')
url = 'https://drive.google.com/uc?id=0B9P1L--7Wd2vTXU0QzUwSkVwOFk'
md5 = 'a1083db5a47643b112af69bfa59954f9'
def exists(self):
return (osp.exists(self.path) and
fcn.util.check_md5(self.path, self.md5))
def download(self):
fcn.util.download_data(
pkg_name='fcn',
path=self.path,
url=self.url,
md5=self.md5
)
return self.path
``` |
{
"source": "JinhangZhu/cube-algs-dictator",
"score": 3
} |
#### File: JinhangZhu/cube-algs-dictator/TimeLogger.py
```python
from datetime import datetime
import logging
class TimeLogger:
def __init__(self):
self._last_time = None
def info(self, msg="", start=False):
indent_string = ''
logging.info(indent_string + msg)
current_time = datetime.now()
if start:
self._last_time = current_time
else:
duration = current_time - self._last_time
duration = int(duration.total_seconds() * 1000)
logging.info(indent_string + 'Duration: {} ms'.format(duration))
# @staticmethod
def debug(self, msg=""):
indent_string = ' ' * 4
logging.debug(indent_string + msg)
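# Hedged usage sketch (not part of the original file): info(..., start=True) stamps the
# start time and a later info() call logs the elapsed milliseconds.
if __name__ == '__main__':
    import time
    logging.basicConfig(level=logging.DEBUG)
    timer = TimeLogger()
    timer.info("start solving", start=True)   # record the start timestamp
    time.sleep(0.25)                          # stand-in for real work
    timer.info("finished")                    # logs "Duration: ~250 ms"
    timer.debug("extra detail, indented by 4 spaces")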
``` |
{
"source": "JinhangZhu/mosaic-closest",
"score": 3
} |
#### File: JinhangZhu/mosaic-closest/mosaic.py
```python
import ctypes
import re
import os
import multiprocessing as mp
from multiprocessing.sharedctypes import RawArray
import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
from scipy.spatial.distance import euclidean
from tqdm import tqdm
IMG_DIR = './images'
RATIO = 10
threshold = 50
# Get the luminance of an image
def get_luminance(source):
return np.sum(source)/np.size(source)
# Resize the image to be in the shape of (height, width) without distortion
def resize(source, height, width):
shape_row = source.shape[0]
shape_col = source.shape[1]
shrink_ratio = min(shape_row/height, shape_col/width)
resized = cv2.resize(source, (int(shape_col/shrink_ratio)+1, int(shape_row/shrink_ratio)+1), interpolation=cv2.INTER_CUBIC)
result = resized[:height, :width, :]
return result
# Calculate the euclidean distance between two images
def img_distance(source1, source2):
if source1.shape != source2.shape:
msg = "shapes are different {} {}".format(source1.shape, source2.shape)
raise Exception(msg)
array_1 = source1.flatten()
array_2 = source2.flatten()
dist = euclidean(array_1, array_2)
return dist
# Calculate the euclidean distance between two histograms
def hist_distance(source1, source2):
color = ('b', 'g', 'r')
hist_1 = np.zeros((256, 1, 3))
hist_2 = np.zeros((256, 1, 3))
dist = np.zeros((1, 3))
for i, col in enumerate(color):
hist_1[:, :, i] = cv2.calcHist([source1], [i], None, [256], [0, 256])
hist_2[:, :, i] = cv2.calcHist([source2], [i], None, [256], [0, 256])
array_1 = hist_1.flatten()
array_2 = hist_2.flatten()
dist = euclidean(array_1, array_2)
return dist
# Calculate the euclidean distance between two histograms in channels
def hist_distance_channel(source1, source2):
color = ('b', 'g', 'r')
hist_1 = np.zeros((256, 1, 3))
hist_2 = np.zeros((256, 1, 3))
for i, col in enumerate(color):
hist_1[:, :, i] = cv2.calcHist([source1], [i], None, [256], [0, 256])
hist_2[:, :, i] = cv2.calcHist([source2], [i], None, [256], [0, 256])
dist_b = euclidean(hist_1[:, :, 0], hist_2[:, :, 0])
dist_g = euclidean(hist_1[:, :, 1], hist_2[:, :, 1])
dist_r = euclidean(hist_1[:, :, 2], hist_2[:, :, 2])
return dist_b, dist_g, dist_r
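# Example (illustrative only, never called in this script): for two equally sized
# BGR images a and b, hist_distance_channel(a, b) returns one scalar distance per
# colour channel, e.g.
#   d_b, d_g, d_r = hist_distance_channel(cv2.imread('a.jpg'), cv2.imread('b.jpg'))
# Smaller values mean more similar per-channel colour distributions.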
# Load images in a specific directory
def load_images(height, width):
img_dir = IMG_DIR
filenames = os.listdir(img_dir)
result = []
print(len(filenames))
for filename in tqdm(filenames):
        if not re.search(r'\.jpg$', filename, re.I):
continue
try:
filepath = os.path.join(img_dir, filename)
source_im = cv2.imread(filepath)
height_im = source_im.shape[0]
width_im = source_im.shape[1]
if height != height_im or width != width_im:
source_im = resize(source_im, height, width)
result.append(np.array(source_im))
except Exception as e:
msg = 'error with {} - {}'.format(filepath, str(e))
print(msg)
return np.array(result, dtype=np.uint8)
# Find the most similar image among the resource images by comparing euclidean distances
def find_closest_image(q, shared_resource_images, resource_images_shape, shared_result, img_shape, set_height, set_width):
shared_images_array = np.frombuffer(shared_resource_images, dtype=np.uint8)
resource_images = shared_images_array.reshape(resource_images_shape)
while True:
[row, col, pad] = q.get()
print('row: {}, col: {}'.format(row, col))
# Non-grayscale original image
        if len(pad.shape) == 3:
# min_dist_img = float("inf") # It acts as an unbounded upper value for comparison.
# min_dist_hist = float("inf") # This is useful for finding lowest values
min_dist_b = float("inf")
min_dist_g = float("inf")
min_dist_r = float("inf")
min_diff_lumi_b = 255
min_diff_lumi_g = 255
min_diff_lumi_r = 255
min_img = None
for resource_image in resource_images:
# Calculate euclidean distance between the image and the pad
# dist_img = img_distance(pad, resource_image)
# dist_hist = hist_distance(pad, resource_image)
dist_b, dist_g, dist_r = hist_distance_channel(pad, resource_image)
                # Auxiliary check: reject candidates whose brightness differs too much from the pad
diff_lumi_b = abs(get_luminance(resource_image[:, :, 0]) - get_luminance(pad[:, :, 0]))
diff_lumi_g = abs(get_luminance(resource_image[:, :, 1]) - get_luminance(pad[:, :, 1]))
diff_lumi_r = abs(get_luminance(resource_image[:, :, 2]) - get_luminance(pad[:, :, 2]))
# and condition
state_hist = dist_b < min_dist_b and dist_g < min_dist_g and dist_r < min_dist_r
state_lumi = diff_lumi_b < min_diff_lumi_b and diff_lumi_g < min_diff_lumi_g and diff_lumi_r < min_diff_lumi_r
state_thres = diff_lumi_b < threshold and diff_lumi_g < threshold and diff_lumi_r < threshold
if state_thres:
if state_hist and state_lumi:
min_diff_lumi_b = diff_lumi_b
min_diff_lumi_g = diff_lumi_g
min_diff_lumi_r = diff_lumi_r
min_dist_b = dist_b
min_dist_g = dist_g
min_dist_r = dist_r
# Update the most similar image
min_img = resource_image
# Update result image in shared memory
im_res = np.frombuffer(shared_result, dtype=np.uint8).reshape(img_shape)
im_res[row:row+set_height, col:col+set_width, :] = min_img
# Grayscale original image
        elif len(pad.shape) == 2:
min_dist_hist = float("inf")
min_diff_lumi = 255
min_img = None
for resource_image in resource_images:
# Calculate euclidean distance of histograms between the image and the pad
dist_hist = hist_distance(pad, resource_image)
                # Auxiliary check: reject candidates whose brightness differs too much from the pad
diff_lumi = abs(get_luminance(resource_image) - get_luminance(pad))
state_hist = dist_hist < min_dist_hist
state_lumi = diff_lumi < min_diff_lumi
state_thres = diff_lumi < threshold
if state_thres:
if state_hist and state_lumi:
min_diff_lumi = diff_lumi
min_dist_hist = dist_hist
resource_image = cv2.cvtColor(resource_image, cv2.COLOR_BGR2GRAY)
min_img = resource_image
im_res = np.frombuffer(shared_result, dtype=np.uint8).reshape(img_shape)
im_res[row:row+set_height, col:col+set_width, :] = min_img
        # Required by JoinableQueue: mark this work item as done so join() can return
q.task_done()
# Return a fixed shape according to the original shape
def get_set_shape():
return [32, 32]
# Generate the mosaic with the resource image file and the output file indicated
def generate_mosaic(infile, outfile):
print('Reading the background image: ' + infile)
img = cv2.imread(infile)
set_height, set_width = get_set_shape()
img_shape = list(img.shape)
    # Make the full-image shape a whole multiple of the tile shape, according to the set shape of a single tile
img_shape[0] = int(img_shape[0]/set_height) * set_height * RATIO
img_shape[1] = int(img_shape[1]/set_width) * set_width * RATIO
print('Resizing the background image...')
img = cv2.resize(img, (img_shape[1], img_shape[0]), interpolation=cv2.INTER_CUBIC)
# REF: cv2.resize(src, dsize[, dst[, fx[, fy[, interpolation]]]]) → dst
# dsize = Size(round(dst.cols), round(dst.rows))
# #INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood
# Print the shape of the output image
print('Shape of the output image: {}'.format(img_shape))
# result image, in the same shape as modified original image
im_res = np.zeros(img_shape, np.uint8)
# All resource image in the set shape to be used
print('Loading images as patches...')
resource_images = load_images(set_height, set_width)
# Get the shape of the images
resource_images_shape = resource_images.shape
# Return a ctypes array allocated from shared memory
# The ctypes array is of the same size of what needs to be shared across multiple processes
shared_resource_images = RawArray(ctypes.c_ubyte, len(resource_images.flatten()))
    # np.frombuffer: interpret shared_resource_images as a 1-dimensional array
    # np.copyto: copy the values from resource_images into the shared array
    np.copyto(np.frombuffer(shared_resource_images, dtype=np.uint8).reshape(resource_images_shape), resource_images)
    # Return a ctypes array allocated from shared memory
# The ctypes array is in the shape of the flattened output image "pool"
shared_result = RawArray(ctypes.c_ubyte, len(im_res.flatten()))
# Create a Queue subclass, a queue which additionally has task_done() and join() methods.
join_queue = mp.JoinableQueue()
for i in range(5):
p = mp.Process(target=find_closest_image, # The callable object to be invoked
name='Process: {}'.format(i),
args=(join_queue, shared_resource_images, resource_images_shape, shared_result, img_shape, set_height, set_width),
                       daemon=True)  # Daemon workers exit automatically when the main process finishes
p.start()
print('Started process {}'.format(i+1))
# Generate the pads through multiple processes
for row in range(0, img_shape[0], set_height):
for col in range(0, img_shape[1], set_width):
pad = img[row:row+set_height, col:col+set_width, :]
            # Put the work item in the queue; whichever worker process is free first picks it up
join_queue.put([row, col, pad])
join_queue.join()
# Output image file
print('Writing the output image: {}'.format(outfile))
cv2.imwrite(outfile, np.frombuffer(shared_result, dtype=np.uint8).reshape(img_shape))
print('Happy ending.')
if __name__ == "__main__":
str_infile = "original.jpg"
str_outfile = "mosaic.jpg"
t = time.time()
generate_mosaic(str_infile, str_outfile)
    elapsed = time.time() - t
    print('Duration time: {}'.format(elapsed))
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(cv2.cvtColor(cv2.imread(str_infile), cv2.COLOR_BGR2RGB)) # OpenCV stores images in BGR order instead of RGB.
plt.title(str_infile)
plt.subplot(1, 2, 2)
plt.imshow(cv2.cvtColor(cv2.imread(str_outfile), cv2.COLOR_BGR2RGB))
plt.title(str_outfile)
plt.show()
``` |
{
"source": "JinhangZhu/video_annotator",
"score": 3
} |
#### File: JinhangZhu/video_annotator/utils.py
```python
import os
def get_video_paths_with_places(source, endswith=None):
if endswith is None:
endswith = ['.mp4', '.m2ts', '.mts', '.mov', '.3gp']
need_paths = []
all_extensions = []
need_extensions = []
need_places = []
# r=root, d=directories, f = files
for root, dirs, files in os.walk(source):
for file in files:
filename, file_extension = os.path.splitext(file)
if file_extension not in all_extensions:
all_extensions.append(file_extension)
for end in endswith:
if file.endswith(end.lower()) or file.endswith(end.upper()):
abs_path = os.path.join(root, file)
abs_path = abs_path.replace('\\', '/')
need_paths.append(abs_path)
need_places.append(abs_path.split('/')[2])
if file_extension not in need_extensions:
need_extensions.append(file_extension)
print("\n{} videos are found with {} types of extensions: {}".format(len(need_paths), len(need_extensions), need_extensions))
print("\nAmong all files are {} types of extensions: {}".format(len(all_extensions), all_extensions))
return need_paths, need_places
if __name__ == '__main__':
need_paths, need_places = get_video_paths_with_places(source='G:/Pictures', endswith=['.mp4', '.m2ts', '.mts', '.mov', '.3gp'])
``` |
{
"source": "jin-hao-chen/filegoback",
"score": 2
} |
#### File: apps/files/models.py
```python
import datetime
from apps import db
class Category(db.Model):
__tablename__ = 'category'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(50))
is_current = db.Column(db.Boolean, default=False)
expire_time = db.Column(db.DateTime, nullable=False)
class File(db.Model):
__tablename__ = 'file'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
filename = db.Column(db.String(80), nullable=False)
upload_time = db.Column(db.DateTime, default=datetime.datetime.now)
size = db.Column(db.Float)
category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
sid = db.Column(db.String(10), db.ForeignKey('student.sid'))
owner = db.relationship('Student', uselist=False, backref=db.backref('file'))
def __init__(self, sid, filename, upload_time, size, category_id):
self.sid = sid
self.filename = filename
self.upload_time = upload_time
self.size = size
self.category_id = category_id
def __str__(self):
return 'File(sid=%s, filename=%s, upload_time=%s, size=%s, category_id=%s)'\
% (self.sid, self.filename, self.upload_time, self.size, self.category_id)
def __repr__(self):
return str(self)
```
#### File: filegoback/apps/__init__.py
```python
import os
from settings import PROJ_DIR
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
db = SQLAlchemy()
from apps.files.views import files_bp
from apps.students.views import students_bp
from apps import files
from apps import students
def create_app():
static_dir = os.path.join(PROJ_DIR, 'static')
app = Flask(__name__, static_folder=static_dir)
app.register_blueprint(files_bp, url_prefix='/api/files')
app.register_blueprint(students_bp, url_prefix='/api/students')
app.config['SQLALCHEMY_DATABASE_URI'] \
= 'mysql+pymysql://root:[email protected]:3306/file_go?charset=utf8mb4'
app.config['SQLALCHEMY_POOL_SIZE'] = 6
app.config['SQLALCHEMY_POOL_TIMEOUT'] = 10
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
CORS(app, supports_credentials=True, resources=r'/*')
return app
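# Minimal usage sketch (an assumption -- the real entry point is manager.py shown below):
#   app = create_app()
#   app.run(host='0.0.0.0', port=5000, debug=True)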
```
#### File: apps/students/models.py
```python
from apps import db
class Student(db.Model):
__tablename__ = 'student'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
sid = db.Column(db.String(10), nullable=False, unique=True)
name = db.Column(db.String(32), nullable=False)
is_admin = db.Column(db.Boolean, default=False)
def __init__(self, sid, name, is_admin=False):
self.sid = sid
self.name = name
self.is_admin = is_admin
def __str__(self):
return "<Student '%s'>" % self.sid
def __repr__(self):
return str(self)
```
#### File: jin-hao-chen/filegoback/manager.py
```python
from flask_script import Manager
from flask_migrate import (Migrate, MigrateCommand)
import settings
import apps
from apps import create_app
def main():
main_app = create_app()
manager = Manager(main_app)
Migrate(main_app, apps.db)
manager.add_command('db', MigrateCommand)
manager.run()
if __name__ == '__main__':
main()
``` |
{
"source": "jin-hao-chen/team_go_backend",
"score": 2
} |
#### File: apps/clubs/views.py
```python
import re
from django.shortcuts import render
from rest_framework.viewsets import GenericViewSet
from rest_framework.response import Response
from clubs.models import Club
from user_operations.models import Apply
from clubs.models import Institute
from clubs.models import UserClub
from clubs.serializers import UserClubSerializer
from users.models import User
from clubs.serializers import ClubSerializer
from clubs.serializers import InstituteSerializer
from utils import restful_status
from utils import model_tools
class InstituteViewSet(GenericViewSet):
authentication_classes = []
queryset = Institute.objects.all()
serializer_class = InstituteSerializer
def list(self, request, *args, **kwargs):
ret_data = {
'status': restful_status.STATUS_SUCCESS
}
queryset = self.queryset
institute_serializer = self.serializer_class(queryset, many=True)
ret_data['instituteList'] = institute_serializer.data
return Response(ret_data)
def retrieve(self, request, *args, **kwargs):
institute_id = request.META.get('PATH_INFO').split('/')[-2]
ret_data = {
'status': restful_status.STATUS_SUCCESS
}
queryset = self.queryset
institute = queryset.filter(id=institute_id).first()
institute_serializer = self.serializer_class(institute, many=False)
ret_data['institute'] = institute_serializer.data
return Response(ret_data)
class ClubListViewSet(GenericViewSet):
queryset = Club.objects.all()
serializer_class = ClubSerializer
def list(self, request, *args, **kwargs):
ret_data = {
'status': restful_status.STATUS_SUCCESS
}
queryset = self.queryset.order_by('-persons')
user_id = request.query_params.get('userId')
if not user_id:
club_serializer = self.serializer_class(queryset, many=True)
clubs = club_serializer.data
for club in clubs:
club['applications'] = Apply.objects.filter(club=Club.objects.filter(id=club['id']).first()).count()
ret_data['clubList'] = clubs
return Response(ret_data)
user = User.objects.filter(id=user_id).first()
if not user:
ret_data['status'] = restful_status.STATUS_ERROR
ret_data['msg'] = '此用户不存在'
return Response(ret_data)
clubs = queryset.filter(user=user)
club_serializer = self.serializer_class(clubs, many=True)
clubs = club_serializer.data
for club in clubs:
club['applications'] = Apply.objects.filter(club=Club.objects.filter(id=club['id']).first()).count()
ret_data['clubList'] = clubs
return Response(ret_data)
def patch(self, request, *args, **kwargs):
ret_data = {
'status': restful_status.STATUS_SUCCESS
}
club_id = int(request.META.get('PATH_INFO').split('/')[-2])
user = User.objects.filter(username=request.user['username']).first()
if not user:
ret_data['status'] = restful_status.STATUS_ERROR
ret_data['msg'] = '用户' + request.user['username'] + '不存在'
return Response(ret_data)
club_ids = UserClub.objects.filter(user=user).values('club_id')
        # Only members of the club may update it; reject the request otherwise.
        for club_id_dict in club_ids:
            if club_id == club_id_dict['club_id']:
                Club.objects.filter(id=club_id).update(**request.data.get('dict'))
                return Response(ret_data)
        ret_data['status'] = restful_status.STATUS_ERROR
        ret_data['msg'] = '你没有权限保存内容'
        return Response(ret_data)
def retrieve(self, request, *args, **kwargs):
        # The requested URL is available in request.META['PATH_INFO']
club_id = request.META.get('PATH_INFO').split('/')[-2]
ret_data = {
'status': restful_status.STATUS_SUCCESS
}
queryset = self.queryset.filter(id=club_id).first()
description = queryset.description
ret_data['description'] = description
ret_data['required'] = queryset.required
return Response(ret_data)
class UserClubViewSet(GenericViewSet):
queryset = UserClub.objects.all()
serializer_class = UserClubSerializer
def list(self, request, *args, **kwargs):
ret_data = {
'status': restful_status.STATUS_SUCCESS
}
user_id = request.query_params.get('userId')
queryset = self.queryset
user = User.objects.filter(id=user_id).first()
if not user:
ret_data['status'] = restful_status.STATUS_ERROR
ret_data['msg'] = '非法请求'
return Response(ret_data)
user_club = queryset.filter(user=user)
ret_data['user_club'] = self.serializer_class(user_club, many=True).data
return Response(ret_data)
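# Routing sketch (an assumption -- the project's actual urls.py is not shown here):
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'institutes', InstituteViewSet)
#   router.register(r'clubs', ClubListViewSet)
#   router.register(r'user-clubs', UserClubViewSet)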
```
#### File: apps/users/models.py
```python
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
from users.validators import validate_username
from clubs.models import Institute
from clubs.models import Club
# from ckeditor_uploader.fields import RichTextUploadingField
from DjangoUeditor.models import UEditorField
class AdminInfo(AbstractUser):
"""后台管理员表
Notes
-----
AdminInfo 和 学生表是分开来的, AdminInfo 是提供给后台内部使用的
一般用于后台统计数据, 导出统计数据文件等, 因此 AdminInfo 只包含了
少数的字段
"""
username = models.CharField(max_length=10, validators=[validate_username], verbose_name='学号', unique=True)
password = models.CharField(_('密码'), max_length=128)
nickname = models.CharField(max_length=30, verbose_name='昵称', blank=True, null=True)
mobile = models.CharField(max_length=11, verbose_name='手机号', blank=True, null=True)
email = models.EmailField(_('邮箱'), blank=True)
class Meta:
verbose_name = '管理员'
verbose_name_plural = verbose_name
def __str__(self):
return self.nickname
class Teacher(models.Model):
username = models.CharField(max_length=10, validators=[validate_username], verbose_name='工号', unique=True, \
null=False, blank=False)
nickname = models.CharField(max_length=20, verbose_name='老师名', null=False, blank=False)
mobile = models.CharField(max_length=11, verbose_name='手机号', null=False, blank=False)
club = models.OneToOneField(to=Club, on_delete=models.CASCADE, verbose_name='指导的社团', null=True)
class Meta:
verbose_name = '指导老师'
verbose_name_plural = '指导老师'
def __str__(self):
return self.username
class User(models.Model):
username = models.CharField(max_length=10, validators=[validate_username], verbose_name='学号', unique=True)
password = models.CharField(verbose_name='密码', max_length=128, blank=False, null=False)
nickname = models.CharField(max_length=30, verbose_name='学生名', blank=False, null=False)
mobile = models.CharField(max_length=11, verbose_name='手机号', null=True, blank=True)
is_admin = models.BooleanField(verbose_name='是否为管理员', null=False, blank=False, default=False)
institute = models.ForeignKey(to=Institute, on_delete=models.CASCADE, verbose_name='所属学院', null=False, blank=False)
admission_time = models.DateField(verbose_name='入学时间', null=False, blank=False)
introduction = models.TextField(max_length=3000, verbose_name='个人简介')
# introduction = UEditorField(verbose_name='个人简介', width=600, height=300, toolbars="full")
icon = models.ImageField(upload_to='media/images/users/icons', null=True, blank=True)
clubs = models.ManyToManyField(to=Club, verbose_name='加入的社团', null=True, blank=True)
class Meta:
verbose_name = '用户'
verbose_name_plural = '用户'
def __str__(self):
return self.username
class Token(models.Model):
token = models.CharField(max_length=128, verbose_name='token')
user = models.OneToOneField(to=User, verbose_name='用户', on_delete=models.CASCADE)
create_time = models.DateTimeField(verbose_name='创建时间', default=datetime.now)
class Meta:
verbose_name = 'token'
verbose_name_plural = verbose_name
def __str__(self):
return 'token'
``` |
{
"source": "jin-hao-chen/yank",
"score": 3
} |
#### File: jin-hao-chen/yank/objects.py
```python
import sys
import time
import copy
from ytypes import *
from color_print import fatal_print
class NilObj(object):
def __init__(self):
self.obj_header = ObjHeader(OT_NIL, nil_cls, self)
self.nil = None
def __hash__(self):
return hash(self.nil)
def __eq__(self, other):
return hash(self.nil) == hash(other.nil)
class BoolObj(object):
def __init__(self, boolean):
self.obj_header = ObjHeader(OT_BOOL, bool_cls, self)
self.bool = boolean
def __hash__(self):
return hash(self.bool)
def __eq__(self, other):
return hash(self.bool) == hash(other.bool)
class StrObj(object):
def __init__(self, string):
self.obj_header = ObjHeader(OT_STR, str_cls, self)
self.str = str(string)
def __hash__(self):
return hash(self.str)
def __eq__(self, other):
return hash(self.str) == hash(other.str)
class IntObj(object):
def __init__(self, integer):
self.obj_header = ObjHeader(OT_INT, int_cls, self)
self.int = int(integer)
def __hash__(self):
return hash(self.int)
def __eq__(self, other):
return hash(self.int) == hash(other.int)
class FloatObj(object):
def __init__(self, float_):
self.obj_header = ObjHeader(OT_FLOAT, float_cls, self)
self.float = float(float_)
def __hash__(self):
return hash(self.float)
def __eq__(self, other):
return hash(self.float) == hash(other.float)
class ListObj(object):
    def __init__(self, list_=None):
self.obj_header = ObjHeader(OT_LIST, list_cls, self)
if not list_:
list_ = []
self.list = list(list_)
class MapObj(object):
def __init__(self, map_=None):
self.obj_header = ObjHeader(OT_MAP, map_cls, self)
if not map_:
map_ = {}
self.map = dict(map_)
class ModuleObj(object):
def __init__(self, name):
self.obj_header = ObjHeader(OT_MODULE, module_cls, self)
self.name = name
self.module_var_names = []
self.module_var_name_len = 0
self.module_var_values = []
def add_module_var(self, name):
for i in range(len(self.module_var_names)):
if self.module_var_names[i] == name:
return i
self.module_var_names.append(name)
# self.module_var_values.append(value)
self.module_var_name_len += 1
return self.module_var_name_len - 1
class FunObj(object):
def __init__(self, name, scope=1, arg_num=0):
self.obj_header = ObjHeader(OT_FUN, fun_cls, self)
self.name = name
self.stream = []
self.stream_num = 0
        # Holds Python-level values: the literals for numbers and strings
self.constants = []
self.constant_num = 0
self.max_used_slots = 0
self.cur_idx = 0
self.scope = scope
self.arg_num = arg_num
def add_constant(self, value):
self.constants.append(value)
self.constant_num += 1
return self.constant_num - 1
def call(obj, method_name):
return obj.obj_header.cls_obj.methods[method_name]
def call_by_value(value, method_name):
return call(value.obj(), method_name)
def exit_if_false(cond):
if not cond:
sys.exit(1)
return True
def _type_to_pystr(obj):
if obj.obj_header.obj_type == OT_INT:
return _int_to_str(obj).str
elif obj.obj_header.obj_type == OT_FLOAT:
return _float_to_str(obj).str
elif obj.obj_header.obj_type == OT_STR:
return _str_to_str(obj).str
elif obj.obj_header.obj_type == OT_LIST:
return _list_to_str(obj).str
elif obj.obj_header.obj_type == OT_MAP:
return _map_to_str(obj).str
elif obj.obj_header.obj_type == OT_NIL:
return _nil_to_str(obj).str
elif obj.obj_header.obj_type == OT_BOOL:
return _bool_to_str(obj).str
elif obj.obj_header.obj_type == OT_FUN:
return _fun_to_str(obj).str
elif obj.obj_header.obj_type == OT_MODULE:
return _module_to_str(obj).str
def type_to_pystr(start, args):
obj = args[start].obj()
if obj.obj_header.obj_type == OT_INT:
return int_to_str(start, args).str
elif obj.obj_header.obj_type == OT_FLOAT:
return float_to_str(start, args).str
elif obj.obj_header.obj_type == OT_STR:
return str_to_str(start, args).str
elif obj.obj_header.obj_type == OT_LIST:
return list_to_str(start, args).str
elif obj.obj_header.obj_type == OT_MAP:
return map_to_str(start, args).str
elif obj.obj_header.obj_type == OT_NIL:
return nil_to_str(start, args).str
elif obj.obj_header.obj_type == OT_BOOL:
return bool_to_str(start, args).str
elif obj.obj_header.obj_type == OT_FUN:
return fun_to_str(start, args).str
elif obj.obj_header.obj_type == OT_MODULE:
return module_to_str(start, args).str
def is_type(obj, obj_type):
return obj.obj_header.obj_type == obj_type
def args_num(pystr):
left = pystr.find('(')
right = pystr.rfind(')')
args_str = pystr[left + 1: right]
return len(args_str.split(','))
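# e.g. args_num('insert(_,_)') returns 2, while args_num('tostr()') returns 1,
# because an empty argument list still splits into a single (empty) piece.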
class ObjHeader(object):
def __init__(self, obj_type, cls_obj, obj):
self.obj_type = obj_type
self.cls_obj = cls_obj
self.obj = obj
class ClsObj(object):
def __init__(self, name):
self.name = name
self.methods = {}
self.method_names = []
module_cls = ClsObj('module_cls')
fun_cls = ClsObj('fun_cls')
nil_cls = ClsObj('nil_cls')
bool_cls = ClsObj('bool_cls')
str_cls = ClsObj('str_cls')
int_cls = ClsObj('int_cls')
float_cls = ClsObj('float_cls')
list_cls = ClsObj('list_cls')
# The map object is special: in yank objects are implemented on top of maps (mimicking JavaScript),
# so map's remove, put and get are also exposed internally as @remove, @put and @get
map_cls = ClsObj('map_cls')
def return_true(start, args, obj):
args[start].to_value(obj)
return True
def return_false():
return False
# The arguments are packed into a yank list
def fun_call(obj, args):
pass
def nil_to_str(start, args):
obj = args[start].obj()
return return_true(start, args, StrObj(str(obj.nil)))
def nil_equ(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type != OT_NIL:
return return_true(start, args, BoolObj(False))
return return_true(start, args, BoolObj(True))
def nil_hash(start, args):
fatal_print('Runtime error, nil cannot be hashed!')
return return_false()
def nil_bind_methods():
nil_cls.methods['tostr()'] = nil_to_str
nil_cls.methods['==(_)'] = nil_equ
nil_cls.methods['hash(_)'] = nil_hash
nil_cls.method_names = ['tostr()', '==(_)', 'hash()']
nil_cls.methods['_tostr()'] = _nil_to_str
nil_cls.methods['_==(_)'] = _nil_equ
nil_cls.methods['_hash()'] = _nil_hash
def bool_to_str(start, args):
obj = args[start].obj()
return return_true(start, args, StrObj(str(obj.bool)))
def bool_equ(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
return return_true(start, args, BoolObj(obj1.bool == obj2.bool))
def bool_hash(start, args):
obj = args[start].obj()
return return_true(start, args, IntObj(hash(obj.bool)))
def bool_bind_methods():
bool_cls.methods['tostr()'] = bool_to_str
bool_cls.methods['==(_)'] = bool_equ
bool_cls.methods['hash()'] = bool_hash
bool_cls.method_names = ['tostr()', '==(_)', 'hash()']
bool_cls.methods['_tostr()'] = _bool_to_str
bool_cls.methods['_==(_)'] = _bool_equ
bool_cls.methods['_hash()'] = _bool_hash
def str_to_str(start, args):
obj = args[start].obj()
return return_true(start, args, StrObj(str(obj.str)))
def str_equ(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
return return_true(start, args, BoolObj(obj1.str == obj2.str))
def str_hash(start, args):
obj = args[start].obj()
return return_true(start, args, IntObj(hash(obj.str)))
def str_add(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type != OT_STR:
fatal_print('Runtime error, arg2 must be string')
return return_false()
return return_true(start, args, StrObj(obj1.str + obj2.str))
def str_numbers(start, args):
obj = args[start].obj()
if obj.str.isdigit():
ret = IntObj(int(obj.str))
else:
try:
ret = FloatObj(float(obj.str))
except:
fatal_print('Runtime error, cannot convert %s to numbers' % obj.str)
return return_false()
return return_true(start, args, ret)
def str_at(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
    if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, index must be int')
return return_false()
return return_true(start, args, StrObj(obj1.str[obj2.int]))
def str_len(start, args):
obj = args[start].obj()
return return_true(start, args, IntObj(len(obj.str)))
def str_emtpy(start, args):
obj = args[start].obj()
return return_true(start, args, BoolObj(len(obj.str) == 0))
def _str_numbers(obj):
if obj.str.isdigit():
ret = IntObj(int(obj.str))
else:
try:
ret = FloatObj(float(obj.str))
except:
fatal_print('Runtime error, cannot convert %s to numbers' % obj.str)
sys.exit(1)
return ret
def str_bind_methods():
str_cls.methods['tostr()'] = str_to_str
str_cls.methods['==(_)'] = str_equ
str_cls.methods['hash()'] = str_hash
str_cls.methods['+(_)'] = str_add
str_cls.methods['at(_)'] = str_at
str_cls.methods['len()'] = str_len
str_cls.methods['empty()'] = str_emtpy
str_cls.methods['numbers()'] = str_numbers
str_cls.method_names = ['tostr()', '==(_)', 'hash()', '+(_)', 'at(_)', 'len()', 'empty()', 'numbers()']
str_cls.methods['_tostr()'] = _str_to_str
str_cls.methods['_==(_)'] = _str_equ
str_cls.methods['_hash()'] = _str_hash
str_cls.methods['_+(_)'] = _str_add
str_cls.methods['_at(_)'] = _str_at
str_cls.methods['_len()'] = _str_len
str_cls.methods['_empty()'] = _str_emtpy
str_cls.methods['_numbers()'] = _str_numbers
def int_to_str(start, args):
obj = args[start].obj()
return return_true(start, args, StrObj(str(obj.int)))
def int_equ(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
return return_true(start, args, BoolObj(obj1.int == obj2.int))
def int_hash(start, args):
obj = args[start].obj()
return return_true(start, args, IntObj(hash(obj.int)))
def int_to_float(start, args):
obj = args[start].obj()
return return_true(start, args, FloatObj(float(obj.int)))
def int_add(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type == OT_FLOAT:
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
if obj1.obj_header.obj_type == OT_FLOAT:
return return_true(start, args, FloatObj(obj1.float + obj2.float))
if obj1.obj_header.obj_type == OT_INT:
return return_true(start, args, IntObj(obj1.int + obj2.int))
def int_sub(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type == OT_FLOAT:
        obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
if obj1.obj_header.obj_type == OT_FLOAT:
return return_true(start, args, FloatObj(obj1.float - obj2.float))
if obj1.obj_header.obj_type == OT_INT:
return return_true(start, args, IntObj(obj1.int - obj2.int))
def int_mul(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type == OT_FLOAT:
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
if obj1.obj_header.obj_type == OT_FLOAT:
return return_true(start, args, FloatObj(obj1.float * obj2.float))
if obj1.obj_header.obj_type == OT_INT:
return return_true(start, args, IntObj(obj1.int * obj2.int))
def int_div(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type == OT_FLOAT:
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
if obj1.obj_header.obj_type == OT_FLOAT:
if obj2.float == 0.0:
fatal_print('Runtime error, arg2 cannot be 0')
return return_false()
        return return_true(start, args, FloatObj(obj1.float / obj2.float))
if obj1.obj_header.obj_type == OT_INT:
if obj2.int == 0:
fatal_print('Runtime error, arg2 cannot be 0')
return return_false()
return return_true(start, args, IntObj(obj1.int / obj2.int))
def int_mod(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, arg2 must be int')
return return_false()
if obj2.int == 0:
fatal_print('Runtime error, arg2 cannot be 0')
return return_false()
return return_true(start, args, IntObj(obj1.int % obj2.int))
def int_gt(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return return_true(start, args, BoolObj(obj1.float > obj2.float))
def int_ge(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, args is not a number')
return return_false()
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return return_true(start, args, BoolObj(obj1.float >= obj2.float))
def int_lt(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, args is not a number')
return return_false()
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return return_true(start, args, BoolObj(obj1.float < obj2.float))
def int_le(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, obj2 is not a number')
return return_false()
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return return_true(start, args, BoolObj(obj1.float <= obj2.float))
def int_bind_methods():
int_cls.methods['tostr()'] = int_to_str
int_cls.methods['==(_)'] = int_equ
int_cls.methods['hash()'] = int_hash
int_cls.methods['float()'] = int_to_float
int_cls.methods['+(_)'] = int_add
int_cls.methods['-(_)'] = int_sub
int_cls.methods['*(_)'] = int_mul
int_cls.methods['/(_)'] = int_div
int_cls.methods['%(_)'] = int_mod
int_cls.methods['>(_)'] = int_gt
int_cls.methods['>=(_)'] = int_ge
int_cls.methods['<(_)'] = int_lt
    int_cls.methods['<=(_)'] = int_le
int_cls.method_names = ['tostr()', '==(_)', 'hash()', 'float()', \
'+(_)', '-(_)', '*(_)', '/(_)', '%(_)', \
'>(_)', '>=(_)', '<(_)', '<=(_)']
int_cls.methods['_tostr(_)'] = _int_to_str
int_cls.methods['_==(_,_)'] = _int_equ
int_cls.methods['_hash(_)'] = _int_hash
int_cls.methods['_float(_)'] = _int_to_float
int_cls.methods['_+(_,_)'] = _int_add
int_cls.methods['_-(_,_)'] = _int_sub
int_cls.methods['_*(_,_)'] = _int_mul
int_cls.methods['_/(_,_)'] = _int_div
int_cls.methods['_%(_,_)'] = _int_mod
int_cls.methods['_>(_,_)'] = _int_gt
int_cls.methods['_>=(_,_)'] = _int_ge
int_cls.methods['_<(_,_)'] = _int_lt
int_cls.methods['_<=(_,_)'] = _int_le
def float_to_str(start, args):
obj = args[start].obj()
return return_true(start, args, StrObj(str(obj.float)))
def float_equ(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
return return_true(start, args, BoolObj(obj1.float == obj2.float))
def float_hash(start, args):
obj = args[start].obj()
return return_true(start, args, IntObj(hash(obj.float)))
def float_to_int(start, args):
obj = args[start].obj()
return return_true(start, args, IntObj(int(obj.float)))
def float_add(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
return return_true(start, args, FloatObj(obj1.float + obj2.float))
def float_sub(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
return return_true(start, args, FloatObj(obj1.float - obj2.float))
def float_mul(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
if obj2.obj_header.obj_type not in [OT_INT, OT_FLOAT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
return return_true(start, args, FloatObj(obj1.float * obj2.float))
def float_div(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
if obj2.float == 0:
fatal_print('Runtime error, arg2 cannot be 0')
return return_false()
return return_true(start, args, FloatObj(obj1.float / obj2.float))
def float_gt(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return return_true(start, args, BoolObj(obj1.float > obj2.float))
def float_ge(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return return_true(start, args, BoolObj(obj1.float >= obj2.float))
def float_lt(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return return_true(start, args, BoolObj(obj1.float < obj2.float))
def float_le(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
return return_false()
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return return_true(start, args, BoolObj(obj1.float <= obj2.float))
def float_bind_methods():
float_cls.methods['tostr()'] = float_to_str
float_cls.methods['==(_)'] = float_equ
float_cls.methods['hash()'] = float_hash
float_cls.methods['int()'] = float_to_int
float_cls.methods['+(_)'] = float_add
float_cls.methods['-(_)'] = float_sub
float_cls.methods['*(_)'] = float_mul
float_cls.methods['/(_)'] = float_div
float_cls.methods['>(_)'] = float_gt
float_cls.methods['>=(_)'] = float_ge
float_cls.methods['<(_)'] = float_lt
float_cls.methods['<=(_)'] = float_le
float_cls.method_names = ['tostr()', '==(_)', 'hash()', 'int()', \
'+(_)', '-(_)', '*(_)', '/(_)', '>(_)', \
'>=(_)', '<(_)', '<=(_)']
float_cls.methods['_tostr(_)'] = _float_to_str
float_cls.methods['_==(_,_)'] = _float_equ
float_cls.methods['_hash(_)'] = _float_hash
float_cls.methods['_int(_)'] = _float_to_int
float_cls.methods['_+(_,_)'] = _float_add
float_cls.methods['_-(_,_)'] = _float_sub
float_cls.methods['_*(_,_)'] = _float_mul
float_cls.methods['_/(_,_)'] = _float_div
float_cls.methods['_>(_,_)'] = _float_gt
float_cls.methods['_>=(_,_)'] = _float_ge
float_cls.methods['_<(_,_)'] = _float_lt
float_cls.methods['_<=(_,_)'] = _float_le
def list_len(start, args):
obj = args[start].obj()
return return_true(start, args, IntObj(len(obj.list)))
def list_to_str(start, args):
obj = args[start].obj()
s = '['
for item in obj.list:
s += _type_to_pystr(item.obj()) + ', '
s = s[:-2] + ']'
return return_true(start, args, StrObj(s))
def list_at(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, arg2 must be int')
return return_false()
ret = copy.copy(obj1.list[obj2.int])
args[start].value_type = ret.value_type
args[start].obj_header = ret.obj_header
return True
def list_insert(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
obj3 = args[start + 2].obj()
    # obj2 is the index
if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, index must be int')
return return_false()
obj1.list.insert(obj2.int, copy.copy(args[start + 2]))
return return_true(start, args, NilObj())
def list_append(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
obj1.list.append(copy.copy(args[start + 1]))
return return_true(start, args, NilObj())
def list_remove(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
    # obj2 is the index
if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, index must be int')
return return_false()
    length = len(obj1.list)
    if obj2.int >= length or obj2.int < 0:
        fatal_print('Runtime error, index out of range')
return return_false()
del obj1.list[obj2.int]
return return_true(start, args, NilObj())
def list_bind_methods():
list_cls.methods['len()'] = list_len
list_cls.methods['tostr()'] = list_to_str
list_cls.methods['insert(_,_)'] = list_insert
list_cls.methods['at(_)'] = list_at
list_cls.methods['remove(_)'] = list_remove
list_cls.methods['append(_)'] = list_append
list_cls.methods['_len(_)'] = _list_len
list_cls.methods['_tostr(_)'] = _list_to_str
list_cls.methods['_insert(_,_,_)'] = _list_insert
list_cls.methods['_at(_,_)'] = _list_at
list_cls.methods['_remove(_,_)'] = _list_remove
list_cls.methods['_append(_,_)'] = _list_append
list_cls.method_names = ['len()', 'tostr()', 'insert(_,_)', 'at(_)', 'remove(_)', 'append(_)']
def map_put(start, args):
obj = args[start].obj()
key = args[start + 1].obj()
val = args[start + 2].obj()
if key.obj_header.obj_type in [OT_MAP, OT_LIST]:
fatal_print('Runtime error, map or list cannot be hashed')
return return_false()
obj.map[copy.copy(args[start + 1])] = copy.copy(args[start + 2])
return return_true(start, args, NilObj())
def map_get(start, args):
obj = args[start].obj()
key = args[start + 1].obj()
if key.obj_header.obj_type == OT_NIL:
fatal_print('Runtime error, key cannot be nil')
return return_false()
if key.obj_header.obj_type in [OT_MAP, OT_LIST]:
fatal_print('Runtime error, map or list cannot be hashed')
return return_false()
if args[start + 1] not in obj.map:
return return_true(start, args, NilObj())
ret = copy.copy(obj.map[args[start + 1]])
args[start].value_type = ret.value_type
args[start].obj_header = ret.obj_header
return True
def map_remove(start, args):
obj = args[start].obj()
key = args[start + 1].obj()
if key.obj_header.obj_type == OT_NIL:
fatal_print('Runtime error, key cannot be nil')
return return_false()
if key.obj_header.obj_type in [OT_MAP, OT_LIST]:
fatal_print('Runtime error, map or list cannot be hashed')
return return_false()
if args[start + 1] in obj.map:
del obj.map[args[start + 1]]
return return_true(start, args, NilObj())
def map_to_str(start, args):
obj = args[start].obj()
s = '{'
for key in obj.map:
s += _type_to_pystr(key.obj()) + ': ' + _type_to_pystr(obj.map[key].obj()) + ', '
return return_true(start, args, StrObj(s[:-2] + '}'))
def map_bind_methods():
map_cls.methods['tostr()'] = map_to_str
map_cls.methods['put(_,_)'] = map_put
map_cls.methods['get(_)'] = map_get
map_cls.methods['remove(_)'] = map_remove
map_cls.methods['@put(_,_)'] = map_put
map_cls.methods['@get(_)'] = map_get
map_cls.methods['@remove(_)'] = map_remove
map_cls.methods['@_tostr(_)'] = _map_to_str
map_cls.methods['@_put(_,_,_)'] = _map_put
map_cls.methods['@_get(_,_)'] = _map_get
map_cls.methods['@_remove(_,_)'] = _map_remove
map_cls.method_names = ['tostr()', 'put(_,_)', 'get(_)', 'remove(_)']
def module_to_str(start, args):
obj = args[start].obj()
addr = str(id(obj))
return return_true(start, args, StrObj('<Module(addr: %s) %s>' % (addr, obj.name)))
def module_bind_methods():
module_cls.methods['tostr(_)'] = module_to_str
module_cls.methods['_tostr(_)'] = _module_to_str
module_cls.method_names = ['tostr()']
def fun_to_str(start, args):
obj = args[start].obj()
addr = str(id(obj))
return return_true(start, args, StrObj('<Function(addr: %s) %s>' % (addr, obj.name)))
def fun_bind_methods():
fun_cls.methods['tostr(_)'] = fun_to_str
fun_cls.methods['_tostr(_)'] = _fun_to_str
def _bind_methods():
module_bind_methods()
fun_bind_methods()
nil_bind_methods()
bool_bind_methods()
str_bind_methods()
int_bind_methods()
float_bind_methods()
list_bind_methods()
map_bind_methods()
# Internal helpers used by the wrappers above
def _nil_to_str(obj):
return StrObj(str(obj.nil))
def _nil_equ(obj1, obj2):
if obj2.obj_header.obj_type != OT_NIL:
return BoolObj(False)
return BoolObj(True)
def _nil_hash(obj):
    fatal_print('Runtime error, nil cannot be hashed!')
sys.exit(1)
def _bool_to_str(obj):
return StrObj(str(obj.bool))
def _bool_equ(obj1, obj2):
return BoolObj(obj1.bool == obj2.bool)
def _bool_hash(obj):
return IntObj(hash(obj.bool))
def _str_to_str(obj):
return obj
def _str_equ(obj1, obj2):
return BoolObj(obj1.str == obj2.str)
def _str_hash(obj):
return IntObj(hash(obj.str))
def _str_add(obj1, obj2):
if obj2.obj_header.obj_type != OT_STR:
fatal_print('Runtime error, arg2 must be string')
sys.exit(1)
return StrObj(obj1.str + obj2.str)
def _str_at(obj1, obj2):
    if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, index must be int')
sys.exit(1)
return StrObj(obj1.str[obj2.int])
def _str_len(obj):
return IntObj(len(obj.str))
def _str_emtpy(obj):
return BoolObj(len(obj.str) == 0)
def _int_to_str(obj):
return StrObj(str(obj.int))
def _int_equ(obj1, obj2):
return BoolObj(obj1.int == obj2.int)
def _int_hash(obj):
return IntObj(hash(obj.int))
def _int_to_float(obj):
return FloatObj(float(obj.int))
def _int_add(obj1, obj2):
if obj2.obj_header.obj_type == OT_FLOAT:
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
if obj1.obj_header.obj_type == OT_FLOAT:
return FloatObj(obj1.float + obj2.float)
if obj1.obj_header.obj_type == OT_INT:
return IntObj(obj1.int + obj2.int)
def _int_sub(obj1, obj2):
if obj2.obj_header.obj_type == OT_FLOAT:
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
if obj1.obj_header.obj_type == OT_FLOAT:
return FloatObj(obj1.float - obj2.float)
if obj1.obj_header.obj_type == OT_INT:
return IntObj(obj1.int - obj2.int)
def _int_mul(obj1, obj2):
if obj2.obj_header.obj_type == OT_FLOAT:
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
if obj1.obj_header.obj_type == OT_FLOAT:
return FloatObj(obj1.float * obj2.float)
if obj1.obj_header.obj_type == OT_INT:
return IntObj(obj1.int * obj2.int)
def _int_div(obj1, obj2):
if obj2.obj_header.obj_type == OT_FLOAT:
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
if obj1.obj_header.obj_type == OT_FLOAT:
if obj2.float == 0.0:
fatal_print('Runtime error, arg2 cannot be 0')
sys.exit(1)
return FloatObj(obj1.float / obj2.float)
if obj1.obj_header.obj_type == OT_INT:
if obj2.int == 0:
fatal_print('Runtime error, arg2 cannot be 0')
sys.exit(1)
return IntObj(obj1.int / obj2.int)
def _int_mod(obj1, obj2):
if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, arg2 must be int')
sys.exit(1)
if obj2.int == 0:
fatal_print('Runtime error, arg2 cannot be 0')
sys.exit(1)
return IntObj(obj1.int % obj2.int)
def _int_gt(obj1, obj2):
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return BoolObj(obj1.float > obj2.float)
def _int_ge(obj1, obj2):
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, args is not a number')
sys.exit(1)
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return BoolObj(obj1.float >= obj2.float)
def _int_lt(obj1, obj2):
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, args is not a number')
sys.exit(1)
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return BoolObj(obj1.float < obj2.float)
def _int_le(obj1, obj2):
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, obj2 is not a number')
sys.exit(1)
obj1 = _int_to_float(obj1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return BoolObj(obj1.float <= obj2.float)
def _float_to_str(obj):
return StrObj(str(obj.float))
def _float_equ(obj1, obj2):
return BoolObj(obj1.float == obj2.float)
def _float_hash(obj):
return IntObj(hash(obj.float))
def _float_to_int(obj):
return IntObj(int(obj.float))
def _float_add(obj1, obj2):
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
return FloatObj(obj1.float + obj2.float)
def _float_sub(obj1, obj2):
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
return FloatObj(obj1.float - obj2.float)
def _float_mul(obj1, obj2):
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
if obj2.obj_header.obj_type not in [OT_INT, OT_FLOAT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
return FloatObj(obj1.float * obj2.float)
def _float_div(obj1, obj2):
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
    if obj2.float == 0:
        fatal_print('Runtime error, arg2 cannot be 0')
        sys.exit(1)
    return FloatObj(obj1.float / obj2.float)
def _float_gt(obj1, obj2):
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return BoolObj(obj1.float > obj2.float)
def _float_ge(obj1, obj2):
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return BoolObj(obj1.float >= obj2.float)
def _float_lt(obj1, obj2):
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return BoolObj(obj1.float < obj2.float)
def _float_le(obj1, obj2):
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, arg2 is not a number')
sys.exit(1)
if obj2.obj_header.obj_type == OT_INT:
obj2 = _int_to_float(obj2)
return BoolObj(obj1.float <= obj2.float)
def _list_len(obj):
return IntObj(len(obj.list))
def _list_to_str(obj):
s = '['
for item in obj.list:
s += _type_to_pystr(item.obj()) + ', '
s = s[:-2] + ']'
return StrObj(s)
def _list_at(obj1, obj2):
if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, arg2 must be int')
sys.exit(1)
return obj1.list[obj2.int]
def _list_insert(obj1, obj2, obj3):
    # obj2 is the index
if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, index must be int')
sys.exit(1)
obj1.list.insert(obj2.int, copy.copy(obj3))
def _list_append(obj1, obj2):
obj1.list.append(copy.copy(obj2))
def _list_remove(obj1, obj2):
    # obj2 is the index
if obj2.obj_header.obj_type != OT_INT:
fatal_print('Runtime error, index must be int')
sys.exit(1)
    length = len(obj1.list)
    if obj2.int >= length or obj2.int < 0:
        fatal_print('Runtime error, index out of range')
sys.exit(1)
del obj1.list[obj2.int]
def _map_put(obj1, key, val):
if key.obj().obj_header.obj_type in [OT_MAP, OT_LIST]:
fatal_print('Runtime error, map or list cannot be hashed')
sys.exit(1)
    obj1.map[copy.copy(key)] = copy.copy(val)
def _map_get(obj, key):
if key.obj().obj_header.obj_type == OT_NIL:
fatal_print('Runtime error, key cannot be nil')
sys.exit(1)
if key.obj().obj_header.obj_type in [OT_MAP, OT_LIST]:
fatal_print('Runtime error, map or list cannot be hashed')
sys.exit(1)
if key not in obj.map:
return Value.to_value(NilObj())
return copy.copy(obj.map[key])
def _map_remove(obj, key):
if key.obj().obj_header.obj_type == OT_NIL:
fatal_print('Runtime error, key cannot be nil')
sys.exit(1)
if key.obj().obj_header.obj_type in [OT_MAP, OT_LIST]:
fatal_print('Runtime error, map or list cannot be hashed')
sys.exit(1)
if key in obj.map:
del obj.map[key]
def _map_to_str(obj):
s = '{'
for key in obj.map:
s += _type_to_pystr(key.obj()) + ': ' + _type_to_pystr(obj.map[key].obj()) + ', '
return StrObj(s[:-2] + '}')
def _module_to_str(obj):
addr = str(id(obj))
return StrObj('<Module(addr: %s) %s>' % (addr, obj.name))
def _fun_to_str(obj):
addr = str(id(obj))
return StrObj('<Function(addr: %s) %s>' % (addr, obj.name))
class Value(object):
def __init__(self, obj_header=NilObj().obj_header, value_type=VT_NIL):
self.obj_header = obj_header
self.value_type = value_type
def to_value(self, obj):
self.obj_header = obj.obj_header
if is_type(obj, OT_INT):
self.value_type = VT_INT
elif is_type(obj, OT_FLOAT):
self.value_type = VT_FLOAT
elif is_type(obj, OT_STR):
self.value_type = VT_STR
elif is_type(obj, OT_FUN):
self.value_type = VT_FUN
elif is_type(obj, OT_MAP):
self.value_type = VT_MAP
elif is_type(obj, OT_LIST):
self.value_type = VT_LIST
elif is_type(obj, OT_NIL):
self.value_type = VT_NIL
elif is_type(obj, OT_BOOL):
if obj.bool:
self.value_type = VT_TRUE
else:
self.value_type = VT_FALSE
elif is_type(obj, OT_MODULE):
self.value_type = VT_MODULE
@classmethod
def new_value(cls, obj):
ret = Value(obj.obj_header)
if is_type(obj, OT_INT):
ret.value_type = VT_INT
elif is_type(obj, OT_FLOAT):
ret.value_type = VT_FLOAT
elif is_type(obj, OT_STR):
ret.value_type = VT_STR
elif is_type(obj, OT_FUN):
ret.value_type = VT_FUN
elif is_type(obj, OT_MAP):
ret.value_type = VT_MAP
elif is_type(obj, OT_LIST):
ret.value_type = VT_LIST
elif is_type(obj, OT_NIL):
ret.value_type = VT_NIL
elif is_type(obj, OT_BOOL):
if obj.bool:
ret.value_type = VT_TRUE
else:
ret.value_type = VT_FALSE
elif is_type(obj, OT_MODULE):
ret.value_type = VT_MODULE
return ret
def clear_value(self):
self.obj_header = NilObj().obj_header
self.value_type = VT_NIL
def obj(self):
return self.obj_header.obj
def __eq__(self, other):
return self.__hash__() == other.__hash__()
def __hash__(self):
return call(self.obj(), '_hash(_)')(self.obj()).int
class Frame(object):
def __init__(self, thread, start):
self.thread = thread
self.start = start
        # start and end are both inclusive
self.end = self.start
def extend(self, steps=1):
self.end += steps
if self.thread.size - 1 - self.end <= 512:
self.thread.values.extend([Value() for _ in range(self.thread.size)])
self.thread.size *= 2
def __getitem__(self, idx):
return self.thread.values[self.start + idx]
def __setitem__(self, idx, val):
self.thread.values[self.start + idx] = val
def __str__(self):
return str((self.start, self.end))
class Thread(object):
def __init__(self, size=1024):
self.values = [Value() for _ in range(size)]
self.frames = []
self.frame_num = 0
self.start = 0
self.size = size
def alloc_frame(self):
        # The first frame
if not self.frames:
frame = Frame(self, self.start)
self.frames.append(frame)
self.frame_num += 1
return frame
else:
cur_frame = self.frames[self.frame_num - 1]
next_idx = cur_frame.end + 1
if self.size - 1 - next_idx <= 512:
self.values.extend([Value() for _ in range(self.size)])
self.size *= 2
frame = Frame(self, next_idx)
self.frames.append(frame)
self.frame_num += 1
return frame
def recycle_frame(self):
"""回收当前的frame
"""
del self.frames[self.frame_num - 1]
self.frame_num -= 1
        # If a previous frame still exists, return it
if self.frame_num >= 1:
return self.frames[self.frame_num - 1]
        # Otherwise return None
return None
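# Illustrative sketch (not part of the original module): allocating and recycling a
# stack frame on a Thread; the sizes and slot indices are arbitrary examples.
if __name__ == '__main__':
    demo_thread = Thread(size=16)
    demo_frame = demo_thread.alloc_frame()
    demo_frame.extend(2)                            # reserve slots 0..2 of this frame
    demo_frame[0] = Value.new_value(IntObj(42))
    demo_frame[1] = Value.new_value(StrObj('hi'))
    print(demo_frame)                               # prints the frame's (start, end) range
    demo_thread.recycle_frame()                     # drop the frame again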
_bind_methods()
```
#### File: jin-hao-chen/yank/opcode.py
```python
LOAD_CONST = 0
STORE_CONST = 1
LOAD_LOCAL = 2
STORE_LOCAL = 3
LOAD_GLOBAL = 4
STORE_GLOBAL = 5
RETURN_VALUE = 6 # Recycle the frame and push the return value onto the caller's stack top
PUSH_NIL = 7
POP = 8
JUMP_IF_FALSE = 9
JUMP = 10
PUSH_TRUE = 11
PUSH_FALSE = 12
LOOP = 13
CALL0 = 18
CALL1 = 19
CALL2 = 20
CALL3 = 21
CALL4 = 22
CALL5 = 23
CALL6 = 24
CALL7 = 25
CALL8 = 26
CALL9 = 27
CALL10 = 28
CALL11 = 29
CALL12 = 30
CALL13 = 31
CALL14 = 32
CALL15 = 33
CALL16 = 34
CALL17 = 35
CALL18 = 36
CALL19 = 37
CALL20 = 38
AND = 50
OR = 51
NOT = 52
END = 100
def opcode_print(stream):
ptr = 0
stream_len = len(stream)
i = 0
while ptr < stream_len:
i += 1
op = stream[ptr]
s = ''
if op == LOAD_CONST:
s += 'LOAD_CONST '
ptr += 1
operand = stream[ptr]
s += str(operand)
elif op == PUSH_TRUE:
s += 'PUSH_TRUE '
elif op == PUSH_FALSE:
s += 'PUSH_FALSE'
        elif CALL0 <= op <= CALL20:
            # CALL0 .. CALL20 are contiguous opcodes with the same layout:
            # the opcode is followed by a single operand
            s += 'CALL{} '.format(op - CALL0)
            ptr += 1
            operand = stream[ptr]
            s += str(operand)
elif op == PUSH_NIL:
s += 'PUSH_NIL '
elif op == LOAD_LOCAL:
s += 'LOAD_LOCAL '
ptr += 1
operand = stream[ptr]
s += str(operand)
elif op == LOAD_GLOBAL:
s += 'LOAD_GLOBAL '
ptr += 1
operand = stream[ptr]
s += str(operand)
elif op == STORE_LOCAL:
s += 'STORE_LOCAL '
ptr += 1
operand = stream[ptr]
s += str(operand)
elif op == STORE_GLOBAL:
s += 'STORE_GLOBAL '
ptr += 1
operand = stream[ptr]
s += str(operand)
ptr += 1
print(str(i) + ' ' + s)
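# Illustrative example (not part of the original module):
#   opcode_print([LOAD_CONST, 0, LOAD_LOCAL, 1, CALL1, 3])
# would print roughly:
#   1 LOAD_CONST 0
#   2 LOAD_LOCAL 1
#   3 CALL1 3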
```
#### File: jin-hao-chen/yank/vm.py
```python
from objects import (module_cls, fun_cls, nil_cls, bool_cls, str_cls, int_cls, \
float_cls, list_cls, map_cls)
class VM(object):
def __init__(self):
self.fun_cls = fun_cls
self.nil_cls = nil_cls
self.bool_cls = bool_cls
self.str_cls = str_cls
self.int_cls = int_cls
self.float_cls = float_cls
self.list_cls = list_cls
self.map_cls = map_cls
self.module_cls = module_cls
self.builtin_clses = [fun_cls, nil_cls, bool_cls, str_cls, int_cls, float_cls, list_cls, map_cls, module_cls]
"""
self.method_names = ['nil.tostr()', 'nil.==(_)', 'nil.hash(_)',
'bool.tostr()', 'bool.==(_)', 'bool.hash()',
'str.tostr()', 'str.==(_)', 'str.hash()','str.+(_)',
'str.at(_)', 'str.len()', 'str.empty()', 'str.numbers()',
'int.tostr()', 'int.==(_)', 'int.hash()', 'int.float()',
'int.+(_)', 'int.-(_)', 'int.*(_)', 'int./(_)', 'int.%(_)',
'int.>(_)', 'int.>=(_)', 'int.<(_)', 'int.<=(_)',
'float.tostr()', 'float.==(_)', 'float.hash()', 'float.int()',
'float.+(_)', 'float.-(_)', 'float.*(_)', 'float./(_)',
'float.>(_)', 'float.>=(_)', 'float.<(_)', 'float.<=(_)',
'list.len()', 'list.tostr()', 'list.insert(_,_)', 'list.at(_)',
'list.remove(_)', 'list.append(_)', 'map.tostr()',
'map.put(_,_)', 'map.get(_)', 'map.remove(_)', 'map.@put(_,_)',
'map.@get(_)', 'map.@remove(_)']
"""
def build_core_module(self):
pass
``` |
{
"source": "jin-hao-chen/youdaobackend",
"score": 2
} |
#### File: apps/advance/views.py
```python
import os
import time
import uuid
import json
import datetime
import io
import shutil
import base64
import hashlib
from pydub import AudioSegment
import wave
from PIL import Image, ImageDraw, ImageFont
from flask import Blueprint
from flask.views import MethodView
from flask import (request, jsonify,
send_from_directory)
import requests
from apps import settings
from apps.advance import tools
advance_bp = Blueprint('advance', __name__)
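# Translation-direction map: the Chinese keys are labels such as '英汉' (English -> Chinese),
# '汉英' (Chinese -> English), '日汉' (Japanese -> Chinese), etc.; each value is a
# [source, target] pair of language codes handed to the tools module. The keys are kept
# as-is because clients send them literally in the 'lang' form field.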
LANG_CODE = {
'英汉': ['en', 'zh-CHS'],
'汉汉': ['zh-CHS', 'zh-CHS'],
'日汉': ['ja', 'zh-CHS'],
'法汉': ['fr', 'zh-CHS'],
'韩汉': ['ko', 'zh-CHS'],
'汉英': ['zh-CHS', 'en'],
'汉日': ['zh-CHS', 'ja'],
'汉法': ['zh-CHS', 'fr'],
'汉韩': ['zh-CHS', 'ko']
}
def advance_scan_words():
ret = {
'code': settings.CODE_OK
}
img = request.files.get('image')
out = img.read()
img_base64 = base64.b64encode(out).decode('utf-8')
image = Image.open(io.BytesIO(out))
data = tools.ocr(img_base64, LANG_CODE[request.form.get('lang')][0])
if data['code'] == settings.CODE_ERR:
ret['code'] = settings.CODE_ERR
return ret
for line in data['items']:
data1 = tools.scan_words(line['text'],
LANG_CODE[request.form.get('lang')][0],
LANG_CODE[request.form.get('lang')][1])
if data1['code'] == settings.CODE_ERR:
ret['code'] = settings.CODE_ERR
return ret
translation = data1['item']
tools.add_text(image, translation[0], line['loc'])
byteIO = io.BytesIO()
image.save(byteIO, format='jpeg')
item = base64.b64encode(byteIO.getvalue()).decode('utf-8')
ret['item'] = item
return ret
def advance_image_recognize():
ret = {
'code': settings.CODE_OK
}
img = request.files.get('file')
out = img.read()
img_base64 = base64.b64encode(out).decode('utf-8')
result = tools.recognize(img_base64)
if result is None:
ret['code'] = settings.CODE_ERR
return ret
data = []
for res in result:
if res['score'] > 0.3:
tmp = tools.scan_words(res['keyword'],
LANG_CODE[request.form.get('lang')][0],
LANG_CODE[request.form.get('lang')][1])
if tmp['code'] == settings.CODE_ERR:
ret['code'] = settings.CODE_ERR
return ret
data.append(tmp['item'])
ret['items'] = data
return ret
def advance_voice_translate():
audio = request.files['file']
prefix = audio.filename.split('.')[0]
old_path = os.path.join(settings.STATIC_DIR, audio.filename)
audio.save(old_path)
new_path = os.path.join(settings.STATIC_DIR, prefix + '.wav')
tools.mp3_to_wav(old_path, new_path)
os.remove(old_path)
wav_info = wave.open(new_path, 'rb')
sample_rate = wav_info.getframerate()
nchannels = wav_info.getnchannels()
wav_info.close()
with open(new_path, 'rb') as file_wav:
audio_base64 = base64.b64encode(file_wav.read()).decode('utf-8')
origin = tools.voice_recognize(audio_base64, 16000, nchannels, LANG_CODE[request.form.get('lang')][0])
if origin is None:
os.remove(new_path)
return {
'code': settings.CODE_ERR
}
result = tools.scan_words(origin, LANG_CODE[request.form.get('lang')][0], LANG_CODE[request.form.get('lang')][1])['item'][0]
ret = {
'code': settings.CODE_OK,
'origin': origin,
'result': result
}
os.remove(new_path)
return ret
# used by search_word()
def _fetch_data(data, name):
if name in data.keys():
return data[name]
return []
# used by search_word()
def _fetch_data2(data, name1, name2):
ret = _fetch_data(data, name1)
if ret:
if name2 in ret.keys():
return ret[name2]
return ret
def search_word():
ret = {
'code': settings.CODE_OK
}
explain = tools.translate(request.form.get('word'),
LANG_CODE[request.form.get('lang')][0],
LANG_CODE[request.form.get('lang')][1])
result = {
'explain': _fetch_data2(explain, 'basic', 'explains'),
'wfs': _fetch_data2(explain, 'basic', 'wfs'),
'web': _fetch_data(explain, 'web'),
'phonetic': _fetch_data2(explain, 'basic', 'phonetic'),
'translation': _fetch_data(explain, 'translation')[0],
'speakUrl': _fetch_data(explain, 'speakUrl')
}
ret['item'] = result
return ret
class AdvanceView(MethodView):
methods = ['GET', 'POST']
def post(self):
if request.args.get('scan_words') == 'true':
return jsonify(advance_scan_words())
if request.args.get('image_recognition') == 'true':
return jsonify(advance_image_recognize())
if request.args.get('voice_translate') == 'true':
            return jsonify(advance_voice_translate())
if request.args.get('search_word') == 'true':
return jsonify(search_word())
advance_bp.add_url_rule('', view_func=AdvanceView.as_view(name='advance_view'), endpoint='advance_view', strict_slashes=True)
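# Illustrative request (assumes the blueprint is mounted under an /advance prefix as in
# apps/__init__.py; not part of the original module):
#   POST <API_PREFIX>/advance/?scan_words=true   with multipart form fields:
#       image: the picture to OCR
#       lang:  one of the LANG_CODE keys, e.g. '英汉'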
```
#### File: youdaobackend/apps/__init__.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from apps import settings
db = SQLAlchemy()
# import blueprints
from apps.translation.views import translation_bp
from apps.advance.views import advance_bp
def create_app():
app = Flask(__name__, static_folder=settings.STATIC_DIR)
app.register_blueprint(translation_bp, url_prefix=settings.API_PREFIX + '/translation')
app.register_blueprint(advance_bp, url_prefix=settings.API_PREFIX + '/advance')
# change your password
app.config['SQLALCHEMY_DATABASE_URI'] \
= 'mysql+pymysql://root:[email protected]:3306/file_go?charset=utf8mb4'
app.config['SQLALCHEMY_POOL_SIZE'] = 6
app.config['SQLALCHEMY_POOL_TIMEOUT'] = 10
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
CORS(app, supports_credentials=True, resources=r'/*')
return app
``` |
{
"source": "jinhaotutu/flask_iot_py",
"score": 2
} |
#### File: jinhaotutu/flask_iot_py/main.py
```python
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
import wx_api
import dev_api
import tool_api
import sence_api
import link_api
# Prefer your package name as the Flask init argument; this initialization style is the officially recommended one, see: http://flask.pocoo.org/docs/0.12/api/#flask.Flask
app = Flask(__name__)
api = Api(app)
token = '<PASSWORD>'
def check_token(token_in):
if (not token_in):
print ('not token')
return -1
elif (token_in != token):
print('token error: ' + token_in)
return -1
else:
return 0
# get blog info
class GetBlogInfo(Resource):
def post(self):
if (0 != check_token(reqparse.request.headers.get('token'))):
abort(403)
return
else:
return wx_api.wx_blog_info_get(reqparse.request.json)
# get dev list
class GetDevList(Resource):
def post(self):
if (0 != check_token(reqparse.request.headers.get('token'))):
abort(403)
return
else:
return dev_api.dev_list_get(reqparse.request.json)
# get tool list
class GetToolList(Resource):
def post(self):
if (0 != check_token(reqparse.request.headers.get('token'))):
abort(403)
return
else:
return tool_api.tool_list_get(reqparse.request.json)
# get sence list
class GetSenceList(Resource):
def post(self):
if (0 != check_token(reqparse.request.headers.get('token'))):
abort(403)
return
else:
return sence_api.sence_list_get(reqparse.request.json)
# get link list
class GetLinkList(Resource):
def post(self):
if (0 != check_token(reqparse.request.headers.get('token'))):
abort(403)
return
else:
return link_api.link_list_get(reqparse.request.json)
##
## Actually setup the Api resource routing here
##
api.add_resource(GetBlogInfo, '/iot/blog/info/get')
api.add_resource(GetDevList, '/iot/dev/list/get')
api.add_resource(GetToolList, '/iot/tool/list/get')
api.add_resource(GetSenceList, '/iot/sence/list/get')
api.add_resource(GetLinkList, '/iot/link/list/get')
if __name__ == '__main__':
# init
wx_api.wx_api_init()
    # for the official way to start the app, see: http://flask.pocoo.org/docs/0.12/quickstart/#a-minimal-application
app.run(host="127.0.0.1", port=8090, debug=True) #, ssl_context='adhoc')
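# Illustrative request (assumes the server above is running and that `token` holds the
# configured secret; the placeholder value is intentionally not filled in here):
#   curl -X POST http://127.0.0.1:8090/iot/dev/list/get \
#        -H "token: <the configured token>" \
#        -H "Content-Type: application/json" -d '{}'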
```
#### File: jinhaotutu/flask_iot_py/sence_api.py
```python
import json
import requests
list_test = {
'type' : 'sence',
'item' : [
{
            'name' : 'Scan QR code',
'image_url' : '1234',
'dev_id' : '0000000001',
'third_url' : '1234'
},{
            'name' : 'AP provisioning',
'image_url' : '1234',
'dev_id' : '0000000002'
},{
            'name' : 'Bluetooth',
'image_url' : '1234',
'dev_id' : '0000000003'
}
]
}
def sence_list_get(params):
return list_test
``` |
{
"source": "jinheeson1008/tensorflow-lstm-regression",
"score": 2
} |
#### File: traceml/integrations/xgboost.py
```python
import ujson
from traceml import tracking
from traceml.exceptions import TracemlException
from traceml.logger import logger
try:
import xgboost as xgb
from xgboost import Booster
except ImportError:
raise TracemlException("xgboost is required to use the tracking callback")
def _get_cv(model):
return getattr(model, "cvfolds", False)
def _log_importance(run, model, model_folds, max_num_features, **kwargs):
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Please install matplotlib to log importance")
if model_folds:
for i, fold in enumerate(model_folds):
importance = xgb.plot_importance(
fold.bst, max_num_features=max_num_features, **kwargs
)
run.log_mpl_plotly_chart(
name="feature_importance", figure=importance.figure, step=i
)
else:
importance = xgb.plot_importance(
model, max_num_features=max_num_features, **kwargs
)
run.log_mpl_plotly_chart(name="feature_importance", figure=importance.figure)
plt.close("all")
def _log_model(run, model, model_folds):
def _save(file_model, file_name):
asset_path = run.get_outputs_path(file_name)
file_model.save_model(asset_path)
run.log_model_ref(asset_path, framework="xgboost")
if model_folds:
for i, cvpack in enumerate(model_folds):
_save(cvpack.bst, "model-{}".format(i))
else: # train case
_save(model, "model")
def callback(
log_model: bool = True,
log_importance: bool = True,
max_num_features: int = None,
run: "Run" = None,
):
run = tracking.get_or_create_run(run)
def callback(env):
# Log metrics after iteration
metrics = {}
for item in env.evaluation_result_list:
if len(item) == 2: # train case
metrics[item[0]] = item[1]
if len(item) == 3: # cv case
metrics["{}-mean".format(item[0])] = item[1]
metrics["{}-std".format(item[0])] = item[2]
run.log_metrics(
**metrics,
step=env.iteration,
)
model = getattr(env, "model")
model_folds = _get_cv(env)
# Log booster, end of training
if log_model:
_log_model(run=run, model=model, model_folds=model_folds)
# Log feature importance, end of training
if env.iteration + 1 == env.end_iteration and log_importance:
try:
_log_importance(
run,
model=model,
model_folds=model_folds,
max_num_features=max_num_features,
)
except Exception as e:
logger.info("Failed logging feature importance %s", e)
return callback
class Callback(xgb.callback.TrainingCallback):
def __init__(
self,
run: "Run" = None,
log_model: bool = True,
log_importance: bool = True,
importance_type: str = "gain",
max_num_features: int = None,
):
self.log_model: bool = log_model
self.log_importance: bool = log_importance
self.importance_type: str = importance_type
self.max_num_features: int = max_num_features
self.run = tracking.get_or_create_run(run)
def after_training(self, model: Booster) -> Booster:
model_folds = _get_cv(model)
if self.log_model:
_log_model(run=self.run, model=model, model_folds=model_folds)
if self.log_importance:
_log_importance(
self.run,
model=model,
model_folds=model_folds,
max_num_features=self.max_num_features,
)
if model_folds:
config = {}
for i, fold in enumerate(model_folds):
config["fold_{}_config".format(i)] = ujson.loads(fold.bst.save_config())
if config:
self.run.log_inputs(**config)
else:
self.run.log_inputs(config=ujson.loads(model.save_config()))
outputs = {}
if "best_score" in model.attributes().keys():
outputs["best_score"] = model.attributes()["best_score"]
if "best_iteration" in model.attributes().keys():
outputs["best_iteration"] = model.attributes()["best_iteration"]
self.run.log_outputs(**outputs)
return model
def after_iteration(self, model: Booster, epoch: int, evals_log: dict) -> bool:
metrics = {}
for stage, metrics_dict in evals_log.items():
for metric_name, metric_values in evals_log[stage].items():
if _get_cv(model):
mean, std = metric_values[-1]
metrics["{}-{}-mean".format(stage, metric_name)] = mean
metrics["{}-{}-std".format(stage, metric_name)] = std
else:
metrics["{}-{}".format(stage, metric_name)] = metric_values[-1]
if metrics:
self.run.log_metrics(step=epoch, **metrics)
return False
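# Illustrative usage sketch (assumes a params dict and a DMatrix named `dtrain` already
# exist; not part of the original module):
#   booster = xgb.train(
#       params,
#       dtrain,
#       num_boost_round=20,
#       evals=[(dtrain, "train")],
#       callbacks=[Callback(log_model=True, log_importance=True)],
#   )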
```
#### File: processors/events_processors/events_video_processors.py
```python
from polyaxon.constants.globals import UNKNOWN
from polyaxon.utils.np_utils import calculate_scale_factor, to_np
from polyaxon.utils.path_utils import check_or_create_path
from traceml.events import V1EventVideo
from traceml.logger import logger
from traceml.processors.errors import MOVIEPY_ERROR_MESSAGE, NUMPY_ERROR_MESSAGE
try:
import numpy as np
except ImportError:
np = None
def video(
asset_path: str, tensor, fps=4, content_type="gif", asset_rel_path: str = None
):
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
tensor = to_np(tensor)
tensor = prepare_video(tensor)
# If user passes in uint8, then we don't need to rescale by 255
scale_factor = calculate_scale_factor(tensor)
tensor = tensor.astype(np.float32)
tensor = (tensor * scale_factor).astype(np.uint8)
return make_video(
asset_path, tensor, fps, content_type, asset_rel_path=asset_rel_path
)
def make_video(
asset_path: str, tensor, fps, content_type="gif", asset_rel_path: str = None
):
try:
import moviepy # noqa: F401
except ImportError:
logger.warning(MOVIEPY_ERROR_MESSAGE)
return UNKNOWN
try:
from moviepy import editor as mpy
except ImportError:
logger.warning(
"moviepy is installed, but can't import moviepy.editor.",
"Some packages could be missing [imageio, requests]",
)
return
t, h, w, c = tensor.shape
# encode sequence of images into gif string
clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
check_or_create_path(asset_path, is_dir=False)
try: # older version of moviepy
if content_type == "gif":
clip.write_gif(asset_path, verbose=False, progress_bar=False)
else:
clip.write_videofile(asset_path, verbose=False, progress_bar=False)
except TypeError:
if content_type == "gif":
clip.write_gif(asset_path, verbose=False)
else:
clip.write_videofile(asset_path, verbose=False)
return V1EventVideo(
height=h,
width=w,
colorspace=c,
path=asset_rel_path or asset_path,
content_type=content_type,
)
def prepare_video(data):
"""
Converts a 5D tensor [batchsize, time(frame), channel(color), height, width]
into 4D tensor with dimension [time(frame), new_width, new_height, channel].
A batch of images are spreaded to a grid, which forms a frame.
e.g. Video with batchsize 16 will have a 4x4 grid.
"""
if not np:
logger.warning(NUMPY_ERROR_MESSAGE)
return UNKNOWN
b, t, c, h, w = data.shape
if data.dtype == np.uint8:
data = np.float32(data) / 255.0
def is_power2(num):
return num != 0 and ((num & (num - 1)) == 0)
# pad to nearest power of 2, all at once
if not is_power2(data.shape[0]):
len_addition = int(2 ** data.shape[0].bit_length() - data.shape[0])
data = np.concatenate(
(data, np.zeros(shape=(len_addition, t, c, h, w))), axis=0
)
n_rows = 2 ** ((b.bit_length() - 1) // 2)
n_cols = data.shape[0] // n_rows
data = np.reshape(data, newshape=(n_rows, n_cols, t, c, h, w))
data = np.transpose(data, axes=(2, 0, 4, 1, 5, 3))
return np.reshape(data, newshape=(t, n_rows * h, n_cols * w, c))
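# Illustrative example (not part of the original module): a batch of 16 videos with
# shape (16, 30, 3, 64, 64) -- (batch, time, channel, height, width) -- is tiled into
# a 4x4 grid per frame, giving an output of shape (30, 256, 256, 3).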
```
#### File: traceml/processors/psutil_processor.py
```python
from typing import Dict, List
from traceml.processors.events_processors import metrics_dict_to_list
try:
import psutil
except ImportError:
psutil = None
def can_log_psutil_resources():
return psutil is not None
def query_psutil() -> Dict:
results = {}
try:
# psutil <= 5.6.2 did not have getloadavg:
if hasattr(psutil, "getloadavg"):
results["load"] = psutil.getloadavg()[0]
else:
# Do not log an empty metric
pass
except OSError:
pass
vm = psutil.virtual_memory()
results["cpu"] = psutil.cpu_percent(interval=None)
results["memory"] = vm.percent
return results
def get_psutils_metrics() -> List:
return metrics_dict_to_list(query_psutil())
```
#### File: traceml/processors/units_processors.py
```python
from typing import Union
def _sanitize_value(value: Union[str, int, float]) -> Union[int, float]:
fvalue = float(value)
ivalue = int(fvalue)
return ivalue if ivalue == fvalue else fvalue
def to_cpu_value(cpu_definition: Union[str, int, float]) -> float:
try:
return float(cpu_definition)
except (ValueError, TypeError):
pass
cpu_definition = cpu_definition.lower()
cpu_unit = cpu_definition[-1]
cpu_value = cpu_definition[:-1]
if cpu_unit == "m":
cpu = _sanitize_value(cpu_value) / 1000
elif cpu_unit == "u":
cpu = _sanitize_value(cpu_value) / 1000**2
elif cpu_unit == "n":
cpu = _sanitize_value(cpu_value) / 1000**3
else:
cpu = cpu_definition
return _sanitize_value(cpu)
def to_memory_bytes(mem_definition: Union[str, int, float]) -> int:
try:
return int(float(mem_definition))
except (ValueError, TypeError):
pass
def _get_value(unit, value, multiplier):
if unit in multiplier.keys():
return _sanitize_value(value) * multiplier.get(unit, 1)
fixed_point_unit_multiplier = {
"k": 1000,
"m": 1000**2,
"g": 1000**3,
"t": 1000**4,
"p": 1000**5,
"e": 1000**6,
}
power_two_unit_multiplier = {
"ki": 1024,
"mi": 1024**2,
"gi": 1024**3,
"ti": 1024**4,
"pi": 1024**5,
"ei": 1024**6,
}
mem_definition = mem_definition.lower()
mem_unit = mem_definition[-2:]
mem_value = mem_definition[:-2]
memory = _get_value(mem_unit, mem_value, power_two_unit_multiplier)
if memory is not None:
return memory
mem_unit = mem_definition[-1:]
mem_value = mem_definition[:-1]
memory = _get_value(mem_unit, mem_value, fixed_point_unit_multiplier)
if memory is not None:
return memory
return 0
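# Illustrative examples (not part of the original module):
#   to_cpu_value("500m")   -> 0.5           (millicores to cores)
#   to_cpu_value(2)        -> 2.0
#   to_memory_bytes("2Gi") -> 2147483648    (power-of-two unit)
#   to_memory_bytes("1k")  -> 1000          (fixed-point unit)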
def to_unit_memory(number, precision: int = 2):
"""Creates a string representation of memory size given `number`."""
kb = 1024
number /= kb
if number < 100:
return "{} Ki".format(round(number, precision))
number /= kb
if number < 300:
return "{} Mi".format(round(number, precision))
number /= kb
if number < 900:
return "{} Gi".format(round(number, precision))
number /= kb
if number < 900:
return "{} Ti".format(round(number, precision))
number /= kb
if number < 900:
return "{} Pi".format(round(number, precision))
number /= kb
return "{} Ei".format(round(number, precision))
def number_percentage_format(x, precision: int = None, use_comma: bool = False):
if precision is None:
return x
eps = 0.000000001
comma = "," if use_comma else ""
num_format = (
"{{0:{}.0f}}".format(comma)
if abs(int(x) - x) < eps
else "{{0:{}.{}f}}".format(comma, precision)
)
return num_format.format(x)
def to_percentage(
number, rounding: int = 2, precision: int = None, use_comma: bool = False
):
"""Creates a percentage string representation from the given `number`. The
number is multiplied by 100 before adding a '%' character.
Raises `ValueError` if `number` cannot be converted to a number.
"""
number = float(number) * 100
number_as_int = int(number)
rounded = round(number, rounding)
value = (
number_as_int
if number_as_int == rounded
else number_percentage_format(rounded, precision, use_comma)
)
return "{}%".format(value)
def format_sizeof(num, suffix="B"):
"""
Print in human friendly format
"""
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
```
#### File: traceml/serialization/base.py
```python
import os
from typing import Dict, List
from polyaxon.utils.path_utils import check_or_create_path
from traceml.events import LoggedEventSpec
from traceml.events.schemas import LoggedEventListSpec
class EventWriter:
EVENTS_BACKEND = "events"
RESOURCES_BACKEND = "resources"
def __init__(self, run_path: str, backend: str):
self._events_backend = backend
self._run_path = run_path
self._files = {} # type: Dict[str, LoggedEventListSpec]
self._closed = False
def _get_event_path(self, kind: str, name: str) -> str:
if self._events_backend == self.EVENTS_BACKEND:
return os.path.join(
self._run_path, self._events_backend, kind, "{}.plx".format(name)
)
if self._events_backend == self.RESOURCES_BACKEND:
return os.path.join(
self._run_path, self._events_backend, kind, "{}.plx".format(name)
)
raise ValueError("Unrecognized backend {}".format(self._events_backend))
def _init_events(self, events_spec: LoggedEventListSpec):
event_path = self._get_event_path(kind=events_spec.kind, name=events_spec.name)
# Check if the file exists otherwise initialize
if not os.path.exists(event_path):
check_or_create_path(event_path, is_dir=False)
with open(event_path, "w") as event_file:
event_file.write(events_spec.get_csv_header())
def _append_events(self, events_spec: LoggedEventListSpec):
event_path = self._get_event_path(kind=events_spec.kind, name=events_spec.name)
with open(event_path, "a") as event_file:
event_file.write(events_spec.get_csv_events())
def _events_to_files(self, events: List[LoggedEventSpec]):
for event in events:
file_name = "{}.{}".format(event.kind, event.name)
if file_name in self._files:
self._files[file_name].events.append(event.event)
else:
self._files[file_name] = LoggedEventListSpec(
kind=event.kind, name=event.name, events=[event.event]
)
self._init_events(self._files[file_name])
def write(self, events: List[LoggedEventSpec]):
if not events:
return
if isinstance(events, LoggedEventSpec):
events = [events]
self._events_to_files(events)
def flush(self):
for file_name in self._files:
events_spec = self._files[file_name]
if events_spec.events:
self._append_events(events_spec)
self._files[file_name].empty_events()
def close(self):
self.flush()
self._closed = True
@property
def closed(self):
return self._closed
class BaseFileWriter:
"""Writes `LoggedEventSpec` to event files.
The `EventFileWriter` class creates a event files in the run path,
and asynchronously writes Events to the files.
"""
def __init__(self, run_path: str):
self._run_path = run_path
check_or_create_path(run_path, is_dir=True)
@property
def run_path(self):
return self._run_path
def add_event(self, event: LoggedEventSpec):
if not isinstance(event, LoggedEventSpec):
raise TypeError("Expected an LoggedEventSpec, " " but got %s" % type(event))
self._async_writer.write(event)
def add_events(self, events: List[LoggedEventSpec]):
for e in events:
if not isinstance(e, LoggedEventSpec):
raise TypeError("Expected an LoggedEventSpec, " " but got %s" % type(e))
self._async_writer.write(events)
def flush(self):
"""Flushes the event files to disk.
Call this method to make sure that all pending events have been
written to disk.
"""
self._async_writer.flush()
def close(self):
"""Performs a final flush of the event files to disk, stops the
write/flush worker and closes the files.
Call this method when you do not need the writer anymore.
"""
self._async_writer.close()
``` |
{
"source": "JinheonBaek/DS-RedBlackTree",
"score": 3
} |
#### File: JinheonBaek/DS-RedBlackTree/main.py
```python
import os
from Node import Node
from Tree import RBT
def search():
filenames = os.listdir('./input/')
filename_lst = []
for filename in filenames:
filename_lst.append(filename)
return filename_lst
def getFileData(names):
data = []
_dir = './input/'
for name in names:
f = open(_dir + name, 'r')
lines = f.readlines()
tmp_data = []
for line in lines:
tmp_data.append(int(line.strip("\n")))
data.append(tmp_data)
f.close()
return data
def main():
#init
names = search()
datas = getFileData(names)
sequence = 0
for data in datas:
rbt = RBT()
for i in data:
if i > 0:
rbt.insert(rbt.root, Node(i))
elif i < 0:
rbt.delete(rbt.root, -i)
else:
break
print("filename = " + names[sequence])
rbt.printNodeCount(rbt.root)
rbt.printInsertNode(rbt.root)
rbt.printDeleteNode(rbt.root)
rbt.printMissNode(rbt.root)
rbt.printBlackNodeCount(rbt.root)
rbt.printBlackHeight(rbt.root)
rbt.inOrderTraversal(rbt.root)
sequence += 1
main()
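# Illustrative input format (assumed from the loop above; not shipped with the repo):
# each file in ./input/ holds one integer per line -- a positive value inserts that key,
# a negative value deletes its absolute value, and 0 stops processing, e.g.:
#   10
#   20
#   -10
#   0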
``` |
{
"source": "JinheonBaek/ProgrammingLanguage",
"score": 4
} |
#### File: ProgrammingLanguage/HW4/main.py
```python
import random
import person, beverage
def make_random_order(menus):
"""
Make random orders (between 1 and 9 beverages). \n
This function uses random sampling with replacement.
Return:
list of sampled beverages (not instanced yet)
"""
return random.choices(list(menus.values()), k = random.randrange(1, 10))
def processCustomerOrder(employee, customer, menus):
"""
Process customer with one selected employee. \n
"""
## Selected employee sets current customer
employee.currentCustomer = customer
## Customer requests menu using get_menu() function
### In this setting, get_menu() return only true
if customer.get_menu():
employee.print_menu(menus = menus)
## Customer makes order with order list randomly produced
customer.make_order(make_random_order(menus))
## Print time of the ordered beverages.
## After that, print price of the ordered beverages.
print("소요시간은 {}초 입니다.".format(employee.get_time(customer = employee.currentCustomer)))
print("주문하신 음료에 대한 가격은 {}원 입니다.".format(employee.get_price(customer = employee.currentCustomer)))
def main():
## employee list (our cafe shop has four employees)
employeeList = [
person.Employee('창기'),
person.Employee('재용'),
person.Employee('건형'),
person.Employee('은혁')
]
## Beverage menus (our cafe shop has below beverages)
menus = {
'Americano': beverage.Americano,
'CafeLatte': beverage.CafeLatte,
'Cappuccino': beverage.Cappuccino,
'VanillaLatte': beverage.VanillaLatte,
'CafeMocha': beverage.CafeMocha,
'CaramelMaki': beverage.CaramelMaki,
'HotChocolate': beverage.HotChocolate,
'MintChocolate': beverage.MintChocolate
}
## Customer Queue
### Using append, new customer is comming
### Using customerList[1:], dequeue customer that ordered beverages
customerList = []
## Customer '시우' comes into the cafe
customerList.append(person.Customer('시우'))
## Process the customer in the queue.
for customer in customerList:
processCustomerOrder(random.choice(employeeList), customer, menus)
customerList = customerList[1:]
## Customer '소영', '진헌', '하윤' come into the cafe
customerList.append(person.KoreaUniver('소영'))
customerList.append(person.KoreaUniver('진헌'))
customerList.append(person.Youth('하윤'))
## Process the customer in the queue.
for customer in customerList:
processCustomerOrder(random.choice(employeeList), customer, menus)
customerList = customerList[1:]
## Customer '민준', '예준' come into the cafe
customerList.append(person.Customer('민준'))
customerList.append(person.Youth('예준'))
## Process the customer in the queue.
for customer in customerList:
processCustomerOrder(random.choice(employeeList), customer, menus)
customerList = customerList[1:]
if __name__ == '__main__':
main()
``` |
{
"source": "JinheonBaek/pytorch_geometric",
"score": 2
} |
#### File: torch_geometric/data/separate.py
```python
from typing import Any
from collections.abc import Mapping, Sequence
from torch import Tensor
from torch_sparse import SparseTensor
from torch_geometric.data.data import BaseData
from torch_geometric.data.storage import BaseStorage
def separate(cls, batch: BaseData, idx: int, slice_dict: Any,
inc_dict: Any = None, decrement: bool = True) -> BaseData:
# Separates the individual element from a `batch` at index `idx`.
# `separate` can handle both homogeneous and heterogeneous data objects by
# individually separating all their stores.
# In addition, `separate` can handle nested data structures such as
# dictionaries and lists.
data = cls().stores_as(batch)
# We iterate over each storage object and recursively separate all its
# attributes:
for batch_store, data_store in zip(batch.stores, data.stores):
key = batch_store._key
if key is not None:
attrs = slice_dict[key].keys()
else:
attrs = [
attr for attr in slice_dict.keys()
if attr in set(batch_store.keys())
]
for attr in attrs:
if key is not None:
slices = slice_dict[key][attr]
incs = inc_dict[key][attr] if decrement else None
else:
slices = slice_dict[attr]
incs = inc_dict[attr] if decrement else None
data_store[attr] = _separate(attr, batch_store[attr], idx, slices,
incs, batch, batch_store, decrement)
# The `num_nodes` attribute needs special treatment, as we cannot infer
# the real number of nodes from the total number of nodes alone:
if 'num_nodes' in batch_store:
data_store.num_nodes = batch_store._num_nodes[idx]
return data
def _separate(
key: str,
value: Any,
idx: int,
slices: Any,
incs: Any,
batch: BaseData,
store: BaseStorage,
decrement: bool,
) -> Any:
if isinstance(value, Mapping):
# Recursively separate elements of dictionaries.
return {
key: _separate(key, elem, idx, slices[key],
incs[key] if decrement else None, batch, store,
decrement)
for key, elem in value.items()
}
elif (isinstance(value, Sequence) and isinstance(value[0], Sequence)
and not isinstance(value[0], str)
and isinstance(value[0][0], (Tensor, SparseTensor))):
# Recursively separate elements of lists of lists.
return [
_separate(key, elem, idx, slices[i],
incs[i] if decrement else None, batch, store, decrement)
for i, elem in enumerate(value)
]
elif isinstance(value, Tensor):
# Narrow a `torch.Tensor` based on `slices`.
# NOTE: We need to take care of decrementing elements appropriately.
cat_dim = batch.__cat_dim__(key, value, store)
start, end = slices[idx], slices[idx + 1]
value = value.narrow(cat_dim or 0, start, end - start)
value = value.squeeze(0) if cat_dim is None else value
if decrement and (incs.dim() > 1 or int(incs[idx]) != 0):
value = value - incs[idx]
return value
elif isinstance(value, SparseTensor) and decrement:
# Narrow a `SparseTensor` based on `slices`.
# NOTE: `cat_dim` may return a tuple to allow for diagonal stacking.
cat_dim = batch.__cat_dim__(key, value, store)
cat_dims = (cat_dim, ) if isinstance(cat_dim, int) else cat_dim
for i, dim in enumerate(cat_dims):
start, end = int(slices[idx][i]), int(slices[idx + 1][i])
value = value.narrow(dim, start, end - start)
return value
else:
return value[idx]
```
#### File: torch_geometric/graphgym/model_builder.py
```python
import torch
from torch_geometric.graphgym.config import cfg
from torch_geometric.graphgym.models.gnn import GNN
import torch_geometric.graphgym.register as register
network_dict = {
'gnn': GNN,
}
network_dict = {**register.network_dict, **network_dict}
def create_model(to_device=True, dim_in=None, dim_out=None):
r"""
Create model for graph machine learning
Args:
        to_device (bool): Whether to transfer the model to the configured device (cfg.device)
dim_in (int, optional): Input dimension to the model
dim_out (int, optional): Output dimension to the model
"""
dim_in = cfg.share.dim_in if dim_in is None else dim_in
dim_out = cfg.share.dim_out if dim_out is None else dim_out
# binary classification, output dim = 1
if 'classification' in cfg.dataset.task_type and dim_out == 2:
dim_out = 1
model = network_dict[cfg.model.type](dim_in=dim_in, dim_out=dim_out)
if to_device:
model.to(torch.device(cfg.device))
return model
```
#### File: graphgym/models/layer.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric as pyg
from torch_geometric.graphgym.config import cfg
from torch_geometric.graphgym.models.act import act_dict
from torch_geometric.graphgym.contrib.layer.generalconv import (
GeneralConvLayer, GeneralEdgeConvLayer)
import torch_geometric.graphgym.register as register
# General classes
class GeneralLayer(nn.Module):
"""
General wrapper for layers
Args:
name (string): Name of the layer in registered :obj:`layer_dict`
dim_in (int): Input dimension
dim_out (int): Output dimension
        has_act (bool): Whether to apply an activation after the layer
        has_bn (bool): Whether to apply BatchNorm in the layer
        has_l2norm (bool): Whether to apply L2 normalization after the layer
**kwargs (optional): Additional args
"""
def __init__(self, name, dim_in, dim_out, has_act=True, has_bn=True,
has_l2norm=False, **kwargs):
super(GeneralLayer, self).__init__()
self.has_l2norm = has_l2norm
has_bn = has_bn and cfg.gnn.batchnorm
self.layer = layer_dict[name](dim_in, dim_out, bias=not has_bn,
**kwargs)
layer_wrapper = []
if has_bn:
layer_wrapper.append(
nn.BatchNorm1d(dim_out, eps=cfg.bn.eps, momentum=cfg.bn.mom))
if cfg.gnn.dropout > 0:
layer_wrapper.append(
nn.Dropout(p=cfg.gnn.dropout, inplace=cfg.mem.inplace))
if has_act:
layer_wrapper.append(act_dict[cfg.gnn.act])
self.post_layer = nn.Sequential(*layer_wrapper)
def forward(self, batch):
batch = self.layer(batch)
if isinstance(batch, torch.Tensor):
batch = self.post_layer(batch)
if self.has_l2norm:
batch = F.normalize(batch, p=2, dim=1)
else:
batch.x = self.post_layer(batch.x)
if self.has_l2norm:
batch.x = F.normalize(batch.x, p=2, dim=1)
return batch
class GeneralMultiLayer(nn.Module):
"""
General wrapper for a stack of multiple layers
Args:
name (string): Name of the layer in registered :obj:`layer_dict`
num_layers (int): Number of layers in the stack
dim_in (int): Input dimension
dim_out (int): Output dimension
dim_inner (int): The dimension for the inner layers
        final_act (bool): Whether to apply an activation after the layer stack
**kwargs (optional): Additional args
"""
def __init__(self, name, num_layers, dim_in, dim_out, dim_inner=None,
final_act=True, **kwargs):
super(GeneralMultiLayer, self).__init__()
dim_inner = dim_in if dim_inner is None else dim_inner
for i in range(num_layers):
d_in = dim_in if i == 0 else dim_inner
d_out = dim_out if i == num_layers - 1 else dim_inner
has_act = final_act if i == num_layers - 1 else True
layer = GeneralLayer(name, d_in, d_out, has_act, **kwargs)
self.add_module('Layer_{}'.format(i), layer)
def forward(self, batch):
for layer in self.children():
batch = layer(batch)
return batch
# ---------- Core basic layers. Input: batch; Output: batch ----------------- #
class Linear(nn.Module):
"""
Basic Linear layer.
Args:
dim_in (int): Input dimension
dim_out (int): Output dimension
        bias (bool): Whether to include a bias term
**kwargs (optional): Additional args
"""
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(Linear, self).__init__()
self.model = nn.Linear(dim_in, dim_out, bias=bias)
def forward(self, batch):
if isinstance(batch, torch.Tensor):
batch = self.model(batch)
else:
batch.x = self.model(batch.x)
return batch
class BatchNorm1dNode(nn.Module):
"""
BatchNorm for node feature.
Args:
dim_in (int): Input dimension
"""
def __init__(self, dim_in):
super(BatchNorm1dNode, self).__init__()
self.bn = nn.BatchNorm1d(dim_in, eps=cfg.bn.eps, momentum=cfg.bn.mom)
def forward(self, batch):
batch.x = self.bn(batch.x)
return batch
class BatchNorm1dEdge(nn.Module):
"""
BatchNorm for edge feature.
Args:
dim_in (int): Input dimension
"""
def __init__(self, dim_in):
super(BatchNorm1dEdge, self).__init__()
self.bn = nn.BatchNorm1d(dim_in, eps=cfg.bn.eps, momentum=cfg.bn.mom)
def forward(self, batch):
batch.edge_attr = self.bn(batch.edge_attr)
return batch
class MLP(nn.Module):
"""
Basic MLP model.
    Here a 1-layer MLP is equivalent to a Linear layer.
Args:
dim_in (int): Input dimension
dim_out (int): Output dimension
        bias (bool): Whether to include a bias term
dim_inner (int): The dimension for the inner layers
num_layers (int): Number of layers in the stack
**kwargs (optional): Additional args
"""
def __init__(self, dim_in, dim_out, bias=True, dim_inner=None,
num_layers=2, **kwargs):
super(MLP, self).__init__()
dim_inner = dim_in if dim_inner is None else dim_inner
layers = []
if num_layers > 1:
layers.append(
GeneralMultiLayer('linear', num_layers - 1, dim_in, dim_inner,
dim_inner, final_act=True))
layers.append(Linear(dim_inner, dim_out, bias))
else:
layers.append(Linear(dim_in, dim_out, bias))
self.model = nn.Sequential(*layers)
def forward(self, batch):
if isinstance(batch, torch.Tensor):
batch = self.model(batch)
else:
batch.x = self.model(batch.x)
return batch
class GCNConv(nn.Module):
"""
Graph Convolutional Network (GCN) layer
"""
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GCNConv, self).__init__()
self.model = pyg.nn.GCNConv(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class SAGEConv(nn.Module):
"""
GraphSAGE Conv layer
"""
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(SAGEConv, self).__init__()
self.model = pyg.nn.SAGEConv(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class GATConv(nn.Module):
"""
Graph Attention Network (GAT) layer
"""
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GATConv, self).__init__()
self.model = pyg.nn.GATConv(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class GINConv(nn.Module):
"""
Graph Isomorphism Network (GIN) layer
"""
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GINConv, self).__init__()
gin_nn = nn.Sequential(nn.Linear(dim_in, dim_out), nn.ReLU(),
nn.Linear(dim_out, dim_out))
self.model = pyg.nn.GINConv(gin_nn)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class SplineConv(nn.Module):
"""
SplineCNN layer
"""
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(SplineConv, self).__init__()
self.model = pyg.nn.SplineConv(dim_in, dim_out, dim=1, kernel_size=2,
bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index, batch.edge_attr)
return batch
class GeneralConv(nn.Module):
"""A general GNN layer"""
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GeneralConv, self).__init__()
self.model = GeneralConvLayer(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class GeneralEdgeConv(nn.Module):
"""A general GNN layer that supports edge features as well"""
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GeneralEdgeConv, self).__init__()
self.model = GeneralEdgeConvLayer(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index,
edge_feature=batch.edge_attr)
return batch
class GeneralSampleEdgeConv(nn.Module):
"""A general GNN layer that supports edge features and edge sampling"""
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GeneralSampleEdgeConv, self).__init__()
self.model = GeneralEdgeConvLayer(dim_in, dim_out, bias=bias)
def forward(self, batch):
edge_mask = torch.rand(batch.edge_index.shape[1]) < cfg.gnn.keep_edge
edge_index = batch.edge_index[:, edge_mask]
edge_feature = batch.edge_attr[edge_mask, :]
batch.x = self.model(batch.x, edge_index, edge_feature=edge_feature)
return batch
layer_dict = {
'linear': Linear,
'mlp': MLP,
'gcnconv': GCNConv,
'sageconv': SAGEConv,
'gatconv': GATConv,
'splineconv': SplineConv,
'ginconv': GINConv,
'generalconv': GeneralConv,
'generaledgeconv': GeneralEdgeConv,
'generalsampleedgeconv': GeneralSampleEdgeConv,
}
# register additional convs
layer_dict = {**register.layer_dict, **layer_dict}
```
#### File: graphgym/models/pooling.py
```python
from torch_scatter import scatter
import torch_geometric.graphgym.register as register
def global_add_pool(x, batch, size=None):
"""
Globally pool node embeddings into graph embeddings, via elementwise sum.
Pooling function takes in node embedding [num_nodes x emb_dim] and
batch (indices) and outputs graph embedding [num_graphs x emb_dim].
Args:
x (torch.tensor): Input node embeddings
batch (torch.tensor): Batch tensor that indicates which node
belongs to which graph
size (optional): Total number of graphs. Can be auto-inferred.
Returns: Pooled graph embeddings
"""
size = batch.max().item() + 1 if size is None else size
return scatter(x, batch, dim=0, dim_size=size, reduce='add')
def global_mean_pool(x, batch, size=None):
"""
Globally pool node embeddings into graph embeddings, via elementwise mean.
Pooling function takes in node embedding [num_nodes x emb_dim] and
batch (indices) and outputs graph embedding [num_graphs x emb_dim].
Args:
x (torch.tensor): Input node embeddings
batch (torch.tensor): Batch tensor that indicates which node
belongs to which graph
size (optional): Total number of graphs. Can be auto-inferred.
Returns: Pooled graph embeddings
"""
size = batch.max().item() + 1 if size is None else size
return scatter(x, batch, dim=0, dim_size=size, reduce='mean')
def global_max_pool(x, batch, size=None):
"""
Globally pool node embeddings into graph embeddings, via elementwise max.
Pooling function takes in node embedding [num_nodes x emb_dim] and
batch (indices) and outputs graph embedding [num_graphs x emb_dim].
Args:
x (torch.tensor): Input node embeddings
batch (torch.tensor): Batch tensor that indicates which node
belongs to which graph
size (optional): Total number of graphs. Can be auto-inferred.
Returns: Pooled graph embeddings
"""
size = batch.max().item() + 1 if size is None else size
return scatter(x, batch, dim=0, dim_size=size, reduce='max')
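# Illustrative example (assumes `import torch`; not part of the original module):
#   x = torch.randn(5, 16)                  # embeddings for 5 nodes
#   batch = torch.tensor([0, 0, 0, 1, 1])   # nodes 0-2 in graph 0, nodes 3-4 in graph 1
#   global_mean_pool(x, batch).shape        # -> torch.Size([2, 16])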
pooling_dict = {
'add': global_add_pool,
'mean': global_mean_pool,
'max': global_max_pool
}
pooling_dict = {**register.pooling_dict, **pooling_dict}
``` |
{
"source": "jinho10/openshift-client-python",
"score": 2
} |
#### File: packages/openshift/__init__.py
```python
from __future__ import absolute_import
from .context import *
from .base_verbs import *
from .model import OpenShiftPythonException
from .model import Model, Missing
from .selector import *
from .apiobject import *
from . import naming
from . import status
from . import config
from .ansible import ansible
# Single source for module version
__VERSION__ = __version__ = '1.0.6'
null = None # Allow scripts to specify null in object definitions
# Allows modules to trigger errors
def error(msg, **kwargs):
raise OpenShiftPythonException(msg, **kwargs)
# Convenience method for accessing the module version
def get_module_version():
return __VERSION__
``` |
{
"source": "Jinho-Choi123/FTP-Server",
"score": 2
} |
#### File: FTP-Server/fileupload/tests.py
```python
from ftp_server.settings import BASE_DIR
import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from .models import File, FileGroup
from .serializers import FileGroupSerializer, FileSerializer
import logging
logger = logging.getLogger(__name__)
class FileUploadTestCase1(APITestCase):
def test_fileupload(self):
with open(BASE_DIR/'testfiles/video1.mp4', 'rb') as video1, open(BASE_DIR/'testfiles/video2.mp4', 'rb') as video2, open(BASE_DIR/'testfiles/image1.jpg', 'rb') as image1:
            # a dict literal cannot hold duplicate keys, so pass all files as a list
            # under the 'uploadfile' field and send them as the request data
            data = {
                'uploadfile': [video1, video2, image1],
            }
            response = self.client.post("/fileupload/", data, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
```
#### File: FTP-Server/fileupload/views.py
```python
import datetime
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import File, FileGroup
from .serializers import FileSerializer, FileGroupSerializer
from rest_framework.parsers import MultiPartParser, FormParser
import uuid
class FileUpload(APIView):
def post(self, request):
#files is list of file object
files = request.FILES.getlist('uploadfile')
filegroup_ = FileGroup()
for file in files:
file_ = File(group = filegroup_, content = file)
filegroup_.addfile(file_)
file_.save()
filegroup_.save()
filegroupserializer = FileGroupSerializer(filegroup_)
filegroupData= filegroupserializer.data
return Response(filegroupData)
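# Illustrative request (assumes the dev server on port 8000 and that the project routes
# /fileupload/ to this view, as the test suite does; not part of the original module):
#   curl -X POST http://localhost:8000/fileupload/ \
#        -F "uploadfile=@video1.mp4" -F "uploadfile=@image1.jpg"
# The response echoes the serialized FileGroup for the uploaded files.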
``` |
{
"source": "jinhojang6/ai-detection-practice",
"score": 2
} |
#### File: detection_processing/stastics/test_face_improved.py
```python
import argparse
import cv2 as cv
import numpy as np
import analysis_perframe as pfh
import analysis_stastics
from keras.models import load_model
import time
import sys
sys.path.append('..')
from yolo_utils import infer_image
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
from statistics import mode
def test_face_improved(path_in, path_out, suffix = 'face_improved'):
parser = argparse.ArgumentParser()
FLAGS, unparsed = parser.parse_known_args()
FLAGS.model_path = '../yolov3-coco/'
FLAGS.weights = '../yolov3-coco/yolov3-wider_16000.weights'
FLAGS.config = '../yolov3-coco/yolov3-face.cfg'
FLAGS.video_path = path_in
FLAGS.video_output_path = f'{path_out}_{suffix}.avi'
FLAGS.labels = '../yolov3-coco/coco-labels'
FLAGS.confidence = 0.1
FLAGS.threshold = 0.3
FLAGS.download_model = False
FLAGS.show_time = False
emotion_model_path = '../models/emotion_model.hdf5'
emotion_classifier = load_model(emotion_model_path)
emotion_target_size = emotion_classifier.input_shape[1:3]
emotion_labels = get_labels('fer2013')
emotion_offsets = (20, 40)
emotion_window = []
frame_window = 10
face_cascade = cv.CascadeClassifier('../models/haarcascade_frontalface_default.xml')
vid = cv.VideoCapture(FLAGS.video_path)
height, width, writer = None, None, None
labels = open(FLAGS.labels).read().strip().split('\n')
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
net = cv.dnn.readNetFromDarknet(FLAGS.config, FLAGS.weights)
layer_names = net.getLayerNames()
layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
print(f'starting {suffix}')
time_0 = time.time()
frame_number = 0
while True:
grabbed, frame = vid.read()
if not grabbed:
break
else:
frame_number += 1
if width is None or height is None:
height, width = frame.shape[:2]
img, boxes, confidences, classids, idxs = infer_image(net, layer_names, height, width, frame, colors, labels, FLAGS)
gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
rgb_image = cv.cvtColor(img, cv.COLOR_BGR2RGB)
faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,
minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)
analysis_stastics.emotions.add_frame()
for face_coordinates in faces:
x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
gray_face = gray_image[y1:y2, x1:x2]
try:
gray_face = cv.resize(gray_face, (emotion_target_size))
except:
continue
gray_face = preprocess_input(gray_face, True)
gray_face = np.expand_dims(gray_face, 0)
gray_face = np.expand_dims(gray_face, -1)
emotion_prediction = emotion_classifier.predict(gray_face)
emotion_probability = np.max(emotion_prediction)
emotion_label_arg = np.argmax(emotion_prediction)
emotion_text = emotion_labels[emotion_label_arg]
emotion_window.append(emotion_text)
analysis_stastics.emotions.add_emotion(emotion_text)
if len(emotion_window) > frame_window:
emotion_window.pop(0)
try:
emotion_mode = mode(emotion_window)
except:
continue
if emotion_text == 'angry':
color = emotion_probability * np.asarray((255, 0, 0))
elif emotion_text == 'sad':
color = emotion_probability * np.asarray((0, 0, 255))
elif emotion_text == 'happy':
color = emotion_probability * np.asarray((255, 255, 0))
elif emotion_text == 'surprise':
color = emotion_probability * np.asarray((0, 255, 255))
else:
color = emotion_probability * np.asarray((0, 255, 0))
color = color.astype(int)
color = color.tolist()
draw_text(face_coordinates, rgb_image, emotion_mode,
color, 0, -45, 1, 1)
img = cv.cvtColor(rgb_image, cv.COLOR_RGB2BGR)
output_array = []
for index in range(len(classids)):
output_array.append({'name' : labels[classids[index]], 'percentage_probability' : confidences[index] * 100})
pfh.per_frame_handler(frame_number, output_array, suffix = suffix)
if writer is None:
fourcc = cv.VideoWriter_fourcc(*"MJPG")
writer = cv.VideoWriter(FLAGS.video_output_path, fourcc, 30, (img.shape[1], img.shape[0]), True)
writer.write(img)
writer.release()
vid.release()
print(f'mode {suffix} finished, elapsed time : {time.time() - time_0}s')
``` |
{
"source": "jinhojang6/opencv-project",
"score": 3
} |
#### File: opencv-project/project_1/thresholding_opt1.py
```python
import sys
import time
import numpy as np
import cv2
print(sys.executable)
print(sys.version)
print(cv2.__version__)
def evaluate_threshold(path, threshold):
cap = cv2.VideoCapture(video_file)
timeP = time.time()
diff_sum = 0
if cap.isOpened():
ret, img = cap.read()
while ret:
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh_image = cv2.threshold(gray_image, threshold, 255, cv2.THRESH_BINARY)
M1 = np.sum(thresh_image) / 255
M0 = np.size(thresh_image) - M1
diff_sum += abs(M0 - M1)
ret, img = cap.read()
print(f'thresh {threshold}: {diff_sum}, elapsed time: {time.time() - timeP}s')
return diff_sum
def evaluate_section(path, value_list = [-1] * 256, init = 0, end = 255, div = 4):
div = min(div, end - init)
thresh_list = [0] * (div + 1)
eval_list = [0] * (div + 1)
for index in range(div + 1):
threshold = init + ((end - init) * index // div)
thresh_list[index] = threshold
if value_list[threshold] < 0:
value_list[threshold] = evaluate_threshold(path, threshold)
eval_list[index] = value_list[threshold]
for index in range(div + 1):
if index == div:
index_min = index
break
if eval_list[index + 1] > eval_list[index]:
index_min = index
break
if div == (end - init):
return thresh_list[index_min], eval_list[index_min]
else:
return evaluate_section(path, value_list, thresh_list[max(0, index_min - 1)], thresh_list[min(div, index_min + 1)], div)
video_file = "./data/butterflies.mp4"
cap = cv2.VideoCapture(video_file)
print(f'frame size: {cap.get(cv2.CAP_PROP_FRAME_WIDTH)} by {cap.get(cv2.CAP_PROP_FRAME_HEIGHT)}')
print(f'frame count: {cap.get(cv2.CAP_PROP_FRAME_COUNT)}')
print(f'pixel count: {cap.get(cv2.CAP_PROP_FRAME_WIDTH) * cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * cap.get(cv2.CAP_PROP_FRAME_COUNT) / 1000000}M')
timeI = time.time()
thresh_opt, diff_opt = evaluate_section(video_file)
print(f'total elapsed time: {time.time() - timeI}s')
print(f'optimal threshold: {thresh_opt} at diff_sum {diff_opt}')
```
#### File: opencv-project/project_2/test_default.py
```python
import os
from imageai.Detection import VideoObjectDetection
import analysis_perframe as pfh
def test_default(path_in, path_out, path_model = os.path.join(os.getcwd() , 'models\\yolo.h5'), speed = 'fast'):
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(path_model)
detector.loadModel(detection_speed = speed) #fast, faster, fastest, flash
detector.detectObjectsFromVideo(
input_file_path = path_in,
output_file_path = path_out + '_default',
frames_per_second = 20,
per_frame_function = pfh.per_frame_handler,
minimum_percentage_probability = 30,
return_detected_frame = True
)
``` |
{
"source": "jinhong666/Python",
"score": 2
} |
#### File: AvailabilityMonitor/AmConfig/domainconfig.py
```python
import configparser
class DomainConf:
@property
def DomainName(self):
return self._domainname
@property
def Protocol(self):
return self._protocol
@property
def VIP(self):
return self._vip
@property
def SrcIpList(self):
return self._srcIpList
@property
def UrlDic(self):
return self._urlDic
@property
def UrlValidDic(self):
return self._urlValidDic
def __init__(self,path):
self._parser = configparser.ConfigParser()
self._parser.read(path)
self._domainname = self._parser.get("domain", "domainname")
self._protocol = self._parser.get("domain","protocol")
self._vip = self._parser.get("domain","vip")
self._srcIpList = self._parser.get("domain","srcip").split(',')
self._urlDic = self._readUrlDic()
self._urlValidDic = self._readValidDic()
def _readUrlDic(self):
options = self._parser.options("address")
urlDic = {}
for optionName in options:
urlDic[optionName] = self._parser.get("address",optionName)
return urlDic
def _readValidDic(self):
options = self._parser.options("valid")
validDic = {}
for optionName in options:
validDic[optionName] = self._parser.get("valid",optionName)
return validDic
```
#### File: AvailabilityMonitor/Worker/__init__.py
```python
from DbAccess.dbaccesser import DbAccesser
from WebAccess.webaccesser import WebAccesser
class MainWorker:
def __init__(self,mainConf,logger):
self._logger = logger
self._conf = mainConf
self._db = DbAccesser(self._conf.DbHost, self._conf.DbUserName, self._conf.DbPassword, self._conf.DbName)
self._webAccesser = WebAccesser('')
self._webAccesser.SetUserAgent("ycapp web monitor")
def Work(self):
for domainConf in self._conf.DomainConfs:
self._checkDomain(domainConf)
def _checkDomain(self, domainConf):
for urlKey in list(domainConf.UrlDic.keys()):
url = domainConf.UrlDic[urlKey]
fullUrl = 'http://' + domainConf.DomainName + url
self._webAccesser.SetUrl(fullUrl)
self._checkServerByIP(domainConf.DomainName,domainConf.VIP,True)
for srcIP in domainConf.SrcIpList:
self._checkServerByIP(domainConf.DomainName,srcIP,False)
def _checkServerByIP(self, domainName,ip,isVIP):
self._logger.info("perform domain ip:" + "[" + ip + "]" + domainName)
self._webAccesser.SetDomainIp(ip)
status = self._checkServer()
self._db.RecordMonitor(domainName, self._webAccesser.Url, ip, status, isVIP)
def _checkServer(self):
resData = self._webAccesser.Request()
if len(resData) == 0:
self._logger.info("Error\t" + self._webAccesser.Url)
status = 0
else:
self._logger.info("OK\t" + self._webAccesser.Url)
status = 1
return status
``` |
{
"source": "jinhongtan/calculator3",
"score": 3
} |
#### File: calculator3/src/Calculator.py
```python
class Calculator:
def addition(self,a,b):
return a + b
def substraction(self,a,b):
return a-b
def multiplication(self,a,b):
return a*b
def division(self,a,b):
return float(a)/float(b)
```
#### File: calculator3/src/StatisticsCalc.py
```python
from Calculator import *
import collections
import sys
import math
class StatisticCalculator(Calculator):
#check the list is valid
# 1. not string
# 2. not empty
@staticmethod
def check(data):
if not all(isinstance(item,int) for item in data) or len(data)==0:
print("Your data include string type or is empty,please give valid data")
sys.exit()
# calculate mean of the list
@staticmethod
def mean(data):
StatisticCalculator.check(data)
Calc1=Calculator()
sum = 0
for x in data:
sum= Calc1.addition(x,sum)
return float(sum/len(data))
# calculate median of the list
@staticmethod
def median(data):
StatisticCalculator.check(data)
num_sorted = sorted(data)
length = len(num_sorted)
if length % 2 != 0:
return num_sorted[int(length/2)]
else:
return (num_sorted[int(length/2)]+num_sorted[int((length/2)-1)])/2
# calculate mode of the list
@staticmethod
def mode(data):
StatisticCalculator.check(data)
num = []
tuple = collections.Counter(data).most_common()
num.append(tuple[0][0])
for i in range(len(tuple) - 1):
if tuple[i][1] == tuple[i + 1][1]:
num.append(tuple[i + 1][0])
else:
break
return num
# calculate variance of the list
@staticmethod
def variance(data):
StatisticCalculator.check(data)
average=StatisticCalculator.mean(data)
res = sum((i - average) ** 2 for i in data) / (len(data)-1)
return res
# calculate standard variation of the list
@staticmethod
def stdvar(data):
StatisticCalculator.check(data)
average = StatisticCalculator.mean(data)
res = sum((i - average) ** 2 for i in data) / (len(data)-1)
return math.sqrt(res)
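# Illustrative usage (not part of the original module):
#   StatisticCalculator.mean([1, 2, 3, 4])        -> 2.5
#   StatisticCalculator.median([1, 2, 3, 4])      -> 2.5
#   StatisticCalculator.mode([1, 1, 2, 3])        -> [1]
#   StatisticCalculator.variance([1, 2, 3, 4, 5]) -> 2.5   (sample variance, n - 1 denominator)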
``` |