metadata | text
---|---
{
"source": "jiaolang771/aicad",
"score": 2
} |
#### File: aicad/ConCeptCNN/model.py
```python
import numpy as np
import scipy.io as sio
import os
import math
# plot the model
os.environ["PATH"] += os.pathsep + 'C:\\Program Files (x86)\\Graphviz2.38\\bin'
from keras.utils.vis_utils import plot_model
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix, accuracy_score
from keras.layers import Dense, Dropout, BatchNormalization, Activation, Input, Conv2D, Flatten, MaxPooling2D, GlobalAveragePooling2D, AveragePooling2D, concatenate
from keras.models import Sequential, Model
import keras
from indeption_model import inception_module
import json
import matplotlib.pyplot as plt
def check_models():
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 1), padding='valid', activation='relu', input_shape=(90, 90, 1), name='conv1'))
model.add(AveragePooling2D((3, 1), strides=(2, 1), name='AVG_pool1'))
model.add(Conv2D(64, kernel_size=(3, 1), padding='valid', activation='relu', name='conv2'))
model.add(AveragePooling2D((3, 1), strides=(2, 1), name='AVG_pool2'))
model.summary()
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 1), padding='valid', activation='relu', input_shape=(90, 90, 1), name='conv1'))
model.add(AveragePooling2D((3, 1), strides=(2, 1), name='AVG_pool1'))
model.add(Conv2D(64, kernel_size=(5, 1), padding='valid', activation='relu', name='conv2'))
model.add(AveragePooling2D((3, 1), strides=(2, 1), name='AVG_pool2'))
model.summary()
model = Sequential()
model.add(Conv2D(32, kernel_size=(7, 1), padding='valid', activation='relu', input_shape=(90, 90, 1), name='conv1'))
model.add(AveragePooling2D((3, 1), strides=(2, 1), name='AVG_pool1'))
model.add(Conv2D(64, kernel_size=(7, 1), padding='valid', activation='relu', name='conv2'))
model.add(AveragePooling2D((3, 1), strides=(2, 1), name='AVG_pool2'))
model.summary()
model = Sequential()
model.add(Conv2D(64, kernel_size=(90, 1), padding='valid', activation='relu', input_shape=(90, 90, 1), name='conv1'))
model.summary()
def build_fc(input_shape=(90, 90, 1)):  # channel axis included; Conv2D requires 4-D input
input_data = Input(shape=input_shape)
coarse_channel = Conv2D(64, kernel_size=(1, 3), padding='valid', activation='relu', name='coarse_conv1')(input_data)
coarse_channel = AveragePooling2D((1, 3), strides=(1, 2), name='coarse_AVG_pool1')(coarse_channel)
coarse_channel = Conv2D(128, kernel_size=(1, 3), padding='valid', activation='relu', name='coarse_conv2')(coarse_channel)
coarse_channel = AveragePooling2D((1, 3), strides=(1, 2), name='coarse_AVG_pool2')(coarse_channel)
medium_channel = Conv2D(64, kernel_size=(1, 5), padding='valid', activation='relu', name='medium_conv1')(input_data)
medium_channel = AveragePooling2D((1, 3), strides=(1, 2), name='medium_AVG_pool1')(medium_channel)
medium_channel = Conv2D(128, kernel_size=(1, 5), padding='valid', activation='relu', name='medium_conv2')(medium_channel)
medium_channel = AveragePooling2D((1, 3), strides=(1, 2), name='medium_AVG_pool2')(medium_channel)
fine_channel = Conv2D(64, kernel_size=(1, 7), padding='valid', activation='relu', name='fine_conv1')(input_data)
fine_channel = AveragePooling2D((1, 3), strides=(1, 2), name='fine_AVG_pool1')(fine_channel)
fine_channel = Conv2D(128, kernel_size=(1, 7), padding='valid', activation='relu', name='fine_conv2')(fine_channel)
fine_channel = AveragePooling2D((1, 3), strides=(1, 2), name='fine_AVG_pool2')(fine_channel)
global_channel = Conv2D(128, kernel_size=(1, 90), padding='valid', activation='relu', name='global_conv1')(input_data)
# merge the filtered feature maps from all channels
img_feat = concatenate([coarse_channel, medium_channel, fine_channel, global_channel], axis=2)
img_feat = Flatten()(img_feat)
img_feat = Dense(256, use_bias=False, name='dense1')(img_feat)
img_feat = Dropout(0.5)(img_feat)
img_feat = BatchNormalization()(img_feat)
img_feat = Dense(256, use_bias=False, name='dense2')(img_feat)
img_feat = Dropout(0.5)(img_feat)
img_feat = BatchNormalization()(img_feat)
out = Dense(1, use_bias=False)(img_feat)
out = Activation('sigmoid', name='prediction_layer')(out)
model = Model(inputs=input_data,
outputs=out,
name="Multi-filter-CNN")
return model
```
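A minimal usage sketch (not part of the repository) for the multi-filter CNN defined above; binary cross-entropy matches the single sigmoid output, while the Adam optimizer and learning rate are assumptions.
```python
from keras.optimizers import Adam

# Hypothetical usage of build_fc(); optimizer and learning rate are assumptions.
model = build_fc(input_shape=(90, 90, 1))
model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
```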
#### File: aicad/DWMA_segmentation_CNN/CNN_models.py
```python
from keras.models import Model, Sequential
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Flatten, Dense, Dropout, Activation
from keras.optimizers import RMSprop, Adam, SGD
from keras import initializers
from keras import backend as K
# act_fun was undefined in the original; it is exposed here as a keyword argument with a 'relu' default
def get_CNN(ksize, input_shape, act_fun='relu'):
## start experimenting networks
model = Sequential()
## first Cov layer
model.add(Conv2D(8, kernel_size=(ksize, ksize), strides=(1, 1),
activation=act_fun,
padding="valid",
kernel_initializer=initializers.glorot_uniform(),
input_shape=input_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
#model.add(Dropout(0.4))
## second Conv layer
model.add(Conv2D(8, kernel_size=(ksize, ksize),
activation=act_fun,
kernel_initializer=initializers.glorot_uniform(),
padding="valid"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
#model.add(Dropout(0.4))
## third Conv layer
model.add(Conv2D(16, (ksize, ksize),
activation='relu',
kernel_initializer=initializers.glorot_uniform(),
padding="valid"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
#model.add(Dropout(0.4))
## fully connected layers 1
model.add(Flatten())
model.add(Dense(10,kernel_initializer=initializers.glorot_uniform()))
model.add(BatchNormalization())
model.add(Activation(act_fun))
#model.add(Dropout(0.4))
## fully connected layers 2
# model.add(Dense(20,kernel_initializer=initializers.glorot_uniform()))
# model.add(BatchNormalization())
# model.add(Activation(act_fun))
#model.add(Dropout(0.4))
# output softmax layer
model.add(Dense(2,kernel_initializer=initializers.glorot_uniform()))
model.add(BatchNormalization())
model.add(Activation('softmax'))
# Compile the model
sgd = SGD(lr=0.1,momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy'])
return model
``` |
{
"source": "JiaoLiu/style-image",
"score": 2
} |
#### File: style-image/scripts/transformImage.py
```python
import numpy as np
import tensorflow as tf
import ast
import os
from tensorflow.python import pywrap_tensorflow
from matplotlib import pyplot
from matplotlib.pyplot import imshow
import image_utils
import model
import ops
import argparse
import sys
num_styles = 32
imgWidth = 512
imgHeight = 512
channel = 3
checkpoint = "/Users/Jiao/Desktop/TFProject/style-image/checkpoint/multistyle-pastiche-generator-varied.ckpt"
newCkp = "/Users/Jiao/Desktop/TFProject/style-image/checkpoint/multistyle-pastiche-generator-varied.ckpt-1"
# inputImage = np.expand_dims(image_utils.load_np_image(os.path.expanduser("/Users/Jiao/Desktop/TFProject/prisma/data/content.jpg")),0)
inputImage = tf.placeholder(tf.float32,shape=[None,imgWidth,imgHeight,channel],name="input")
styles = tf.placeholder(tf.float32,shape=[num_styles],name="style")
def _style_mixture(which_styles, num_styles):
"""Returns a 1-D array mapping style indexes to weights."""
mixture = np.zeros([num_styles], dtype=np.float32)
for index in which_styles:
mixture[index] = which_styles[index]
return mixture
def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors):
"""Prints tensors in a checkpoint file.
If no `tensor_name` is provided, prints the tensor names and shapes
in the checkpoint file.
If `tensor_name` is provided, prints the content of the tensor.
Args:
file_name: Name of the checkpoint file.
tensor_name: Name of the tensor in the checkpoint file to print.
all_tensors: Boolean indicating whether to print all tensors.
"""
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
if all_tensors:
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
print("tensor_name: ", key)
tensor = reader.get_tensor(key)
print(tensor.shape)
print(reader.get_tensor(key))
elif not tensor_name:
print(reader.debug_string().decode("utf-8"))
else:
print("tensor_name: ", tensor_name)
tensor = reader.get_tensor(tensor_name)
# tf.where(tf.is_nan(tensor), tf.zeros_like(tensor), tensor).eval()
print(tensor)
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
if ("Data loss" in str(e) and
(any([e in file_name for e in [".index", ".meta", ".data"]]))):
proposed_file = ".".join(file_name.split(".")[0:-1])
v2_file_error_template = """
It's likely that this is a V2 checkpoint and you need to provide the filename
*prefix*. Try removing the '.' and extension. Try:
inspect checkpoint --file_name = {}"""
print(v2_file_error_template.format(proposed_file))
with tf.name_scope(""):
# mixture = _style_mixture({18: 1.0}, num_styles)
transform = model.transform(inputImage,
normalizer_fn=ops.weighted_instance_norm,
normalizer_params={
# 'weights': tf.constant(mixture),
'weights' : styles,
'num_categories': num_styles,
'center': True,
'scale': True})
model_saver = tf.train.Saver(tf.global_variables())
with tf.Session() as sess:
# for node in sess.graph.as_graph_def().node:
# print node
# print_tensors_in_checkpoint_file(newCkp,tensor_name="transformer/contract/conv1/weights",all_tensors=True)
# tf.train.write_graph(sess.graph_def, "/Users/Jiao/Desktop/TFProject/style-image/protobuf", "input.pb")
checkpoint = os.path.expanduser(newCkp)
if tf.gfile.IsDirectory(checkpoint):
checkpoint = tf.train.latest_checkpoint(checkpoint)
tf.logging.info('loading latest checkpoint file: {}'.format(checkpoint))
model_saver.restore(sess, checkpoint)
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
W = sess.graph.as_graph_element(key+":0")
if (len(W.shape) == 4):
P = tf.transpose(W, perm=[3, 0, 1, 2])
Y = tf.where(tf.is_nan(P), tf.zeros(P.get_shape()), P).eval()
name = key.replace("/", "_")
print(name, Y)
Y.tofile("/Users/Jiao/Desktop/TFProject/style-image/parameters/" + name)
# Y = tf.constant(0.25,shape=W.get_shape()).eval()
X = tf.where(tf.is_nan(W), tf.zeros(W.get_shape()), W).eval()
W = tf.assign(W,X).eval()
# name = key.replace("/", "_")
# W.tofile("/Users/Jiao/Desktop/TFProject/style-image/parameters/" + name)
# W = tf.assign(W, tf.zeros(W.get_shape())).eval()
# W = sess.graph.get_tensor_by_name("transformer/contract/conv1/weights:0")
newstyle = np.zeros([num_styles], dtype=np.float32)
newstyle[31] = 1
newImage = np.expand_dims(image_utils.load_np_image(os.path.expanduser("/Users/Jiao/Desktop/IMG_0898.JPG")),0)
# newImage = np.zeros((1,imgWidth,imgHeight,channel))
# newImage = tf.constant(255,shape=[1,imgWidth,imgHeight,channel]).eval()
style_image = transform.eval(feed_dict={inputImage:newImage,styles:newstyle})
# style_image = output.eval(feed_dict={inputImage:newImage})
# style_image = style_image[0]
# print(style_image)
# imshow(style_image)
# pyplot.show()
# model_saver.save(sess, newCkp)
``` |
{
"source": "Jiaolong/gcn-parking-slot",
"score": 2
} |
#### File: psdet/models/builder.py
```python
from ..utils.registry import build_from_cfg
from torch import nn
from .registry import (
POINT_DETECTOR
)
def build(cfg, registry, default_args=None):
if isinstance(cfg, list):
modules = [build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg]
return nn.Sequential(*modules)
else:
return build_from_cfg(cfg, registry, default_args)
def build_model(cfg, train_cfg=None, test_cfg=None):
return build(cfg, POINT_DETECTOR)
```
#### File: models/point_detector/utils.py
```python
import torch
import math
from torch import nn
import torchvision
from torch.utils import model_zoo
from torchvision.models.resnet import BasicBlock, model_urls, Bottleneck
def define_squeeze_unit(basic_channel_size):
"""Define a 1x1 squeeze convolution with norm and activation."""
conv = nn.Conv2d(2 * basic_channel_size, basic_channel_size, kernel_size=1,
stride=1, padding=0, bias=False)
norm = nn.BatchNorm2d(basic_channel_size)
relu = nn.LeakyReLU(0.1)
layers = [conv, norm, relu]
return layers
def define_expand_unit(basic_channel_size):
"""Define a 3x3 expand convolution with norm and activation."""
conv = nn.Conv2d(basic_channel_size, 2 * basic_channel_size, kernel_size=3,
stride=1, padding=1, bias=False)
norm = nn.BatchNorm2d(2 * basic_channel_size)
relu = nn.LeakyReLU(0.1)
layers = [conv, norm, relu]
return layers
def define_halve_unit(basic_channel_size):
"""Define a 4x4 stride 2 expand convolution with norm and activation."""
conv = nn.Conv2d(basic_channel_size, 2 * basic_channel_size, kernel_size=4,
stride=2, padding=1, bias=False)
norm = nn.BatchNorm2d(2 * basic_channel_size)
relu = nn.LeakyReLU(0.1)
layers = [conv, norm, relu]
return layers
def define_depthwise_expand_unit(basic_channel_size):
"""Define a 3x3 expand convolution with norm and activation."""
conv1 = nn.Conv2d(basic_channel_size, 2 * basic_channel_size,
kernel_size=1, stride=1, padding=0, bias=False)
norm1 = nn.BatchNorm2d(2 * basic_channel_size)
relu1 = nn.LeakyReLU(0.1)
conv2 = nn.Conv2d(2 * basic_channel_size, 2 * basic_channel_size, kernel_size=3,
stride=1, padding=1, bias=False, groups=2 * basic_channel_size)
norm2 = nn.BatchNorm2d(2 * basic_channel_size)
relu2 = nn.LeakyReLU(0.1)
layers = [conv1, norm1, relu1, conv2, norm2, relu2]
return layers
def define_detector_block(basic_channel_size):
"""Define a unit composite of a squeeze and expand unit."""
layers = []
layers += define_squeeze_unit(basic_channel_size)
layers += define_expand_unit(basic_channel_size)
return layers
class YetAnotherDarknet(nn.modules.Module):
"""Yet another darknet, imitating darknet-53 with depth of darknet-19."""
def __init__(self, input_channel_size, depth_factor):
super(YetAnotherDarknet, self).__init__()
layers = []
# 0
layers += [nn.Conv2d(input_channel_size, depth_factor, kernel_size=3,
stride=1, padding=1, bias=False)]
layers += [nn.BatchNorm2d(depth_factor)]
layers += [nn.LeakyReLU(0.1)]
# 1
layers += define_halve_unit(depth_factor)
layers += define_detector_block(depth_factor)
# 2
depth_factor *= 2
layers += define_halve_unit(depth_factor)
layers += define_detector_block(depth_factor)
# 3
depth_factor *= 2
layers += define_halve_unit(depth_factor)
layers += define_detector_block(depth_factor)
layers += define_detector_block(depth_factor)
# 4
depth_factor *= 2
layers += define_halve_unit(depth_factor)
layers += define_detector_block(depth_factor)
layers += define_detector_block(depth_factor)
# 5
depth_factor *= 2
layers += define_halve_unit(depth_factor)
layers += define_detector_block(depth_factor)
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
# vgg backbone
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 1024, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def vgg16(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['D']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
return model
class ResNet18(nn.Module):
def __init__(self, block, layers, aux_classes=1000, classes=100, domains=3):
self.inplanes = 64
super(ResNet18, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 1024, layers[3], stride=2)#resnet 18
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def is_patch_based(self):
return False
def forward(self, x, **kwargs):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet18(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
return model
class ResNet50(nn.Module):
def __init__(self, block, layers, aux_classes=1000, classes=100, domains=3):
self.inplanes = 64
super(ResNet50, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 256, layers[3], stride=2) #resnet50
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def is_patch_based(self):
return False
def forward(self, x, **kwargs):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet50(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
return model
```
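A quick, hypothetical smoke test for the backbones above: a dummy bird's-eye-view tensor is pushed through YetAnotherDarknet to confirm the 32x spatial downsampling; the 6-channel 512x512 input and depth_factor of 32 are assumptions, not values from the repository.
```python
import torch

# Hypothetical smoke test; input size and depth_factor are assumptions.
backbone = YetAnotherDarknet(input_channel_size=6, depth_factor=32)
x = torch.randn(1, 6, 512, 512)
with torch.no_grad():
    feat = backbone(x)
print(feat.shape)  # five halve units -> torch.Size([1, 1024, 16, 16])
```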
#### File: psdet/utils/common.py
```python
import os
import sys
import torch
import pickle
import shutil
import logging
import datetime
import numpy as np
import subprocess
from pathlib import Path
import random as pyrandom
import torch.distributed as dist
from .dist import get_dist_info
def scan_upsample(points_array, input_rings=32, vertical_fov=26.8, bottom_angle=-24.8):
im0, inds_e, inds_o = scan_to_range(points_array, input_rings, vertical_fov, bottom_angle)
h, w, c = im0.shape
points_new = []
for i in range(h - 1):
for j in range(w):
d1, t1, v_angle1, h_angle1 = im0[i, j, :]
d2, t2, v_angle2, h_angle2 = im0[i + 1, j, :]
if d1 != 0 and d2 != 0:
t = (t1 + t2) * 0.5
d = (d1 + d2) * 0.5
v_angle = (v_angle1 + v_angle2) * 0.5
h_angle = (h_angle1 + h_angle2) * 0.5
x = np.sin(h_angle) * np.cos(v_angle) * d
y = np.cos(h_angle) * np.cos(v_angle) * d
z = np.sin(v_angle) * d
point = np.array([x, y, z, t])
points_new.append(point)
points_new = np.array(points_new)
points_hr = np.vstack((points_array, points_new))
return points_hr
def scan_downsample(points_array, input_rings=64, vertical_fov=26.8, bottom_angle=-24.8, output_rings='even'):
range_image, inds_e, inds_o = scan_to_range(points_array, input_rings, vertical_fov, bottom_angle)
if output_rings == 'even':
return points_array[inds_e,:4]
elif output_rings == 'odd':
return points_array[inds_o,:4]
elif output_rings == 'even_or_odd':
even = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if even:
return points_array[inds_e,:4]
return points_array[inds_o,:4]
elif output_rings == 'random':
inds = inds_e + inds_o
np.random.shuffle(inds)
inds = inds[:int(len(inds) * 0.5)]
return points_array[inds,:4]
else:
raise ValueError('Unknown output_rings value: %s' % output_rings)
def range_to_scan(range_image, num_rings=64, vertical_fov=26.8, bottom_angle=-24.8):
max_range = 80.0
min_range = 2.0
image_cols = 1024
ang_res_x = 360.0 / float(image_cols) # horizontal resolution
ang_res_y = vertical_fov / float(num_rings - 1) # vertical resolution
row_ids = np.arange(num_rings)
col_ids = np.arange(image_cols)
v_angles = np.float32(row_ids * ang_res_y) + bottom_angle
h_angles = np.float32(col_ids + 1 - image_cols / 2) * ang_res_x + 90
v_angles = v_angles / 180.0 * np.pi
h_angles = h_angles / 180.0 * np.pi
range_image[:,:,0] *= max_range
h, w, c = range_image.shape
points = []
inds_even = []
inds_odd = []
for i in range(h):
for j in range(w):
depth, intensity = range_image[i, j, :]
if depth < min_range:
continue
h_angle = h_angles[j]
v_angle = v_angles[i]
x = np.sin(h_angle) * np.cos(v_angle) * depth
y = np.cos(h_angle) * np.cos(v_angle) * depth
z = np.sin(v_angle) * depth
point = np.array([x, y, z, int(intensity)])
points.append(point)
idx = len(points) - 1
if i % 2 == 0:
inds_even.append(idx)
else:
inds_odd.append(idx)
return np.array(points), inds_even, inds_odd
def scan_to_range(points_array, input_rings=64, vertical_fov=26.8, bottom_angle=-24.8, normalize=False):
# range image size, depends on your sensor, i.e., VLP-16: 16x1800, OS1-64: 64x1024
image_rows_full = input_rings
max_range = 80.0
min_range = 2.0
image_cols = 1024
ang_res_x = 360.0 / float(image_cols) # horizontal resolution
ang_res_y = vertical_fov / float(image_rows_full - 1) # vertical resolution
ang_start_y = bottom_angle
# project points to range image
# channels: range, intensity, horizon_angle, vertical_angle
range_image = np.zeros((image_rows_full, image_cols, 4), dtype=np.float32)
x = points_array[:,0]
y = points_array[:,1]
z = points_array[:,2]
t = points_array[:,3]
# find row id
vertical_angle = np.arctan2(z, np.sqrt(x * x + y * y)) * 180.0 / np.pi
relative_vertical_angle = vertical_angle - ang_start_y
rowId = np.int_(np.round_(relative_vertical_angle / ang_res_y))
# find column id
horizontal_angle = np.arctan2(x, y) * 180.0 / np.pi
colId = -np.int_((horizontal_angle - 90.0) / ang_res_x) + image_cols / 2
shift_ids = np.where(colId>=image_cols)
colId[shift_ids] = colId[shift_ids] - image_cols
# filter range
thisRange = np.sqrt(x * x + y * y + z * z)
thisRange[thisRange > max_range] = 0
thisRange[thisRange < min_range] = 0
if normalize:
thisRange /= max_range
# save range info to range image
inds = []
inds_odd_row = []
inds_even_row = []
for i in range(len(thisRange)):
if rowId[i] < 0 or rowId[i] >= image_rows_full or colId[i] < 0 or colId[i] >= image_cols:
continue
range_image[int(rowId[i]), int(colId[i]), 0] = thisRange[i]
range_image[int(rowId[i]), int(colId[i]), 1] = t[i]
range_image[int(rowId[i]), int(colId[i]), 2] = vertical_angle[i] * np.pi / 180.0
range_image[int(rowId[i]), int(colId[i]), 3] = horizontal_angle[i] * np.pi / 180.0
if thisRange[i] > 0:
inds.append(i)
if rowId[i] % 2 == 0:
inds_even_row.append(i)
else:
inds_odd_row.append(i)
return range_image, inds_even_row, inds_odd_row
def check_numpy_to_torch(x):
if isinstance(x, np.ndarray):
return torch.from_numpy(x).float(), True
return x, False
def get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):
"""
Args:
voxel_coords: (N, 3)
downsample_times:
voxel_size:
point_cloud_range:
Returns:
"""
assert voxel_coords.shape[1] == 3
voxel_centers = voxel_coords[:, [2, 1, 0]].float() # (xyz)
voxel_size = torch.tensor(voxel_size, device=voxel_centers.device).float() * downsample_times
pc_range = torch.tensor(point_cloud_range[0:3], device=voxel_centers.device).float()
voxel_centers = (voxel_centers + 0.5) * voxel_size + pc_range
return voxel_centers
def keep_arrays_by_name(gt_names, used_classes):
inds = [i for i, x in enumerate(gt_names) if x in used_classes]
inds = np.array(inds, dtype=np.int64)
return inds
def set_random_seed(seed=3):
pyrandom.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_logger(logdir, name):
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
ts = str(datetime.datetime.now()).split(".")[0].replace(" ", "_")
ts = ts.replace(":", "_").replace("-", "_")
file_path = Path(logdir) / "run_{}.log".format(ts)
file_hdlr = logging.FileHandler(str(file_path))
file_hdlr.setFormatter(formatter)
strm_hdlr = logging.StreamHandler(sys.stdout)
strm_hdlr.setFormatter(formatter)
logger.addHandler(file_hdlr)
logger.addHandler(strm_hdlr)
return logger
def merge_results_dist(result_part, size, tmpdir):
rank, world_size = get_dist_info()
os.makedirs(tmpdir, exist_ok=True)
dist.barrier()
pickle.dump(result_part, open(os.path.join(tmpdir, 'result_part_{}.pkl'.format(rank)), 'wb'))
dist.barrier()
if rank != 0:
return None
part_list = []
for i in range(world_size):
part_file = os.path.join(tmpdir, 'result_part_{}.pkl'.format(i))
part_list.append(pickle.load(open(part_file, 'rb')))
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
ordered_results = ordered_results[:size]
shutil.rmtree(tmpdir)
return ordered_results
```
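A hypothetical example (not from the repository) of the range-projection helpers above: a random point cloud is projected into a 64 x 1024 range image, and the even/odd ring indices returned are what scan_downsample() uses to drop alternate rings.
```python
import numpy as np

# Hypothetical demo of scan_to_range(); the random cloud is only for illustration.
pts = np.random.uniform(-40.0, 40.0, size=(2048, 4)).astype(np.float32)
range_image, inds_even, inds_odd = scan_to_range(pts, input_rings=64)
print(range_image.shape)             # (64, 1024, 4)
print(len(inds_even), len(inds_odd))
```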
#### File: psdet/utils/geometry.py
```python
import numba
import torch
import numpy as np
from .common import check_numpy_to_torch
def limit_period(val, offset=0.5, period=np.pi):
val, is_numpy = check_numpy_to_torch(val)
ans = val - torch.floor(val / period + offset) * period
return ans.numpy() if is_numpy else ans
def rotate_points_along_z(points, angle):
"""
Args:
points: (B, N, 3 + C)
angle: (B), angle along z-axis, angle increases x ==> y
Returns:
"""
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
zeros = angle.new_zeros(points.shape[0])
ones = angle.new_ones(points.shape[0])
rot_matrix = torch.stack((
cosa, sina, zeros,
-sina, cosa, zeros,
zeros, zeros, ones
), dim=1).view(-1, 3, 3).float()
points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
return points_rot.numpy() if is_numpy else points_rot
def camera_to_lidar(points, r_rect, velo2cam):
pts = np.concatenate(
[points[:, :3], np.ones([points.shape[0], 1])], axis=1)
pts = pts @ np.linalg.inv((r_rect @ velo2cam).T)
points[:, :3] = pts[:, :3]
return points
def lidar_to_camera(points, r_rect, velo2cam):
pts = np.concatenate(
[points[:, :3], np.ones([points.shape[0], 1])], axis=1)
pts = pts @ (r_rect @ velo2cam).T
points[:, :3] = pts[:, :3]
return points
def box_camera_to_lidar(data, r_rect, velo2cam):
xyz = data[:, 0:3]
l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)
return np.concatenate([xyz_lidar, w, l, h, r], axis=1)
def box_lidar_to_camera(data, r_rect, velo2cam):
xyz_lidar = data[:, 0:3]
w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)
return np.concatenate([xyz, l, h, w, r], axis=1)
def projection_matrix_to_CRT_kitti(P):
"""
将投影矩阵P利用QR分解分解出摄像机内外参数
输入:
P:投影矩阵,3*4
输出:
K:内参数矩阵,3*3
R:旋转矩阵,3*3
T:平移向量,3*1
"""
# P = K @ [R|T]
# K is upper triangular matrix, so we need to inverse CR and use QR
# stable for all kitti camera projection matrix
CR = P[0:3, 0:3]
CT = P[0:3, 3]
RinvCinv = np.linalg.inv(CR)
Rinv, Cinv = np.linalg.qr(RinvCinv)
K = np.linalg.inv(Cinv)
R = np.linalg.inv(Rinv)
T = Cinv @ CT
return K, R, T
def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
fku = C[0, 0]
fkv = -C[1, 1]
u0v0 = C[0:2, 2]
z_points = np.array([near_clip] * 4 + [far_clip] *
4, dtype=C.dtype)[:, np.newaxis]
b = bbox_image
box_corners = np.array(
[[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype
)
near_box_corners = (box_corners - u0v0) / np.array(
[fku / near_clip, -fkv / near_clip], dtype=C.dtype
)
far_box_corners = (box_corners - u0v0) / np.array(
[fku / far_clip, -fkv / far_clip], dtype=C.dtype
)
ret_xy = np.concatenate(
[near_box_corners, far_box_corners], axis=0) # [8, 2]
ret_xyz = np.concatenate([ret_xy, z_points], axis=1)
return ret_xyz
@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
"""convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
"""
# box_corners: [N, 8, 3], must from corner functions in this module
num_boxes = corners.shape[0]
surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
corner_idxes = np.array(
[0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]
).reshape(6, 4)
for i in range(num_boxes):
for j in range(6):
for k in range(4):
surfaces[i, j, k] = corners[i, corner_idxes[j, k]]
return surfaces
def points_in_convex_polygon_3d_jit(points, polygon_surfaces, num_surfaces=None):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_points, num_polygon] bool array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
if num_surfaces is None:
num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
normal_vec, d = surface_equ_3d_jitv2(polygon_surfaces[:, :, :3, :])
# normal_vec: [num_polygon, max_num_surfaces, 3]
# d: [num_polygon, max_num_surfaces]
return _points_in_convex_polygon_3d_jit(
points, polygon_surfaces, normal_vec, d, num_surfaces
)
@numba.njit
def surface_equ_3d_jitv2(surfaces):
# polygon_surfaces: [num_polygon, num_surfaces, num_points_of_polygon, 3]
num_polygon = surfaces.shape[0]
max_num_surfaces = surfaces.shape[1]
normal_vec = np.zeros((num_polygon, max_num_surfaces, 3), dtype=surfaces.dtype)
d = np.zeros((num_polygon, max_num_surfaces), dtype=surfaces.dtype)
sv0 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
sv1 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
for i in range(num_polygon):
for j in range(max_num_surfaces):
sv0[0] = surfaces[i, j, 0, 0] - surfaces[i, j, 1, 0]
sv0[1] = surfaces[i, j, 0, 1] - surfaces[i, j, 1, 1]
sv0[2] = surfaces[i, j, 0, 2] - surfaces[i, j, 1, 2]
sv1[0] = surfaces[i, j, 1, 0] - surfaces[i, j, 2, 0]
sv1[1] = surfaces[i, j, 1, 1] - surfaces[i, j, 2, 1]
sv1[2] = surfaces[i, j, 1, 2] - surfaces[i, j, 2, 2]
normal_vec[i, j, 0] = sv0[1] * sv1[2] - sv0[2] * sv1[1]
normal_vec[i, j, 1] = sv0[2] * sv1[0] - sv0[0] * sv1[2]
normal_vec[i, j, 2] = sv0[0] * sv1[1] - sv0[1] * sv1[0]
d[i, j] = (
-surfaces[i, j, 0, 0] * normal_vec[i, j, 0]
- surfaces[i, j, 0, 1] * normal_vec[i, j, 1]
- surfaces[i, j, 0, 2] * normal_vec[i, j, 2]
)
return normal_vec, d
@numba.njit
def _points_in_convex_polygon_3d_jit(
points, polygon_surfaces, normal_vec, d, num_surfaces=None
):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_points, num_polygon] bool array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
ret = np.ones((num_points, num_polygons), dtype=np.bool_)
sign = 0.0
for i in range(num_points):
for j in range(num_polygons):
for k in range(max_num_surfaces):
if k > num_surfaces[j]:
break
sign = (
points[i, 0] * normal_vec[j, k, 0]
+ points[i, 1] * normal_vec[j, k, 1]
+ points[i, 2] * normal_vec[j, k, 2]
+ d[j, k]
)
if sign >= 0:
ret[i, j] = False
break
return ret
def mask_points_by_range(points, limit_range):
mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \
& (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4])
return mask
def remove_outside_points(points, rect, Trv2c, P2, image_shape):
# 5x faster than remove_outside_points_v1(2ms vs 10ms)
C, R, T = projection_matrix_to_CRT_kitti(P2)
image_bbox = [0, 0, image_shape[1], image_shape[0]]
frustum = get_frustum(image_bbox, C)
frustum -= T
frustum = np.linalg.inv(R) @ frustum.T
frustum = camera_to_lidar(frustum.T, rect, Trv2c)
frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])
indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)
points = points[indices.reshape([-1])]
return points
```
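A small, hypothetical check of rotate_points_along_z() above: a single point on the x-axis is rotated 90 degrees about z, and because the input is NumPy the result comes back as NumPy via check_numpy_to_torch().
```python
import numpy as np

# Hypothetical example; shapes follow the (B, N, 3 + C) convention documented above.
pts = np.array([[[1.0, 0.0, 0.0, 0.5]]], dtype=np.float32)
rotated = rotate_points_along_z(pts, np.array([np.pi / 2], dtype=np.float32))
print(rotated)  # the x-axis point lands (approximately) on the y-axis; the extra channel 0.5 is untouched
```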
#### File: psdet/utils/registry.py
```python
import inspect
class Registry(object):
def __init__(self, name):
self.name = name
self.obj_dict = {}
def get(self, key):
if key not in self.obj_dict:
raise ValueError("{} is not a registered class.".format(key))
return self.obj_dict.get(key, None)
def register(self, cls):
if not inspect.isclass(cls):
raise TypeError('module must be a class, but got {}'.format(cls))
cls_name = cls.__name__
if cls_name in self.obj_dict:
raise KeyError("{} is already registered in {}".format(cls_name, self.name))
self.obj_dict[cls_name] = cls
return cls
def build_from_cfg(cfg, registry, default_args=None):
"""
Build an object from config dict.
"""
obj_type = cfg.pop('type')
obj_class = registry.get(obj_type)
if default_args is not None:
return obj_class(cfg=cfg, **default_args)
else:
return obj_class(cfg=cfg)
```
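A self-contained sketch (hypothetical names) of how the Registry and build_from_cfg above are meant to be used: register a class, then instantiate it from a config dict whose 'type' key selects the class.
```python
# Hypothetical usage of Registry / build_from_cfg; DummyDetector is illustrative only.
DETECTORS = Registry('detectors')

@DETECTORS.register
class DummyDetector(object):
    def __init__(self, cfg, logger=None):
        self.cfg = cfg
        self.logger = logger

model = build_from_cfg({'type': 'DummyDetector', 'lr': 1e-3}, DETECTORS,
                       default_args={'logger': None})
print(type(model).__name__, model.cfg)  # DummyDetector {'lr': 0.001}
```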
#### File: tools/eval_utils/eval_utils.py
```python
import tqdm
import time
import pickle
import cv2
import numpy as np
import torch
from psdet.utils import common, dist
from psdet.models import load_data_to_gpu
def statistics_info(cfg, ret_dict, metric, disp_dict):
for cur_thresh in cfg.model.post_processing.recall_thresh_list:
metric['recall_roi_%s' % str(cur_thresh)] += ret_dict.get('roi_%s' % str(cur_thresh), 0)
metric['recall_rcnn_%s' % str(cur_thresh)] += ret_dict.get('rcnn_%s' % str(cur_thresh), 0)
metric['gt_num'] += ret_dict.get('gt', 0)
min_thresh = cfg.model.post_processing.recall_thresh_list[0]
disp_dict['recall_%s' % str(min_thresh)] = \
'(%d, %d) / %d' % (metric['recall_roi_%s' % str(min_thresh)], metric['recall_rcnn_%s' % str(min_thresh)], metric['gt_num'])
def eval_point_detection(cfg, model, dataloader, logger, dist_test=False, save_to_file=False, result_dir=None):
result_dir.mkdir(parents=True, exist_ok=True)
dataset = dataloader.dataset
if dist_test:
num_gpus = torch.cuda.device_count()
local_rank = cfg.local_rank % num_gpus
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[local_rank],
broadcast_buffers=False
)
model.eval()
if cfg.local_rank == 0:
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
start_time = time.time()
point_pred_list = []
point_gt_list = []
slot_pred_list = []
slot_gt_list = []
for i, batch_dict in enumerate(dataloader):
load_data_to_gpu(batch_dict)
with torch.no_grad():
pred_dicts, ret_dict = model(batch_dict)
points_pred = pred_dicts['points_pred']
point_pred_list += points_pred
slots_pred = pred_dicts['slots_pred']
slot_pred_list += slots_pred
marks_gt_batch = batch_dict['marks']
match_targets = batch_dict['match_targets']
npoints = batch_dict['npoints']
for b, marks_gt in enumerate(marks_gt_batch):
n = npoints[b].long()
marks = marks_gt[:n].cpu().numpy()
point_gt_list.append(marks)
match = match_targets[b][:n].cpu().numpy()
slots = []
for j, m in enumerate(match[:,0]):
if m >= 0 and m < n:
x1, y1 = marks[j,:2]
x2, y2 = marks[int(m),:2]
slot = np.array([x1, y1, x2, y2])
slots.append(slot)
slot_gt_list.append(slots)
if cfg.local_rank == 0:
# progress_bar.set_postfix(disp_dict)
progress_bar.update()
if cfg.local_rank == 0:
progress_bar.close()
sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
logger.info('Test finished(sec_per_example: %.4f second).' % sec_per_example)
if cfg.local_rank != 0:
return {}
dataset.evaluate_point_detection(point_pred_list, point_gt_list)
logger.info('****************Point Detection Evaluation Done.*****************')
dataset.evaluate_slot_detection(slot_pred_list, slot_gt_list)
logger.info('****************Slot Detection Evaluation Done.*****************')
``` |
{
"source": "Jiaolong/ss-da-consistency",
"score": 3
} |
#### File: ss-da-consistency/networks/resnet.py
```python
from torch import nn
from torch.utils import model_zoo
from torchvision import models
from torchvision.models.resnet import BasicBlock, model_urls, Bottleneck
from networks.model_utils import init_weights
resnet_dict = {"ResNet18":models.resnet18, "ResNet34":models.resnet34,
"ResNet50":models.resnet50, "ResNet101":models.resnet101, "ResNet152":models.resnet152}
class ResNetFc(nn.Module):
def __init__(self, resnet_name='ResNet50', use_bottleneck=True, bottleneck_dim=256,
aux_classes=1000, classes=100, output='all'):
super(ResNetFc, self).__init__()
model_resnet = resnet_dict[resnet_name](pretrained=True)
self.output = output
self.conv1 = model_resnet.conv1
self.bn1 = model_resnet.bn1
self.relu = model_resnet.relu
self.maxpool = model_resnet.maxpool
self.layer1 = model_resnet.layer1
self.layer2 = model_resnet.layer2
self.layer3 = model_resnet.layer3
self.layer4 = model_resnet.layer4
self.avgpool = model_resnet.avgpool
self.feature_layers = nn.Sequential(self.conv1, self.bn1, self.relu, self.maxpool, \
self.layer1, self.layer2, self.layer3, self.layer4, self.avgpool)
self.use_bottleneck = use_bottleneck
if self.use_bottleneck:
self.bottleneck = nn.Linear(model_resnet.fc.in_features, bottleneck_dim)
self.bottleneck.apply(init_weights)
self.feat_dim = bottleneck_dim
else:
self.feat_dim = model_resnet.fc.in_features
self.fc = nn.Linear(self.feat_dim, classes)
self.fc.apply(init_weights)
self.aux_classifier = nn.Linear(self.feat_dim, aux_classes)
self.aux_classifier.apply(init_weights)
def forward(self, x):
x = self.feature_layers(x)
x = x.view(x.size(0), -1)
if self.use_bottleneck:
x = self.bottleneck(x)
if self.output == 'feature':
return x
elif self.output == 'feature+class_logits':
return x, self.fc(x)
else:
return self.aux_classifier(x), self.fc(x)
def output_num(self):
return self.feat_dim
def get_parameters(self):
if self.use_bottleneck:
parameter_list = [{"params":self.feature_layers.parameters(), "lr_mult":1, 'decay_mult':2}, \
{"params":self.bottleneck.parameters(), "lr_mult":10, 'decay_mult':2}, \
{"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2},
{"params":self.aux_classifier.parameters(), "lr_mult":10, 'decay_mult':2},
]
else:
parameter_list = [{"params":self.feature_layers.parameters(), "lr_mult":1, 'decay_mult':2}, \
{"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2},
{"params":self.aux_classifier.parameters(), "lr_mult":10, 'decay_mult':2},
]
return parameter_list
class ResNet(nn.Module):
def __init__(self, block, layers, aux_classes=1000, classes=100, output='all'):
self.output = output
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
# print(block.expansion)
self.feat_dim = 512 * block.expansion
self.aux_classifier = nn.Linear(512 * block.expansion, aux_classes)
self.class_classifier = nn.Linear(512 * block.expansion, classes)
# init weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight)
nn.init.normal_(m.bias)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def is_patch_based(self):
return False
def get_parameters(self):
feat_param_list = [
{"params":self.conv1.parameters(), "lr_mult":1, 'decay_mult':2},
{"params":self.bn1.parameters(), "lr_mult":1, 'decay_mult':2},
{"params":self.layer1.parameters(), "lr_mult":1, 'decay_mult':2},
{"params":self.layer2.parameters(), "lr_mult":1, 'decay_mult':2},
{"params":self.layer3.parameters(), "lr_mult":1, 'decay_mult':2},
{"params":self.layer4.parameters(), "lr_mult":1, 'decay_mult':2}
]
class_param_list = [ {"params":self.class_classifier.parameters(), "lr_mult":10, 'decay_mult':2} ]
aux_param_list = [ {"params":self.aux_classifier.parameters(), "lr_mult":10, 'decay_mult':2} ]
if self.output == 'feature':
return feat_param_list
elif self.output == 'feature+class_logits':
return feat_param_list + class_param_list
else:
return feat_param_list + class_param_list + aux_param_list
def forward(self, x, **kwargs):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1) # B x C
if self.output == 'feature':
return x
elif self.output == 'feature+class_logits':
return x, self.class_classifier(x)
else:
return self.aux_classifier(x), self.class_classifier(x)
def resnet18(pretrained=True, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
return model
def resnet50(pretrained=True, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
return model
def resnet50_fc(resnet_name='ResNet50', **kwargs):
model = ResNetFc(resnet_name=resnet_name, **kwargs)
return model
def resnet18_fc(resnet_name='ResNet18', **kwargs):
model = ResNetFc(resnet_name=resnet_name, **kwargs)
return model
``` |
{
"source": "jiaolongsun/ssgene-galaxy",
"score": 2
} |
#### File: test/integration/test_pulsar_embedded.py
```python
import os
from base import integration_util
SCRIPT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
EMBEDDED_PULSAR_JOB_CONFIG_FILE = os.path.join(SCRIPT_DIRECTORY, "embedded_pulsar_job_conf.xml")
class EmbeddedPulsarIntegrationTestCase(integration_util.IntegrationTestCase):
"""Start a Pulsar job."""
framework_tool_and_types = True
@classmethod
def handle_galaxy_config_kwds(cls, config):
config["job_config_file"] = EMBEDDED_PULSAR_JOB_CONFIG_FILE
def test_tool_simple_constructs(self):
self._run_tool_test("simple_constructs")
def test_multi_data_param(self):
self._run_tool_test("multi_data_param")
``` |
{
"source": "Jiaolong/trajectory-prediction",
"score": 2
} |
#### File: lidardet/datasets/base.py
```python
import numpy as np
from pathlib import Path
from collections import defaultdict
from torch.utils.data import Dataset
from .registry import DATASETS
from .augmentor import DataAugmentor
from .processor import DataProcessor
@DATASETS.register
class PointCloudDataset(Dataset):
def __init__(self, cfg, logger=None):
self.cfg = cfg
self.logger = logger
self.class_names = cfg.class_names
self.root_path = Path(cfg.root_path)
if self.cfg.get('augmentor', None):
self.data_augmentor = DataAugmentor(self.root_path, cfg.augmentor, self.class_names, logger)
if self.cfg.get('pre_processor', None):
self.pre_processor = DataProcessor(cfg.pre_processor)
def __len__(self):
raise NotImplementedError
def forward(self, index):
raise NotImplementedError
def augment_data(self, data_dict):
if data_dict.get('gt_names', None) is not None:
gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
data_dict = self.data_augmentor.forward(
data_dict={
**data_dict,
'gt_boxes_mask': gt_boxes_mask
}
)
else:
data_dict = self.data_augmentor.forward(
data_dict={**data_dict})
if data_dict.get('gt_boxes', None) is not None:
if len(data_dict['gt_boxes']) == 0:
new_index = np.random.randint(self.__len__())
return self.__getitem__(new_index)
return data_dict
def pre_process(self, data_dict):
data_dict = self.pre_processor.forward(data_dict)
return data_dict
@staticmethod
def collate_batch(batch_list, _unused=False):
data_dict = defaultdict(list)
for cur_sample in batch_list:
for key, val in cur_sample.items():
data_dict[key].append(val)
batch_size = len(batch_list)
ret = {}
for key, val in data_dict.items():
if key in ['voxels', 'voxel_num_points']:
ret[key] = np.concatenate(val, axis=0)
elif key in ['points', 'voxel_coords']:
coors = []
for i, coor in enumerate(val):
coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)
coors.append(coor_pad)
ret[key] = np.concatenate(coors, axis=0)
elif key in ['gt_boxes']:
max_gt = max([len(x) for x in val])
batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)
for k in range(batch_size):
batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
ret[key] = batch_gt_boxes3d
else:
ret[key] = np.stack(val, axis=0)
ret['batch_size'] = batch_size
return ret
```
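A hypothetical example of the custom collate_batch() above: two samples with different point counts and ground-truth box counts are merged, points get a leading batch-index column, and gt_boxes are zero-padded to the largest count in the batch.
```python
import numpy as np

# Hypothetical inputs; only the keys handled by collate_batch are used.
batch = [
    {'points': np.random.rand(100, 4), 'gt_boxes': np.random.rand(3, 7)},
    {'points': np.random.rand(80, 4),  'gt_boxes': np.random.rand(5, 7)},
]
out = PointCloudDataset.collate_batch(batch)
print(out['points'].shape, out['gt_boxes'].shape, out['batch_size'])  # (180, 5) (2, 5, 7) 2
```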
#### File: lidardet/datasets/builder.py
```python
import torch
from torch.utils.data import DataLoader
from torch.utils.data import DistributedSampler as _DistributedSampler
from lidardet.utils.dist import get_dist_info
from lidardet.utils.registry import build_from_cfg
from .registry import DATASETS
class DistributedSampler(_DistributedSampler):
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def build_dataset(cfg, logger=None):
dataset = build_from_cfg(cfg, DATASETS, dict(logger=logger))
return dataset
def build_dataloader(cfg, dist=False, training=False, logger=None):
dataset = build_dataset(cfg, logger)
if dist:
if training:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
rank, world_size = get_dist_info()
sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
else:
sampler = None
dataloader = DataLoader(
dataset, batch_size=cfg.batch_size, pin_memory=True, num_workers=cfg.num_workers,
shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,
drop_last=False, sampler=sampler, timeout=0
)
return dataset, dataloader, sampler
```
#### File: models/trajectory_predictor/conv_traj.py
```python
import math
import torch
import torch.nn as nn
import numpy as np
from .conv_header import ConvHeader
from .backbone import Bottleneck, BackBone
from .predictor_base import PredictorBase
from ..registry import TRAJECTORY_PREDICTOR
@TRAJECTORY_PREDICTOR.register
class ConvTrajPredictor(PredictorBase):
def __init__(self, cfg):
super().__init__(cfg=cfg)
self.backbone = BackBone(Bottleneck, cfg.backbone)
self.header = ConvHeader(cfg.header)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, batch_dict):
x = batch_dict['lidar_bev']
img_hmi = batch_dict['img_hmi']
x = torch.cat([x, img_hmi], dim=1)
batch_dict['input'] = x
batch_dict = self.backbone(batch_dict)
batch_dict = self.header(batch_dict)
if self.training:
loss, tb_dict, disp_dict = self.get_training_loss()
ret_dict = {
'loss': loss
}
return ret_dict, tb_dict, disp_dict
else:
pred_dicts, recall_dicts = self.post_processing(batch_dict)
return pred_dicts, recall_dicts
```
#### File: models/trajectory_predictor/header.py
```python
import torch
from torch import nn
from .mtp import MTP
from .covernet import CoverNet
from .backbone import conv3x3
class Header(nn.Module):
def __init__(self, cfg):
super(Header, self).__init__()
self.cfg = cfg
self.use_bn = cfg.use_bn
self.with_attention = cfg.with_attention
bias = not self.use_bn
self.conv1 = conv3x3(96, 96, bias=bias)
self.bn1 = nn.BatchNorm2d(96)
self.conv2 = conv3x3(96, 96, bias=bias)
self.bn2 = nn.BatchNorm2d(96)
self.conv3 = conv3x3(96, 96, bias=bias)
self.bn3 = nn.BatchNorm2d(96)
self.conv4 = conv3x3(96, 96, bias=bias)
self.bn4 = nn.BatchNorm2d(96)
dim_conv5 = 384
if self.with_attention:
self.conv7 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1, bias=bias)
self.bn7 = nn.BatchNorm2d(16)
self.conv8 = nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias=bias)
self.bn8 = nn.BatchNorm2d(32)
#self.pool = nn.MaxPool2d(kernel_size=4, padding=(1,0))
dim_conv5 += 32
self.conv5 = nn.Conv2d(dim_conv5, 512, kernel_size=3, stride=2, padding=0, bias=bias)
self.bn5 = nn.BatchNorm2d(512)
self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, stride=2, padding=0, bias=bias)
self.bn6 = nn.BatchNorm2d(1024)
self.seg_head = conv3x3(96, 1, bias=True)
if self.cfg.get('mtp'):
self.pred_head = MTP(cfg.mtp)
elif self.cfg.get('covernet'):
self.pred_head = CoverNet(cfg.covernet)
self.seg_loss_func = nn.BCELoss()
def forward(self, batch_dict):
x = batch_dict['seg_features']
x = self.conv1(x)
if self.use_bn:
x = self.bn1(x)
x = self.conv2(x)
if self.use_bn:
x = self.bn2(x)
x = self.conv3(x)
if self.use_bn:
x = self.bn3(x)
x = self.conv4(x)
if self.use_bn:
x = self.bn4(x)
seg = torch.sigmoid(self.seg_head(x)) # [b, 1, 75, 100]
y = batch_dict['reg_features'] # [b, 384, 19, 25]
if self.with_attention:
z = self.conv7(seg)
z = self.bn7(z)
z = self.conv8(z)
z = self.bn8(z)
y = torch.cat([y, z], dim=1)
y = self.conv5(y)
if self.use_bn:
y = self.bn5(y)
y = self.conv6(y)
if self.use_bn:
y = self.bn6(y)
y = y.mean([2, 3])
batch_dict['reg_features'] = y
batch_dict = self.pred_head(batch_dict)
if self.training:
self.seg_pred = seg.squeeze(1)
self.seg_target = batch_dict['img_ins']
batch_dict['pred_seg'] = seg.squeeze(1)
return batch_dict
def get_loss(self):
loss_pred = self.pred_head.get_loss()
loss_seg = self.seg_loss_func(self.seg_pred, self.seg_target)
loss = self.cfg.weight_loss_seg * loss_seg + loss_pred
tb_dict = {'loss_pred': loss_pred, 'loss_seg': loss_seg}
return loss, tb_dict
def get_prediction(self, batch_dict):
pred_traj_list = []
if self.cfg.get('mtp'):
pred_traj_batch = batch_dict['trajectory_predictions']
mode_prob_batch = batch_dict['mode_probabilities']
for i, mode_prob in enumerate(mode_prob_batch):
order = torch.argsort(mode_prob, dim=0, descending=True)
traj_sorted = pred_traj_batch[i][order].cpu().numpy()
pred_traj_list.append(traj_sorted)
elif self.cfg.get('covernet'):
logits = batch_dict['logits']
for i, mode_prob in enumerate(logits):
order = torch.argsort(mode_prob, dim=0, descending=True)
pred_traj_list.append(order.cpu().numpy())
return pred_traj_list
```
#### File: models/trajectory_predictor/predictor_base.py
```python
import torch
import os
import torch.nn as nn
from ..registry import TRAJECTORY_PREDICTOR
@TRAJECTORY_PREDICTOR.register
class PredictorBase(nn.Module):
def __init__(self, cfg):
super().__init__()
self.model_cfg = cfg
self.register_buffer('global_step', torch.LongTensor(1).zero_())
@property
def mode(self):
return 'TRAIN' if self.training else 'TEST'
def update_global_step(self):
self.global_step += 1
def forward(self, **kwargs):
raise NotImplementedError
def post_processing(self, batch_dict):
pred_dicts = {}
ret_dicts = {}
pred_dicts['pred_seg'] = batch_dict['pred_seg'].cpu().numpy()
if 'pred_heatmap' in batch_dict:
pred_dicts['pred_heatmap'] = batch_dict['pred_heatmap'].cpu().numpy()
pred_dicts['pred_traj'] = self.header.get_prediction(batch_dict)
return pred_dicts, ret_dicts
def get_training_loss(self):
disp_dict = {}
loss, tb_dict = self.header.get_loss()
tb_dict = {
'loss': loss.item(),
**tb_dict
}
return loss, tb_dict, disp_dict
def load_params_from_file(self, filename, logger=None, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
if logger:
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
if logger and 'version' in checkpoint:
logger.info('==> Checkpoint trained from version: %s' % checkpoint['version'])
update_model_state = {}
for key, val in model_state_disk.items():
if key in self.state_dict():
if self.state_dict()[key].shape == model_state_disk[key].shape:
update_model_state[key] = val
# logger.info('Update weight %s: %s' % (key, str(val.shape)))
#else:
# logger.info('Shape not matched %s: self --> %s vs disk --> %s ' % (key, str(self.state_dict()[key].shape), str(val.shape)))
state_dict = self.state_dict()
state_dict.update(update_model_state)
self.load_state_dict(state_dict)
for key in state_dict:
if key not in update_model_state and logger:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
if logger:
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(self.state_dict())))
else:
print('==> Done (loaded %d/%d)' % (len(update_model_state), len(self.state_dict())))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self.load_state_dict(checkpoint['model_state'])
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
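
# Minimal subclass sketch (hypothetical names: DummyPredictor and its 'features' input),
# only to show how the registry decorator and the checkpoint loaders above are used.
@TRAJECTORY_PREDICTOR.register
class DummyPredictor(PredictorBase):
    def __init__(self, cfg):
        super().__init__(cfg)
        self.fc = nn.Linear(8, 2)

    def forward(self, **kwargs):
        return {'logits': self.fc(kwargs['features'])}

# Typical usage (paths are placeholders, not taken from this repository):
# model = DummyPredictor(cfg={})
# model.load_params_from_file('/path/to/checkpoint.pth', logger=None, to_cpu=True)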
```
#### File: lidardet/utils/range_image.py
```python
import numpy as np
class RangeImage(object):
def __init__(self, cfg = {}):
self.cfg = cfg
self.max_range = cfg.get('max_range', 80.0)
self.min_range = cfg.get('min_range', 2.0)
self.fov_up = cfg.get('fov_up', 3.0) / 180.0 * np.pi
self.fov_down = cfg.get('fov_down', -25.0) / 180.0 * np.pi
# get vertical field of view total in radians
self.fov = abs(self.fov_down) + abs(self.fov_up)
self.cols = cfg.get('cols', 1024) # 1800
self.rows = cfg.get('rows', 64)
def from_points(self, points_in, normalize=True):
"""
Project points to range image
"""
points = np.copy(points_in)
# get depth of all points
depth = np.linalg.norm(points[:, :3], 2, axis=1)
# filter by range limit
points = points[(depth > self.min_range) & (depth < self.max_range)]
depth = depth[(depth > self.min_range) & (depth < self.max_range)]
# extract x, y, z and intensity values
x = points[:,0]
y = points[:,1]
z = points[:,2]
t = points[:,3]
# get horizontal and vertical angles [radian]
yaw = -np.arctan2(y, x) # [-pi, pi]
pitch = np.arcsin(z / depth)
# get projections in image coords
proj_x = 0.5 * (yaw / np.pi + 1.0) # in [0.0, 1.0]
proj_y = 1.0 - (pitch + abs(self.fov_down)) / self.fov # in [0.0, 1.0]
# scale to image size using angular resolution
proj_x *= self.cols # in [0.0, cols]
proj_y *= self.rows # in [0.0, rows]
# round and clamp for use as index
proj_x = np.floor(proj_x)
proj_x = np.minimum(self.cols - 1, proj_x)
proj_x = np.maximum(0, proj_x).astype(np.int32) # in [0,W-1]
proj_y = np.floor(proj_y)
proj_y = np.minimum(self.rows - 1, proj_y)
proj_y = np.maximum(0, proj_y).astype(np.int32) # in [0,H-1]
# sort depth in ascending order to keep more far distance points
indices = np.arange(depth.shape[0])
order = np.argsort(depth)
depth = depth[order]
indices = indices[order]
points = points[order]
proj_y = proj_y[order]
proj_x = proj_x[order]
proj_range = np.zeros((self.rows, self.cols, 5), dtype=np.float32) # [H,W] range (0 is no data)
if normalize:
depth /= self.max_range
proj_range[proj_y, proj_x, 0] = depth
proj_range[proj_y, proj_x, 1] = points[:,3]
proj_range[proj_y, proj_x, 2] = points[:,0]
proj_range[proj_y, proj_x, 3] = points[:,1]
proj_range[proj_y, proj_x, 4] = points[:,2]
return proj_range
def to_points(self, img_in, denormalize=True):
img = np.copy(img_in)
proj_y = np.float32(np.arange(self.rows)) / self.rows
proj_x = np.float32(np.arange(self.cols)) / self.cols
v_angles = (1.0 - proj_y) * self.fov - abs(self.fov_down)
h_angles = (2 * proj_x - 1.0) * np.pi
points = []
coordinates = []
if denormalize:
img[:,:,0] *= self.max_range
for i in range(self.rows):
for j in range(self.cols):
depth = img[i, j, 0]
intensity = 0
if img.shape[2] >= 2:
intensity = img[i, j, 1]
if depth < self.min_range:
continue
h_angle = h_angles[j]
v_angle = v_angles[i]
x = np.sin(h_angle) * np.cos(v_angle) * depth
y = np.cos(h_angle) * np.cos(v_angle) * depth
z = np.sin(v_angle) * depth
point = np.array([x, y, z, intensity]).astype(np.float32)
points.append(point)
coordinates.append(np.array([i, j]).astype(np.int32))
return np.array(points), np.array(coordinates)
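
# Minimal usage sketch, assuming a synthetic (N, 4) cloud of x, y, z, intensity values;
# the config keys below mirror the defaults read in __init__. It only shows how the
# projection and back-projection are called, not an exact round-trip reconstruction.
if __name__ == '__main__':
    ri = RangeImage({'rows': 64, 'cols': 1024, 'fov_up': 3.0, 'fov_down': -25.0})
    cloud = np.random.uniform(-40.0, 40.0, size=(10000, 4)).astype(np.float32)
    cloud[:, 3] = np.random.uniform(0.0, 1.0, size=10000)  # intensity channel
    img = ri.from_points(cloud, normalize=True)             # (64, 1024, 5) range image
    points, coords = ri.to_points(img, denormalize=True)    # points recovered from the image
    print(img.shape, points.shape, coords.shape)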
```
#### File: tools/eval_utils/trajectory_prediction.py
```python
import tqdm
import time
import pickle
import numpy as np
import torch
from lidardet.models import load_data_to_gpu
def eval_trajectory_prediction(cfg, model, dataloader, logger, dist_test=False, save_to_file=False, result_dir=None):
result_dir.mkdir(parents=True, exist_ok=True)
dataset = dataloader.dataset
if dist_test:
num_gpus = torch.cuda.device_count()
local_rank = cfg.local_rank % num_gpus
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[local_rank],
broadcast_buffers=False
)
model.eval()
if cfg.local_rank == 0:
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
start_time = time.time()
result_dicts_list = []
for i, batch_dict in enumerate(dataloader):
load_data_to_gpu(batch_dict)
with torch.no_grad():
pred_dicts, ret_dict = model(batch_dict)
result_dicts = dataset.generate_prediction_dicts(batch_dict, pred_dicts,
output_path = result_dir if save_to_file else None)
result_dicts_list.append(result_dicts)
if cfg.local_rank == 0:
# progress_bar.set_postfix(disp_dict)
progress_bar.update()
if cfg.local_rank == 0:
progress_bar.close()
sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Test finished (sec_per_example: %.4f second).' % sec_per_example)
if cfg.local_rank != 0:
return {}
dataset.evaluation(result_dicts_list)
    logger.info('Result is saved to %s' % result_dir)
logger.info('****************Evaluation done.*****************')
``` |
{
"source": "Jiaolong/xnor-net",
"score": 3
} |
#### File: xnor-net/mxnet/data_loader.py
```python
import os
from util import download_file
import mxnet as mx
import numpy as np
import gzip, struct
def read_data(data_path, label, image):
"""
download and read data into numpy
"""
base_url = 'http://yann.lecun.com/exdb/mnist/'
with gzip.open(download_file(base_url+label, os.path.join(data_path,label))) as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
        label = np.frombuffer(flbl.read(), dtype=np.int8)  # frombuffer replaces the deprecated fromstring for binary data
with gzip.open(download_file(base_url+image, os.path.join(data_path,image)), 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
return (label, image)
def to4d(img):
"""
reshape to 4D arrays
"""
return img.reshape(img.shape[0], 1, 28, 28).astype(np.float32)/255
def get_mnist_iter(data_path, batch_size):
"""
create data iterator with NDArrayIter
"""
(train_lbl, train_img) = read_data(data_path,
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz')
(val_lbl, val_img) = read_data(data_path,
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz')
train = mx.io.NDArrayIter(
to4d(train_img), train_lbl, batch_size, shuffle=True)
val = mx.io.NDArrayIter(
to4d(val_img), val_lbl, batch_size)
return (train, val)
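
# Minimal usage sketch: the data directory and batch size are placeholders (not values
# taken from this repository), and download_file is assumed to fetch the four MNIST
# archives into that directory on first use.
if __name__ == '__main__':
    train_iter, val_iter = get_mnist_iter('./mnist_data', batch_size=128)
    for batch in train_iter:
        data = batch.data[0]    # NDArray of shape (128, 1, 28, 28), scaled to [0, 1]
        label = batch.label[0]  # NDArray of shape (128,)
        break                   # one batch is enough for the sketch
    train_iter.reset()          # rewind before reusing the iterator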
``` |
{
"source": "jiaolu/renderdoc",
"score": 2
} |
#### File: tests/D3D11/D3D11_AMD_Shader_Extensions.py
```python
import renderdoc as rd
import rdtest
import struct
class D3D11_AMD_Shader_Extensions(rdtest.TestCase):
demos_test_name = 'D3D11_AMD_Shader_Extensions'
def check_capture(self):
action = self.get_last_action()
self.controller.SetFrameEvent(action.eventId, False)
# Should have barycentrics showing the closest vertex for each pixel in the triangle
# Without relying on barycentric order, ensure that the three pixels are red, green, and blue
pixels = []
picked: rd.PixelValue = self.controller.PickPixel(action.copyDestination, 125, 215, rd.Subresource(),
rd.CompType.UNorm)
pixels.append(picked.floatValue[0:4])
picked: rd.PixelValue = self.controller.PickPixel(action.copyDestination, 200, 85, rd.Subresource(),
rd.CompType.UNorm)
pixels.append(picked.floatValue[0:4])
picked: rd.PixelValue = self.controller.PickPixel(action.copyDestination, 285, 215, rd.Subresource(),
rd.CompType.UNorm)
pixels.append(picked.floatValue[0:4])
        if (not (1.0, 0.0, 0.0, 1.0) in pixels) or (not (0.0, 1.0, 0.0, 1.0) in pixels) or (
                not (0.0, 0.0, 1.0, 1.0) in pixels):
raise rdtest.TestFailureException("Expected red, green and blue in picked pixels. Got {}".format(pixels))
rdtest.log.success("Picked barycentric values are as expected")
# find the cpuMax and gpuMax actions
cpuMax = self.find_action("cpuMax")
gpuMax = self.find_action("gpuMax")
# The values should be identical
cpuMax = int(cpuMax.name[8:])
gpuMax = int(gpuMax.name[8:])
if cpuMax != gpuMax or cpuMax == 0:
raise rdtest.TestFailureException(
"captured cpuMax and gpuMax are not equal and positive: {} vs {}".format(cpuMax, gpuMax))
rdtest.log.success("recorded cpuMax and gpuMax are as expected")
outBuf = self.get_resource_by_name("outBuf")
data = self.controller.GetBufferData(outBuf.resourceId, 0, 8)
replayedGpuMax = struct.unpack("Q", data)[0]
if replayedGpuMax != gpuMax:
raise rdtest.TestFailureException(
"captured gpuMax and replayed gpuMax are not equal: {} vs {}".format(gpuMax, replayedGpuMax))
rdtest.log.success("replayed gpuMax is as expected")
cs = self.get_resource_by_name("cs")
pipe = rd.ResourceId()
refl: rd.ShaderReflection = self.controller.GetShader(pipe, cs.resourceId,
rd.ShaderEntryPoint("main", rd.ShaderStage.Compute))
self.check(len(refl.readWriteResources) == 2)
self.check([rw.name for rw in refl.readWriteResources] == ["inUAV", "outUAV"])
disasm = self.controller.DisassembleShader(pipe, refl, "")
if "amd_u64_atomic" not in disasm:
raise rdtest.TestFailureException(
"Didn't find expected AMD opcode in disassembly: {}".format(disasm))
rdtest.log.success("compute shader disassembly is as expected")
if refl.debugInfo.debuggable:
self.controller.SetFrameEvent(self.find_action("Dispatch").eventId, False)
trace: rd.ShaderDebugTrace = self.controller.DebugThread((0, 0, 0), (0, 0, 0))
if trace.debugger is None:
self.controller.FreeTrace(trace)
raise rdtest.TestFailureException("Couldn't debug compute shader")
cycles, variables = self.process_trace(trace)
if cycles < 3:
raise rdtest.TestFailureException("Compute shader has too few cycles {}".format(cycles))
else:
raise rdtest.TestFailureException(
"Compute shader is listed as non-debuggable: {}".format(refl.debugInfo.debugStatus))
rdtest.log.success("compute shader debugged successfully")
``` |
{
"source": "Jiaoma/expReporter",
"score": 3
} |
#### File: Jiaoma/expReporter/saveTools.py
```python
import torch
import shutil
import os
from os.path import join
from SQLRecoder import SQLRecoder
def saveCheckpoint(state,is_best,filename='checkpoint.pth.tar'):
torch.save(state,filename)
if is_best:
shutil.copyfile(filename,'model_best.pth.tar')
class modelAssistant:
def __init__(self,expName):
pass
# Back up the old code
# changed the idea of modelSaver
'''
class modelSaver:
''''''
Though modelSaver only take down limited information but they are the minist for the following,
you can create new table to take down more information as long as you set the 'expID' from table expSave
and 'repID' from trainSave as foreign keys.
''''''
def __init__(self):
self.sqlRecoder=SQLRecoder()
print('modelSaver want to connect to database')
self.sqlRecoder.wakeUp()
try:
self.sqlRecoder.useExistTable('expSave')
except:
self.sqlRecoder.createTable('expSave',
{
'expID':'INT(10) PRIMARY KEY AUTO_INCREMENT',
'expName':'CHAR(50)',
'dataset':'CHAR(50)',
'configs':'CHAR(100)'
})
try:
self.sqlRecoder.useExistTable('trainSave')
except:
self.sqlRecoder.createTable('trainSave',{
'repID':'INT(10) PRIMARY KEY AUTO_INCREMENT',
'expID':'INT(10)',
'location':'CHAR(50)',
'epoch':'INT(10)',
'loss':'FLOAT',
'CONSTRAINT exp_train_ID':'FOREIGN KEY(expID) REFERENCES expSave(expID)'
})
''''''
The table of test is named testSave, but it's form I can't defined here for the various possibility of test
methods may used in testing.
''''''
def wakeUp(self):
self.sqlRecoder.wakeUp()
def sleep(self):
self.sqlRecoder.sleep()
def setExpInfo(self,expName:str,dataset:str,saveLocation:str,configs:str):
assert isinstance(expName,str)
assert isinstance(dataset,str)
assert isinstance(configs,str)
assert isinstance(saveLocation)
assert os.path.isdir(saveLocation)
self.sqlRecoder.useExistTable('expSave')
self.sqlRecoder.note([expName,dataset,configs])
self.sqlRecoder.note(None,nomore=True)
self.expID=self.sqlRecoder.getLastID()
self.saveLocation=saveLocation
self.lossLowest=99999
def save(self,parameters:dict,fileName:str,epoch:int,loss:float,postlude='.pth.tar',mode='newLowest'):
''''''
:param parameters: dict of parameters of optimizer and net
:param fileName: the name you want save your parameters as without postlude, the default postlude is '.pth.tar'/
And you needn't specify the location here, the location is specified in setExpInfo().
:param epoch: the epoch of current parameters
:param loss: the loss of current parameters
:param postlude: the postlude you want to save your checkpoint as, for most cases you needn't change it.
:param mode: there are two modes here named 'newLowest' and 'onlyOneLowest', the former one/
means write down every checkpoint that is lower than the local lowest, while the/
other means write down only one checkpoint that is the lowest in global history.
:return: None unless error occurs.
''''''
location=join(self.saveLocation,fileName,'_',str(epoch),'_',str(loss),postlude)
torch.save(parameters,location)
self.sqlRecoder.useExistTable('trainSave')
self.sqlRecoder.note([self.expID,location,str(epoch),str(loss)])
self.sqlRecoder.note(None,nomore=True)
if loss<self.lossLowest:
shutil.copyfile(location, join(self.saveLocation,fileName,'_best.pth.tar'))
self.lossLowest=loss
'''
``` |
{
"source": "Jiaoma/fileStr",
"score": 3
} |
#### File: Jiaoma/fileStr/fileInfo.py
```python
from os import listdir
from os.path import join
import os, errno
def getImageNum(rootDir):
return len(listdir(join(rootDir)))
def safeMkdir(path:str):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
```
#### File: Jiaoma/fileStr/judge.py
```python
from collections.abc import Iterable
import numpy as np
def is_image_file(filename,
allow=('png','PNG','jpeg','JPEG','tif','TIF','jpg','JPG','gif','ppm','bmp')):
    if not isinstance(allow, (list, tuple)):
        allow = (allow,)
return any(filename.endswith(extension) for extension in allow)
def is_relative(keywords,needcheck):
    if keywords is None or not isinstance(keywords, Iterable):
        return False
if '<<' in keywords:
keywords=list(keywords.split('<<'))
if not isinstance(keywords,list):
keywords=[keywords]
for i in keywords:
if i in needcheck:
return True
return False
def getLabelIndex(label:np.array,colorDict:dict):
'''
:param label: h,w,c
:param colorDict: {(R,G,B):[index,name]}
:return: h,w,1
'''
assert len(label.shape)==3 # h,w,c
h,w,c=label.shape
assert c==3
label=label.reshape(-1,c)
indexLabel=np.asarray([colorDict[tuple(i.tolist())][0] for i in label])
indexLabel=indexLabel.reshape((h,w,1))
return indexLabel
class labelConvert:
def __init__(self,colorDict:dict):
self.colorDict=colorDict
self.index=[0 for _i in range(len(colorDict))]
for key,value in colorDict.items():
self.index[value[0]]=key
def __call__(self,label:np.array):
assert len(label.shape) == 3 # h,w,c
h, w, c = label.shape
assert c == 3
label = label.reshape(-1, c)
indexLabel = np.asarray([self.colorDict[tuple(i.tolist())][0] for i in label])
indexLabel = indexLabel.reshape((h, w, 1))
return indexLabel
def goBack(self,label:np.array):
# shape : n *1 *h*w
n,c,h,w=label.shape
assert c==1
label=label.reshape(-1)
labelNew=np.asarray([self.index[int(i)] for i in label])
labelNew=labelNew.reshape(n,h,w,3)
labelNew=np.transpose(labelNew,(0,3,1,2))
return labelNew
if __name__=='__main__':
print(is_relative("34_",'34_training.tif'))
print(is_image_file('Image_01L_2ndHO.png',allow='jpg'))
    colorDict={(0,200,0):[0,'paddy field'],(150,250,0):[1,'irrigated land'],
               (150,200,150):[2,'dry cropland'],(200,0,200):[3,'garden plot'],
               (150,0,250):[4,'arbor forest'],(150,150,250):[5,'shrub forest'],
               (250,200,0):[6,'natural grassland'],(200,200,0):[7,'artificial grassland'],
               (200,0,0):[8,'industrial land'],(250,0,150):[9,'urban residential'],
               (200,150,150):[10,'rural residential'],(250,150,150):[11,'transportation'],
               (0,0,200):[12,'river'],(0,150,200):[13,'lake'],
               (0,200,250):[14,'pond'],(0,0,0):[15,'other']
}
import numpy as np
b=np.zeros((2,2,3))
b_=b.reshape(-1,3)
label=[colorDict[tuple(i.tolist())] for i in b_]
print(label)
b=np.zeros((64,64,3))
lc=labelConvert(colorDict)
c=lc(b)
print(c.shape)
d=np.zeros((4,1,64,64))
e=lc.goBack(d)
print(e.shape)
``` |
{
"source": "jiaoml1996/mmaction2",
"score": 2
} |
#### File: tests/test_data/test_ava_dataset.py
```python
import os.path as osp
import mmcv
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.datasets import AVADataset
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
class TestAVADataset:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(
osp.dirname(osp.dirname(__file__)), 'data', 'test_ava_dataset')
cls.ann_file = osp.join(cls.data_prefix, 'ava_sample.csv')
cls.exclude_file = osp.join(cls.data_prefix,
'ava_excluded_timestamps_sample.csv')
cls.proposal_file = osp.join(cls.data_prefix,
'ava_proposals_sample.pkl')
cls.pipeline = [
dict(dict(type='SampleAVAFrames', clip_len=32, frame_interval=2))
]
cls.proposal = mmcv.load(cls.proposal_file)
def test_ava_dataset(self):
target_keys = [
'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info',
'fps', 'ann'
]
ann_keys = ['gt_labels', 'gt_bboxes', 'entity_ids']
        pkl_keys = ['0f39OWEqJ24,0902', '0f39OWEqJ24,0903', '<KEY>']
ava_dataset = AVADataset(
self.ann_file,
self.exclude_file,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert check_keys_contain(ava_dataset.proposals.keys(), pkl_keys)
assert check_keys_contain(ava_infos[0].keys(), target_keys)
assert check_keys_contain(ava_infos[0]['ann'].keys(), ann_keys)
assert len(ava_infos) == 1
assert ava_infos[0]['frame_dir'] == osp.join(self.data_prefix,
'0f39OWEqJ24')
assert ava_infos[0]['video_id'] == '0f39OWEqJ24'
assert ava_infos[0]['timestamp'] == 902
assert ava_infos[0]['img_key'] == '0f39OWEqJ24,0902'
assert ava_infos[0]['shot_info'] == (0, 27000)
assert ava_infos[0]['fps'] == 30
assert len(ava_infos[0]['ann']) == 3
target_labels = np.array([12, 17, 79])
labels = np.zeros([81])
labels[target_labels] = 1.
target_labels = labels[None, ...]
assert_array_equal(ava_infos[0]['ann']['gt_labels'], target_labels)
assert_array_equal(ava_infos[0]['ann']['gt_bboxes'],
np.array([[0.031, 0.162, 0.67, 0.995]]))
assert_array_equal(ava_infos[0]['ann']['entity_ids'], np.array([0]))
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert len(ava_infos) == 3
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert len(ava_infos) == 3
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
def test_ava_pipeline(self):
target_keys = [
'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info',
'fps', 'filename_tmpl', 'modality', 'start_index',
'timestamp_start', 'timestamp_end', 'proposals', 'scores',
'frame_inds', 'clip_len', 'frame_interval', 'gt_labels',
'gt_bboxes', 'entity_ids'
]
ava_dataset = AVADataset(
self.ann_file,
self.exclude_file,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
result = ava_dataset[0]
assert check_keys_contain(result.keys(), target_keys)
assert result['filename_tmpl'] == 'img_{:05}.jpg'
assert result['modality'] == 'RGB'
assert result['start_index'] == 1
assert result['timestamp_start'] == 900
assert result['timestamp_end'] == 1800
assert_array_equal(result['proposals'],
np.array([[0.011, 0.157, 0.655, 0.983]]))
assert_array_equal(result['scores'], np.array([0.998163]))
assert result['clip_len'] == 32
assert result['frame_interval'] == 2
assert len(result['frame_inds']) == 32
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
# Try to get a sample
result = ava_dataset[0]
assert result['filename_tmpl'] == 'img_{:05}.jpg'
assert result['modality'] == 'RGB'
assert result['start_index'] == 1
assert result['timestamp_start'] == 900
assert result['timestamp_end'] == 1800
def test_ava_evaluate(self):
data_prefix = osp.join(
osp.dirname(__file__), '../data/test_eval_detection')
ann_file = osp.join(data_prefix, 'gt.csv')
label_file = osp.join(data_prefix, 'action_list.txt')
ava_dataset = AVADataset(
ann_file, None, [], label_file=label_file, num_classes=4)
fake_result = [[
np.array([[0.362, 0.156, 0.969, 0.666, 0.106],
[0.442, 0.083, 0.721, 0.947, 0.162]]),
np.array([[0.288, 0.365, 0.766, 0.551, 0.706],
[0.178, 0.296, 0.707, 0.995, 0.223]]),
np.array([[0.417, 0.167, 0.843, 0.939, 0.015],
[0.35, 0.421, 0.57, 0.689, 0.427]])
],
[
np.array([[0.256, 0.338, 0.726, 0.799, 0.563],
[0.071, 0.256, 0.64, 0.75, 0.297]]),
np.array([[0.326, 0.036, 0.513, 0.991, 0.405],
[0.351, 0.035, 0.729, 0.936, 0.945]]),
np.array([[0.051, 0.005, 0.975, 0.942, 0.424],
[0.347, 0.05, 0.97, 0.944, 0.396]])
],
[
np.array([[0.39, 0.087, 0.833, 0.616, 0.447],
[0.461, 0.212, 0.627, 0.527, 0.036]]),
np.array([[0.022, 0.394, 0.93, 0.527, 0.109],
[0.208, 0.462, 0.874, 0.948, 0.954]]),
np.array([[0.206, 0.456, 0.564, 0.725, 0.685],
[0.106, 0.445, 0.782, 0.673, 0.367]])
]]
res = ava_dataset.evaluate(fake_result)
assert_array_almost_equal(res['[email protected]@100'], 0.33333333)
assert_array_almost_equal(res['AR@100'], 0.15833333)
assert_array_almost_equal(res['PascalBoxes_Precision/[email protected]'],
0.027777778)
```
#### File: tests/test_models/test_common_modules.py
```python
import pytest
import torch
from mmaction.models import Conv2plus1d, ConvAudio
def test_conv2plus1d():
with pytest.raises(AssertionError):
# Length of kernel size, stride and padding must be the same
Conv2plus1d(3, 8, (2, 2))
conv_2plus1d = Conv2plus1d(3, 8, 2)
conv_2plus1d.init_weights()
assert torch.equal(conv_2plus1d.bn_s.weight,
torch.ones_like(conv_2plus1d.bn_s.weight))
assert torch.equal(conv_2plus1d.bn_s.bias,
torch.zeros_like(conv_2plus1d.bn_s.bias))
x = torch.rand(1, 3, 8, 256, 256)
output = conv_2plus1d(x)
assert output.shape == torch.Size([1, 8, 7, 255, 255])
def test_conv_audio():
conv_audio = ConvAudio(3, 8, 3)
conv_audio.init_weights()
x = torch.rand(1, 3, 8, 8)
output = conv_audio(x)
assert output.shape == torch.Size([1, 16, 8, 8])
conv_audio_sum = ConvAudio(3, 8, 3, op='sum')
output = conv_audio_sum(x)
assert output.shape == torch.Size([1, 8, 8, 8])
``` |
{
"source": "jiaomuwwl/tuya-home-assistant",
"score": 2
} |
#### File: custom_components/tuya/config_flow.py
```python
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.helpers import config_entry_oauth2_flow
from .const import (
CONF_COUNTRY_CODE,
DOMAIN,
CONF_ENDPOINT,
CONF_ACCESS_ID,
CONF_ACCESS_SECRET,
CONF_USERNAME,
CONF_PASSWORD,
CONF_PROJECT_TYPE,
TUYA_ENDPOINT,
TUYA_PROJECT_TYPE
)
from tuya_iot import TuyaOpenAPI, ProjectType
RESULT_SINGLE_INSTANCE = "single_instance_allowed"
_LOGGER = logging.getLogger(__name__)
## Project Type
DATA_SCHEMA_PROJECT_TYPE = vol.Schema(
{
vol.Required(CONF_PROJECT_TYPE): vol.In(TUYA_PROJECT_TYPE)
}
)
## INDUSTY_SOLUTIONS Schema
DATA_SCHEMA_INDUSTY_SOLUTIONS = vol.Schema(
{
vol.Required(CONF_ENDPOINT): vol.In(TUYA_ENDPOINT),
vol.Required(CONF_ACCESS_ID): str,
vol.Required(CONF_ACCESS_SECRET): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
)
## SMART_HOME Schema
DATA_SCHEMA_SMART_HOME = vol.Schema(
{
vol.Required(CONF_ENDPOINT): vol.In(TUYA_ENDPOINT),
vol.Required(CONF_COUNTRY_CODE): str,
vol.Required(CONF_ACCESS_ID): str,
vol.Required(CONF_ACCESS_SECRET): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
)
# @config_entries.HANDLERS.register(DOMAIN)
# class TuyaFlowHandler(config_entry_oauth2_flow.AbstractOAuth2FlowHandler):
# """Config flow to handle Tuya OAuth2 authentication."""
# DOMAIN = DOMAIN
# CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
# @property
# def logger(self) -> logging.Logger:
# """Return logger."""
# return logging.getLogger(__name__)
# async def async_step_user(self, user_input=None):
# """Handle a flow start."""
# if self.hass.config_entries.async_entries(DOMAIN):
# return self.async_abort(reason="single_instance_allowed")
# return await super().async_step_user(user_input)
class TuyaConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
conf_project_type = None
def _try_login(self, user_input):
print('TuyaConfigFlow._try_login start, user_input:', user_input)
project_type = ProjectType(user_input[CONF_PROJECT_TYPE])
api = TuyaOpenAPI(user_input[CONF_ENDPOINT], user_input[CONF_ACCESS_ID], user_input[CONF_ACCESS_SECRET], project_type)
api.set_dev_channel('hass')
response = api.login(user_input[CONF_USERNAME], user_input[CONF_PASSWORD]) if project_type == ProjectType.INDUSTY_SOLUTIONS else\
api.login(user_input[CONF_USERNAME], user_input[CONF_PASSWORD], user_input[CONF_COUNTRY_CODE])
print('TuyaConfigFlow._try_login finish, response:', response)
return response
async def async_step_import(self, user_input=None):
return await self.async_step_user(user_input, is_import=True)
async def async_step_project_type(self, user_input=None):
self.conf_project_type = user_input[CONF_PROJECT_TYPE]
self.project_type = ProjectType(self.conf_project_type)
return self.async_show_form(
step_id='user',
data_schema=DATA_SCHEMA_SMART_HOME
) if self.project_type == ProjectType.SMART_HOME else self.async_show_form(
step_id='user',
data_schema=DATA_SCHEMA_INDUSTY_SOLUTIONS
)
async def async_step_user(self, user_input=None, is_import=False):
        print('TuyaConfigFlow.async_step_user start, user_input=', user_input)
if self._async_current_entries():
return self.async_abort(reason=RESULT_SINGLE_INSTANCE)
errors = {}
if user_input is not None:
if self.conf_project_type is not None:
user_input[CONF_PROJECT_TYPE] = self.conf_project_type
response = await self.hass.async_add_executor_job(self._try_login, user_input)
if response.get('success', False):
print('TuyaConfigFlow.async_step_user login success')
return self.async_create_entry(
title=user_input[CONF_ACCESS_ID],
data=user_input,
)
else:
errors['base'] = 'code={}, msg={}'.format(response.get('code', 0), response.get('msg', ''))
if is_import == True:
return self.async_abort(reason=errors['base'])
return self.async_show_form(
step_id='project_type',
data_schema=DATA_SCHEMA_PROJECT_TYPE,
errors=errors
)
```
#### File: custom_components/tuya/cover.py
```python
import logging
import json
from typing import Any, Dict, List, Optional, Tuple, cast
from homeassistant.core import HomeAssistant, Config
from homeassistant.config_entries import ConfigEntry
from homeassistant.components.cover import (
CoverEntity,
DEVICE_CLASS_CURTAIN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
SUPPORT_OPEN,
SUPPORT_CLOSE,
DOMAIN as DEVICE_DOMAIN
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect
)
from .const import (
DOMAIN,
TUYA_HA_TUYA_MAP,
TUYA_DISCOVERY_NEW,
TUYA_DEVICE_MANAGER
)
from .base import TuyaHaDevice
_LOGGER = logging.getLogger(__name__)
TUYA_HA_MAP = {
"cl": "cover", # Curtain
"clkg": "cover", # Curtain Switch
}
# Curtain
# https://developer.tuya.com/en/docs/iot/f?id=K9gf46o5mtfyc
DPCODE_CONTROL = 'control'
DPCODE_PERCENT_CONTROL = 'percent_control'
ATTR_POSITION = 'position'
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities):
"""Set up tuya cover dynamically through tuya discovery."""
print("cover init")
hass.data[DOMAIN][TUYA_HA_TUYA_MAP].update(TUYA_HA_MAP)
async def async_discover_device(dev_ids):
"""Discover and add a discovered tuya cover."""
print("cover add->", dev_ids)
if not dev_ids:
return
entities = await hass.async_add_executor_job(
_setup_entities,
hass,
dev_ids
)
async_add_entities(entities)
async_dispatcher_connect(
hass, TUYA_DISCOVERY_NEW.format(DEVICE_DOMAIN), async_discover_device
)
device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]
device_ids = []
for (device_id, device) in device_manager.deviceMap.items():
if device.category in TUYA_HA_MAP.keys():
device_ids.append(device_id)
await async_discover_device(device_ids)
def _setup_entities(hass, device_ids: List):
"""Set up Tuya Cover."""
device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]
entities = []
for device_id in device_ids:
device = device_manager.deviceMap[device_id]
if device is None:
continue
entities.append(TuyaHaCover(device, device_manager))
return entities
class TuyaHaCover(TuyaHaDevice, CoverEntity):
"""Tuya Switch Device."""
platform = 'cover'
# property
@property
def device_class(self) -> str:
"""Return Entity Properties."""
return DEVICE_CLASS_CURTAIN
@property
def is_closed(self) -> bool:
"""Return if the cover is closed or not."""
return None
@property
def current_cover_position(self) -> int:
return self.tuyaDevice.status.get(DPCODE_PERCENT_CONTROL, 0)
def open_cover(self, **kwargs: Any) -> None:
"""Open the cover."""
self.tuyaDeviceManager.sendCommands(
self.tuyaDevice.id, [{'code': DPCODE_CONTROL, 'value': 'open'}])
def close_cover(self, **kwargs: Any) -> None:
"""Close cover."""
self.tuyaDeviceManager.sendCommands(
self.tuyaDevice.id, [{'code': DPCODE_CONTROL, 'value': 'close'}])
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.tuyaDeviceManager.sendCommands(
self.tuyaDevice.id, [{'code': DPCODE_CONTROL, 'value': 'stop'}])
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
print("cover-->", kwargs)
self.tuyaDeviceManager.sendCommands(
self.tuyaDevice.id, [{'code': DPCODE_PERCENT_CONTROL, 'value': kwargs[ATTR_POSITION]}])
@property
def supported_features(self):
"""Flag supported features."""
supports = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
if DPCODE_PERCENT_CONTROL in self.tuyaDevice.status:
supports = supports | SUPPORT_SET_POSITION
return supports
``` |
{
"source": "JiaoPaner/craft-det",
"score": 2
} |
#### File: JiaoPaner/craft-det/x2paddle_code.py
```python
import paddle
import math
from x2paddle.op_mapper.onnx2paddle import onnx_custom_layer as x2paddle_nn
class ONNXModel(paddle.nn.Layer):
def __init__(self):
super(ONNXModel, self).__init__()
self.conv0 = paddle.nn.Conv2D(in_channels=3, out_channels=64, kernel_size=[3, 3], padding=1)
self.batchnorm0 = paddle.nn.BatchNorm(num_channels=64, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu0 = paddle.nn.ReLU()
self.conv1 = paddle.nn.Conv2D(in_channels=64, out_channels=64, kernel_size=[3, 3], padding=1)
self.batchnorm1 = paddle.nn.BatchNorm(num_channels=64, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu1 = paddle.nn.ReLU()
self.pool0 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
self.conv2 = paddle.nn.Conv2D(in_channels=64, out_channels=128, kernel_size=[3, 3], padding=1)
self.batchnorm2 = paddle.nn.BatchNorm(num_channels=128, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu2 = paddle.nn.ReLU()
self.conv3 = paddle.nn.Conv2D(in_channels=128, out_channels=128, kernel_size=[3, 3], padding=1)
self.batchnorm3 = paddle.nn.BatchNorm(num_channels=128, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu3 = paddle.nn.ReLU()
self.pool1 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
self.conv4 = paddle.nn.Conv2D(in_channels=128, out_channels=256, kernel_size=[3, 3], padding=1)
self.batchnorm4 = paddle.nn.BatchNorm(num_channels=256, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu4 = paddle.nn.ReLU()
self.conv5 = paddle.nn.Conv2D(in_channels=256, out_channels=256, kernel_size=[3, 3], padding=1)
self.batchnorm5 = paddle.nn.BatchNorm(num_channels=256, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu5 = paddle.nn.ReLU()
self.conv6 = paddle.nn.Conv2D(in_channels=256, out_channels=256, kernel_size=[3, 3], padding=1)
self.batchnorm6 = paddle.nn.BatchNorm(num_channels=256, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu6 = paddle.nn.ReLU()
self.pool2 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
self.conv7 = paddle.nn.Conv2D(in_channels=256, out_channels=512, kernel_size=[3, 3], padding=1)
self.batchnorm7 = paddle.nn.BatchNorm(num_channels=512, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu7 = paddle.nn.ReLU()
self.conv8 = paddle.nn.Conv2D(in_channels=512, out_channels=512, kernel_size=[3, 3], padding=1)
self.batchnorm8 = paddle.nn.BatchNorm(num_channels=512, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu8 = paddle.nn.ReLU()
self.conv9 = paddle.nn.Conv2D(in_channels=512, out_channels=512, kernel_size=[3, 3], padding=1)
self.batchnorm9 = paddle.nn.BatchNorm(num_channels=512, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu9 = paddle.nn.ReLU()
self.pool3 = paddle.nn.MaxPool2D(kernel_size=[2, 2], stride=2)
self.conv10 = paddle.nn.Conv2D(in_channels=512, out_channels=512, kernel_size=[3, 3], padding=1)
self.batchnorm10 = paddle.nn.BatchNorm(num_channels=512, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu10 = paddle.nn.ReLU()
self.conv11 = paddle.nn.Conv2D(in_channels=512, out_channels=512, kernel_size=[3, 3], padding=1)
self.batchnorm11 = paddle.nn.BatchNorm(num_channels=512, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.pool4 = paddle.nn.MaxPool2D(kernel_size=[3, 3], stride=1, padding=1)
self.conv12 = paddle.nn.Conv2D(in_channels=512, out_channels=1024, kernel_size=[3, 3], padding=6, dilation=6)
self.conv13 = paddle.nn.Conv2D(in_channels=1024, out_channels=1024, kernel_size=[1, 1])
self.conv14 = paddle.nn.Conv2D(in_channels=1536, out_channels=512, kernel_size=[1, 1])
self.batchnorm12 = paddle.nn.BatchNorm(num_channels=512, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu11 = paddle.nn.ReLU()
self.conv15 = paddle.nn.Conv2D(in_channels=512, out_channels=256, kernel_size=[3, 3], padding=1)
self.batchnorm13 = paddle.nn.BatchNorm(num_channels=256, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu12 = paddle.nn.ReLU()
self.conv16 = paddle.nn.Conv2D(in_channels=768, out_channels=256, kernel_size=[1, 1])
self.batchnorm14 = paddle.nn.BatchNorm(num_channels=256, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu13 = paddle.nn.ReLU()
self.conv17 = paddle.nn.Conv2D(in_channels=256, out_channels=128, kernel_size=[3, 3], padding=1)
self.batchnorm15 = paddle.nn.BatchNorm(num_channels=128, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu14 = paddle.nn.ReLU()
self.conv18 = paddle.nn.Conv2D(in_channels=384, out_channels=128, kernel_size=[1, 1])
self.batchnorm16 = paddle.nn.BatchNorm(num_channels=128, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu15 = paddle.nn.ReLU()
self.conv19 = paddle.nn.Conv2D(in_channels=128, out_channels=64, kernel_size=[3, 3], padding=1)
self.batchnorm17 = paddle.nn.BatchNorm(num_channels=64, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu16 = paddle.nn.ReLU()
self.conv20 = paddle.nn.Conv2D(in_channels=192, out_channels=64, kernel_size=[1, 1])
self.batchnorm18 = paddle.nn.BatchNorm(num_channels=64, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu17 = paddle.nn.ReLU()
self.conv21 = paddle.nn.Conv2D(in_channels=64, out_channels=32, kernel_size=[3, 3], padding=1)
self.batchnorm19 = paddle.nn.BatchNorm(num_channels=32, momentum=0.8999999761581421, epsilon=9.999999747378752e-06, is_test=True)
self.relu18 = paddle.nn.ReLU()
self.conv22 = paddle.nn.Conv2D(in_channels=32, out_channels=32, kernel_size=[3, 3], padding=1)
self.relu19 = paddle.nn.ReLU()
self.conv23 = paddle.nn.Conv2D(in_channels=32, out_channels=32, kernel_size=[3, 3], padding=1)
self.relu20 = paddle.nn.ReLU()
self.conv24 = paddle.nn.Conv2D(in_channels=32, out_channels=16, kernel_size=[3, 3], padding=1)
self.relu21 = paddle.nn.ReLU()
self.conv25 = paddle.nn.Conv2D(in_channels=16, out_channels=16, kernel_size=[1, 1])
self.relu22 = paddle.nn.ReLU()
self.conv26 = paddle.nn.Conv2D(in_channels=16, out_channels=2, kernel_size=[1, 1])
def forward(self, x2paddle_input):
x2paddle_205 = paddle.full(dtype='int64', shape=[1], fill_value=2)
x2paddle_208 = paddle.full(dtype='int64', shape=[1], fill_value=3)
x2paddle_231 = paddle.full(dtype='int64', shape=[1], fill_value=2)
x2paddle_234 = paddle.full(dtype='int64', shape=[1], fill_value=3)
x2paddle_257 = paddle.full(dtype='int64', shape=[1], fill_value=2)
x2paddle_260 = paddle.full(dtype='int64', shape=[1], fill_value=3)
x2paddle_155 = self.conv0(x2paddle_input)
x2paddle_156 = self.batchnorm0(x2paddle_155)
x2paddle_157 = self.relu0(x2paddle_156)
x2paddle_158 = self.conv1(x2paddle_157)
x2paddle_159 = self.batchnorm1(x2paddle_158)
x2paddle_160 = self.relu1(x2paddle_159)
x2paddle_161 = self.pool0(x2paddle_160)
x2paddle_162 = self.conv2(x2paddle_161)
x2paddle_163 = self.batchnorm2(x2paddle_162)
x2paddle_164 = self.relu2(x2paddle_163)
x2paddle_165 = self.conv3(x2paddle_164)
x2paddle_166 = self.batchnorm3(x2paddle_165)
x2paddle_167 = self.relu3(x2paddle_166)
x2paddle_168 = self.pool1(x2paddle_167)
x2paddle_256 = paddle.shape(input=x2paddle_167)
x2paddle_256 = paddle.cast(x=x2paddle_256, dtype='int64')
x2paddle_259 = paddle.shape(input=x2paddle_167)
x2paddle_259 = paddle.cast(x=x2paddle_259, dtype='int64')
x2paddle_169 = self.conv4(x2paddle_168)
x2paddle_258 = paddle.gather(x=x2paddle_256, index=x2paddle_257)
x2paddle_261 = paddle.gather(x=x2paddle_259, index=x2paddle_260)
x2paddle_170 = self.batchnorm4(x2paddle_169)
x2paddle_262 = paddle.reshape(x=x2paddle_258, shape=[1])
x2paddle_263 = paddle.reshape(x=x2paddle_261, shape=[1])
x2paddle_171 = self.relu4(x2paddle_170)
x2paddle_264 = paddle.concat(x=[x2paddle_262, x2paddle_263])
x2paddle_172 = self.conv5(x2paddle_171)
x2paddle_271 = paddle.cast(x=x2paddle_264, dtype='int64')
x2paddle_173 = self.batchnorm5(x2paddle_172)
x2paddle_174 = self.relu5(x2paddle_173)
x2paddle_175 = self.conv6(x2paddle_174)
x2paddle_230 = paddle.shape(input=x2paddle_174)
x2paddle_230 = paddle.cast(x=x2paddle_230, dtype='int64')
x2paddle_233 = paddle.shape(input=x2paddle_174)
x2paddle_233 = paddle.cast(x=x2paddle_233, dtype='int64')
x2paddle_176 = self.batchnorm6(x2paddle_175)
x2paddle_232 = paddle.gather(x=x2paddle_230, index=x2paddle_231)
x2paddle_235 = paddle.gather(x=x2paddle_233, index=x2paddle_234)
x2paddle_177 = self.relu6(x2paddle_176)
x2paddle_236 = paddle.reshape(x=x2paddle_232, shape=[1])
x2paddle_237 = paddle.reshape(x=x2paddle_235, shape=[1])
x2paddle_178 = self.pool2(x2paddle_177)
x2paddle_238 = paddle.concat(x=[x2paddle_236, x2paddle_237])
x2paddle_179 = self.conv7(x2paddle_178)
x2paddle_245 = paddle.cast(x=x2paddle_238, dtype='int64')
x2paddle_180 = self.batchnorm7(x2paddle_179)
x2paddle_181 = self.relu7(x2paddle_180)
x2paddle_182 = self.conv8(x2paddle_181)
x2paddle_183 = self.batchnorm8(x2paddle_182)
x2paddle_184 = self.relu8(x2paddle_183)
x2paddle_185 = self.conv9(x2paddle_184)
x2paddle_204 = paddle.shape(input=x2paddle_184)
x2paddle_204 = paddle.cast(x=x2paddle_204, dtype='int64')
x2paddle_207 = paddle.shape(input=x2paddle_184)
x2paddle_207 = paddle.cast(x=x2paddle_207, dtype='int64')
x2paddle_186 = self.batchnorm9(x2paddle_185)
x2paddle_206 = paddle.gather(x=x2paddle_204, index=x2paddle_205)
x2paddle_209 = paddle.gather(x=x2paddle_207, index=x2paddle_208)
x2paddle_187 = self.relu9(x2paddle_186)
x2paddle_210 = paddle.reshape(x=x2paddle_206, shape=[1])
x2paddle_211 = paddle.reshape(x=x2paddle_209, shape=[1])
x2paddle_188 = self.pool3(x2paddle_187)
x2paddle_212 = paddle.concat(x=[x2paddle_210, x2paddle_211])
x2paddle_189 = self.conv10(x2paddle_188)
x2paddle_219 = paddle.cast(x=x2paddle_212, dtype='int64')
x2paddle_190 = self.batchnorm10(x2paddle_189)
x2paddle_191 = self.relu10(x2paddle_190)
x2paddle_192 = self.conv11(x2paddle_191)
x2paddle_193 = self.batchnorm11(x2paddle_192)
x2paddle_194 = self.pool4(x2paddle_193)
x2paddle_195 = self.conv12(x2paddle_194)
x2paddle_196 = self.conv13(x2paddle_195)
x2paddle_197 = paddle.concat(x=[x2paddle_196, x2paddle_193], axis=1)
x2paddle_198 = self.conv14(x2paddle_197)
x2paddle_199 = self.batchnorm12(x2paddle_198)
x2paddle_200 = self.relu11(x2paddle_199)
x2paddle_201 = self.conv15(x2paddle_200)
x2paddle_202 = self.batchnorm13(x2paddle_201)
x2paddle_203 = self.relu12(x2paddle_202)
x2paddle_214 = paddle.shape(input=x2paddle_203)
x2paddle_214 = paddle.cast(x=x2paddle_214, dtype='int64')
x2paddle_218 = paddle.slice(input=x2paddle_214, axes=[0], starts=[0], ends=[2])
x2paddle_220 = paddle.concat(x=[x2paddle_218, x2paddle_219])
x2paddle_220_nc,x2paddle_220_hw = paddle.split(x=x2paddle_220, num_or_sections=[2, 2])
x2paddle_220_hw = paddle.cast(x=x2paddle_220_hw, dtype='int32')
x2paddle_222 = paddle.nn.functional.interpolate(x=x2paddle_203, size=x2paddle_220_hw, mode='bilinear')
x2paddle_223 = paddle.concat(x=[x2paddle_222, x2paddle_184], axis=1)
x2paddle_224 = self.conv16(x2paddle_223)
x2paddle_225 = self.batchnorm14(x2paddle_224)
x2paddle_226 = self.relu13(x2paddle_225)
x2paddle_227 = self.conv17(x2paddle_226)
x2paddle_228 = self.batchnorm15(x2paddle_227)
x2paddle_229 = self.relu14(x2paddle_228)
x2paddle_240 = paddle.shape(input=x2paddle_229)
x2paddle_240 = paddle.cast(x=x2paddle_240, dtype='int64')
x2paddle_244 = paddle.slice(input=x2paddle_240, axes=[0], starts=[0], ends=[2])
x2paddle_246 = paddle.concat(x=[x2paddle_244, x2paddle_245])
x2paddle_246_nc,x2paddle_246_hw = paddle.split(x=x2paddle_246, num_or_sections=[2, 2])
x2paddle_246_hw = paddle.cast(x=x2paddle_246_hw, dtype='int32')
x2paddle_248 = paddle.nn.functional.interpolate(x=x2paddle_229, size=x2paddle_246_hw, mode='bilinear')
x2paddle_249 = paddle.concat(x=[x2paddle_248, x2paddle_174], axis=1)
x2paddle_250 = self.conv18(x2paddle_249)
x2paddle_251 = self.batchnorm16(x2paddle_250)
x2paddle_252 = self.relu15(x2paddle_251)
x2paddle_253 = self.conv19(x2paddle_252)
x2paddle_254 = self.batchnorm17(x2paddle_253)
x2paddle_255 = self.relu16(x2paddle_254)
x2paddle_266 = paddle.shape(input=x2paddle_255)
x2paddle_266 = paddle.cast(x=x2paddle_266, dtype='int64')
x2paddle_270 = paddle.slice(input=x2paddle_266, axes=[0], starts=[0], ends=[2])
x2paddle_272 = paddle.concat(x=[x2paddle_270, x2paddle_271])
x2paddle_272_nc,x2paddle_272_hw = paddle.split(x=x2paddle_272, num_or_sections=[2, 2])
x2paddle_272_hw = paddle.cast(x=x2paddle_272_hw, dtype='int32')
x2paddle_274 = paddle.nn.functional.interpolate(x=x2paddle_255, size=x2paddle_272_hw, mode='bilinear')
x2paddle_275 = paddle.concat(x=[x2paddle_274, x2paddle_167], axis=1)
x2paddle_276 = self.conv20(x2paddle_275)
x2paddle_277 = self.batchnorm18(x2paddle_276)
x2paddle_278 = self.relu17(x2paddle_277)
x2paddle_279 = self.conv21(x2paddle_278)
x2paddle_280 = self.batchnorm19(x2paddle_279)
x2paddle_281 = self.relu18(x2paddle_280)
x2paddle_282 = self.conv22(x2paddle_281)
x2paddle_283 = self.relu19(x2paddle_282)
x2paddle_284 = self.conv23(x2paddle_283)
x2paddle_285 = self.relu20(x2paddle_284)
x2paddle_286 = self.conv24(x2paddle_285)
x2paddle_287 = self.relu21(x2paddle_286)
x2paddle_288 = self.conv25(x2paddle_287)
x2paddle_289 = self.relu22(x2paddle_288)
x2paddle_290 = self.conv26(x2paddle_289)
x2paddle_output = paddle.transpose(x=x2paddle_290, perm=[0, 2, 3, 1])
return x2paddle_output, x2paddle_281
def main(x2paddle_input):
# There are 1 inputs.
# x2paddle_input: shape-[1, 3, -1, -1], type-float32.
paddle.disable_static()
params = paddle.load('/Volumes/storage/projects/python/craft-det/pd_model/model.pdparams')
model = ONNXModel()
model.set_dict(params, use_structured_name=True)
model.eval()
out = model(x2paddle_input)
return out
if __name__ == '__main__':
from skimage import io
import numpy as np
import imgproc
import craft_utils
import cv2
image_path = "./data/ug03.jpg"
image = cv2.imread(image_path)
#image = cv2.resize(image,(96,64))
print(image.shape)
# image, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, 960,
# interpolation=cv2.INTER_LINEAR,
# mag_ratio=1.5)
# print(image.shape, target_ratio, size_heatmap)
# ratio_h = ratio_w = 1 / target_ratio
x = imgproc.normalizeMeanVariance(image)
x = paddle.Tensor(x)
x = paddle.transpose(x,(2,0,1))
x = paddle.unsqueeze(x,0)
y, feature = main(x)
# make score and link map
score_text = y[0, :, :, 0].numpy()
score_link = y[0, :, :, 1].numpy()
#print(score_text.shape,score_link.shape)
# Post-processing
#boxes, polys = craft_utils.getDetBoxes(score_text, score_link, text_threshold=0.7, link_threshold=0.4, low_text=0.4, poly=False)
boxes, _, _ = craft_utils.getDetBoxes_core(score_text, score_link, text_threshold=0.7, link_threshold=0.4, low_text=0.4)
# coordinate adjustment
boxes = craft_utils.adjustResultCoordinates(boxes, 1, 1)
for box in boxes:
box = np.array(box).astype(np.int32)
print(box)
print("==================")
cv2.polylines(image, [box.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)
cv2.imwrite("./data/vis.jpg",image)
``` |
{
"source": "jiaoqiyuan/Tests",
"score": 4
} |
#### File: Python/gui/gui.py
```python
from tkinter import *
window = Tk()
window.title("Welcome to LikeGeeks app")
#lbl = Label(window, text="Hello", font=("Arial Bold", 50))
lbl = Label(window, text="Hello")
lbl.grid(column=0, row=0)
window.geometry('350x200')
txt = Entry(window, width=10, state='disabled')
txt.grid(column=2, row=0)
txt.focus()
def clicked():
res = "Welcome to " + txt.get()
lbl.configure(text = res)
btn = Button(window, text="Click Me", bg="orange", fg="red", command=clicked)
btn.grid(column=1, row=0)
window.mainloop()
```
#### File: python-practice/chapter8-func/make_album.py
```python
def make_album(name, album, songs=''):
singer = {'name': name, 'album': album}
if songs:
singer['songs'] = songs
return singer
singer1 = make_album('jay', 'fantasy', '12')
print(singer1)
singer2 = make_album('jollin', '72bian')
print(singer2)
singer3 = make_album('eason', '10nian')
print(singer3)
while True:
singer_name = input("\nPlease input a singer's name:")
if singer_name == 'q':
break
album_name = input("input his/her album:")
if album_name == 'q':
break
    singer = make_album(singer_name, album_name)
print(singer)
```
#### File: python-practice/chapter8-func/make_shirt.py
```python
def make_shirt(size, string='I love python'):
print("\nThe shirt's size is:" + size + ", string is:" + string + ".")
make_shirt('38','Hello World')
make_shirt(string='Hello World', size='39')
make_shirt('25')
make_shirt('35')
make_shirt('45')
```
#### File: python-practice/chapter9-class/user.py
```python
class User():
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
self.login_attempts = 0
def describe_user(self):
print("The user's full name is: " + self.first_name.title() + " " + self.last_name.title())
def greet_user(self):
print("Welcome, " + self.first_name.title() + " " + self.last_name.title())
def increment_login_attempts(self):
self.login_attempts += 1
def reset_login_attempts(self):
self.login_attempts = 0
#user1 = User('jiao', 'qiyuan')
#user2 = User('liu', 'cuicui')
#user3 = User('jiao', 'xuhan')
#user1.describe_user()
#user1.greet_user()
#user2.describe_user()
#user2.greet_user()
#user3.describe_user()
#user3.greet_user()
user4 = User('jiao', 'qiyuan')
user4.increment_login_attempts()
user4.increment_login_attempts()
print(user4.login_attempts)
user4.reset_login_attempts()
print(user4.login_attempts)
```
#### File: Python/var_wide/var_wide.py
```python
def deal(content) :
print(content)
if __name__ == '__main__' :
contents = ['hello world', 'This is a test']
for content in contents :
deal(content)
``` |
{
"source": "jiaoran198916/Douban250",
"score": 2
} |
#### File: Douban250/Douban250/pipelines.py
```python
from itemadapter import ItemAdapter
class Douban250Pipeline:
def process_item(self, item, spider):
return item
# import pymongo
# from scrapy.conf import settings
#
# class Douban250Pipeline(object):
# def __init__(self):
# host = settings["MONGODB_HOST"]
# port = settings["MONGODB_PORT"]
# dbname = settings["MONGODB_DBNAME"]
# sheetname = settings["MONGODB_SHEETNAME"]
# # 创建MONGODB数据库链接
# client = pymongo.MongoClient(host=host, port=port)
# # 指定数据库
# mydb = client[dbname]
# # 存放数据的数据库表名
# self.post = mydb[sheetname]
#
# def process_item(self, item, spider):
# data = dict(item)
# self.post.insert(data)
# return item
# MONGODB 主机名
# MONGODB_HOST = "127.0.0.1"
# MONGODB 端口号
# MONGODB_PORT = 27017
# 数据库名称
# MONGODB_DBNAME = "Douban"
# 存放数据的表名称
# MONGODB_SHEETNAME = "doubanmovies"
``` |
{
"source": "jiaoren/stydish",
"score": 2
} |
#### File: stydish/users/models.py
```python
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.core.validators import RegexValidator
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
cook = 'cook'
eater = 'eater'
USER_TYPE = (
(cook, 'Cook'),
(eater, 'Eater'),
)
user_type = models.CharField(max_length=10, choices=USER_TYPE, default=eater)
street = models.CharField(max_length=50)
city = models.CharField(max_length=20)
state = models.CharField(max_length=20)
zip_code = models.CharField(max_length=6)
description = models.CharField(max_length=140, blank=True)
avatar = models.ImageField(upload_to='static/images/avatar', default='static/images/space-th-sm.jpg')
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
                                 message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
phone_number = models.CharField(max_length=11, validators=[phone_regex], blank=True) # validators should be a list
receive_updates = models.BooleanField(default=False)
def __unicode__(self):
return self.first_name + " " + self.last_name
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
    def is_cook(self):
        return self.user_type == self.cook
def get_full_address(self):
return ','.join((self.street, self.city, self.state, self.zip_code))
``` |
{
"source": "jiaorenyu/koubei",
"score": 3
} |
#### File: jiaorenyu/koubei/arima_util.py
```python
import pandas as pd
import pyflux as pf
import sys
import date_util
import common
def get_history_pay(action_count, start_date, end_date, predict_start, predict_end):
date = []
count = []
date_list = date_util.get_date_list(start_date, end_date)
predict_list = date_util.get_date_list(predict_start, predict_end)
real_list = []
for day in date_list:
value = int(action_count[day]) if day in action_count else 0
count.append(value)
date.append(len(count))
for day in predict_list:
value = int(action_count[day]) if day in action_count else 0
real_list.append(value)
return count, date, real_list
def arima(count, index, forward):
data = pd.DataFrame(data=count, index=index)
model = pf.ARIMA(data=data, ar=5, ma=5, integ=0)
x = model.fit("MLE")
result = model.predict(h=forward, intervals=False)
kv = result['0'].to_dict()
keys = list(kv.keys())
keys.sort()
value = []
for key in keys:
v = kv[key]
if v < 0:
v = 0
value.append(int(v))
return value
if __name__ == "__main__":
fn = sys.argv[1]
start_date = sys.argv[2]
end_date = sys.argv[3]
predict_start = sys.argv[4]
predict_end = sys.argv[5]
action_count = common.load_action_stat(fn)
count, date, real_value = get_history_pay(action_count, start_date, end_date, predict_start, predict_end)
predict_value = arima(count, date, 14)
print(predict_value, real_value)
if len(real_value) == 0 or len(predict_value) == 0:
exit()
print(common.shop_cost(predict_value, real_value))
```
#### File: jiaorenyu/koubei/time_util.py
```python
import time
def get_timestamp(datestr, formatstr = "%Y-%m-%d"):
return time.mktime(time.strptime(datestr, formatstr))
def get_datestr(seconds, formatstr = "%Y-%m-%d"):
return time.strftime(formatstr, time.localtime(seconds))
``` |
{
"source": "jiaorenyu/learning",
"score": 3
} |
#### File: learning/python/multipro.py
```python
from multiprocessing import Process
import time
def f(name):
time.sleep(5)
    print('hello', name)
if __name__ == '__main__':
p = Process(target=f, args=('jiaorenyu',))
p1 = Process(target=f, args=('jiaorenyu',))
p.start()
p1.start()
p1.join()
p.join()
```
#### File: learning/tornado/entry.py
```python
import tornado.web


class Entry(tornado.web.UIModule):
def embedded_css(self):
return ".entry { margin-bottom: 1em; }"
def render(self, entry, show_comments=False):
return self.render_string(
"module-entry.html", show_comments=show_comments)
```
#### File: learning/tornado/server.py
```python
import tornado.ioloop
import tornado.httpserver
import tornado.options
from tornado.web import RequestHandler, Application
import uimodules
class HomeHandler(tornado.web.RequestHandler):
def get(self):
#entries = self.db.query("SELECT * FROM entries ORDER BY date DESC"))
entries = ["1", "2"]
self.render("home.html", entries=entries)
class EntryHandler(tornado.web.RequestHandler):
def get(self, entry_id):
#entry = self.db.get("SELECT * FROM entries WHERE id = %s", entry_id)
entries = ["1", "2"]
if not entry: raise tornado.web.HTTPError(404)
self.render("entry.html", entry=entry)
settings = {
"ui_modules": uimodules,
}
class MainHandler(RequestHandler):
def get(self):
if not self.get_cookie("name"):
self.set_cookie("name", "maaoyu")
self.write("name:maaoyu")
else:
self.write(self.get_cookie("name"))
def post(self):
self.set_header("Content-Type", "text/plain")
self.write("You wrote " + self.get_body_argument("message"))
application = tornado.web.Application([
(r"/", MainHandler),
(r"/entry/([0-9]+)", EntryHandler),
], **settings)
application.listen(8081)
tornado.ioloop.IOLoop.instance().start()
``` |
{
"source": "jiaoshenmene/wangdai",
"score": 3
} |
#### File: wangdai/spiders/zj_sprider.py
```python
import scrapy
class Sprider(scrapy.Spider):
name = "zj"
start_urls = [
'https://www.wdzj.com/pingji.html'
]
def parse(self , response):
for quote in response.css('div.tb-platname'):
yield {
'name': quote.css('a::text').extract_first(),
}
``` |
{
"source": "JiaoTangDouBi/Leetcode_UNNC",
"score": 3
} |
#### File: Leetcode_UNNC/136. Single Number/singleNumber.py
```python
class Solution(object):
def singleNumber_mem_On(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
cache = {}
for i in range(len(nums)):
val = nums[i]
if val in cache:
cache[val] += 1
else:
cache[val] = 1
for key in cache:
if cache[key] == 1:
return key
def singleNumber(self, nums):
output = 0
for n in nums:
output ^= n
return output
```
#### File: Leetcode_UNNC/139. Word Break/wordBreak.py
```python
class Solution(object):
def wordBreak(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
:rtype: bool
"""
wordDict = set(wordDict)
dp = [False]*(len(s)+1)
dp[0] = True
for i in range(1, len(s)+1):
for j in range(i):
if dp[j] and s[j:i] in wordDict:
dp[i] = True
return dp[-1]
```
#### File: Leetcode_UNNC/236. Lowest Common Ancestor of a Binary Tree/lowestCommonAncestor.py
```python
from collections import deque
class Solution(object):
def lowestCommonAncestor_rev(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if root in (None, p, q):
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
# find both p and q
if left and right:
return root
# return None if nor, or return p or q
else:
return left or right
# super slow?????
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
def path(root, target):
stack = deque([([root], root)])
while stack:
(path, node) = stack.pop()
if node != target:
if node.right:
stack.append((path + [node.right], node.right))
if node.left:
stack.append((path + [node.left], node.left))
else:
break
return path
if root in (None, p, q):
return root
path_p = path(root, p)
path_q = path(root, q)
i = 0
while i < len(path_p) and i < len(path_q):
pp = path_p[i]
pq = path_q[i]
if pp == pq:
i += 1
else:
break
return path_p[i-1]
```
#### File: Leetcode_UNNC/242. Valid Anagram/isAnagram.py
```python
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
cache = {}
for i in s:
if i not in cache:
cache[i] = 1
else:
cache[i] += 1
for i in t:
if i not in cache:
return False
else:
cache[i] -= 1
for i in cache:
            if cache[i] != 0:
return False
return True
```
#### File: Leetcode_UNNC/24. Swap Nodes in Pairs/swapPairs.py
```python
class Solution(object):
def swapPairs(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
result = pre = ListNode(0)
pre.next = head
while pre.next and pre.next.next:
# pre -> first -> second -> second.next
# => pre -> second -> first -> second.next
first = pre.next
second = first.next
pre.next, second.next, first.next = second, first, second.next
pre = first
return result.next
```
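`ListNode` is assumed by the solution but not defined in this file; the sketch below adds a minimal stand-in and swaps 1->2->3->4 into 2->1->4->3.
```python
class ListNode(object):
    def __init__(self, val=0):
        self.val = val
        self.next = None

# Build 1 -> 2 -> 3 -> 4
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)

node = Solution().swapPairs(head)
out = []
while node:
    out.append(node.val)
    node = node.next
print(out)  # [2, 1, 4, 3]
```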
#### File: Leetcode_UNNC/344. Reverse String/reverseString.py
```python
class Solution(object):
def reverseString(self, s):
"""
:type s: str
:rtype: str
"""
s = list(s)
left = 0
right = len(s) - 1
while left < right:
s[left], s[right] = s[right], s[left]
left += 1
right -= 1
return ''.join(s)
```
#### File: Leetcode_UNNC/349. Intersection of Two Arrays/intersection.py
```python
class Solution(object):
def intersection(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
cache = {}
output = set([])
for i in range(len(nums1)):
val = nums1[i]
if val not in cache:
cache[val] = 1
else:
cache[val] += 1
for i in range(len(nums2)):
val = nums2[i]
if val in cache and val not in output:
output.add(val)
return list(output)
``` |
{
"source": "jiaowozidaren/graduation_paper",
"score": 3
} |
#### File: jiaowozidaren/graduation_paper/Ising.py
```python
import numpy as np
class Ising_mat(object):
def __init__(self,h,w, T ,J = 1):
self.h = h
self.w = w
self.N = h*w
self.J = J
self.T = T
self.mat = np.ones([self.h, self.w], int)
def random_state(self):
self.mat = np.random.randint(0, 2, (self.h,self.w)) * 2 - 1
def neighbor(self,x,y):
return (((x+1)%self.h,y),(x,(y+1)%self.w),((x-1)%self.h,y),(x,(y-1)%self.w))
def unit_E(self,x,y):
sum_unit_E = 0
for i in self.neighbor(x,y):
sum_unit_E += self.mat[i]
return -self.J * self.mat[x,y]*sum_unit_E
def total_E(self):
sum_total_E = 0
for i in range(self.h):
for j in range(self.w):
sum_total_E += self.unit_E(i,j)
return sum_total_E
def total_M(self):
return abs(np.sum(self.mat))
def random_select(self):
x = np.random.randint(self.h)
y = np.random.randint(self.w)
return (x,y)
def single_flip(self):
(x,y) = self.random_select()
s = self.mat[x,y]
is_flip = False
        # Metropolis rule: dE for flipping (x, y) is -2 * unit_E(x, y);
        # accept if dE <= 0, otherwise with probability exp(-dE / T)
        c_acc = 2 * self.unit_E(x,y)
        p_acc = np.exp(c_acc / self.T)
if c_acc > 0:
s *= -1
is_flip = True
elif np.random.rand() < p_acc:
s *= -1
is_flip = True
self.mat[x,y] = s
return (x,y,is_flip)
def cluster_flip(self):
(x,y) = self.random_select()
P_add = 1 - np.exp(-2 * self.J / self.T)
stack = [(x,y)]
s = self.mat[x,y]
lable = np.ones([self.h, self.w])
lable[x, y] = 0
while len(stack)>0:
#print(stack)
(current_x, current_y) = stack.pop()
self.mat[current_x,current_y] *= -1
for i in self.neighbor(current_x,current_y):
if self.mat[i] == s and lable[i] and np.random.rand() < P_add:
stack.append(i)
lable[i] = 0
``` |
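A minimal driver for the class above: start from a random 20x20 lattice below the 2-D critical temperature and watch the magnetisation per spin settle. Lattice size, temperature, and sweep counts are illustrative only.
```python
import numpy as np

np.random.seed(0)
ising = Ising_mat(20, 20, T=1.5)     # T below Tc ~ 2.27 for J = 1
ising.random_state()

for sweep in range(200):
    for _ in range(ising.N):         # one sweep = N single-spin updates
        ising.single_flip()
    if sweep % 50 == 0:
        print(sweep, ising.total_M() / ising.N, ising.total_E())
```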
{
"source": "jiaoyiping630/pytorch-segmentation-pathology",
"score": 2
} |
#### File: pytorch-segmentation-pathology/src/predict.py
```python
import os
import pickle
import argparse
import yaml
import numpy as np
from collections import OrderedDict
from pathlib import Path
from tqdm import tqdm
import torch
from PIL import Image
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from utils.preprocess import minmax_normalize, meanstd_normalize
from torch.utils.data import DataLoader, Dataset
class PredictDataset(Dataset):
def __init__(self,
image_folder,
                 target_size=(512, 512),  # presumably the output image size
net_type='unet',
                 ignore_index=255,  # label value to ignore (in VOC, 255 marks object contours)
):
self.image_folder = Path(image_folder)
assert net_type in ['unet', 'deeplab']
self.net_type = net_type
self.ignore_index = ignore_index
        # All of the following just builds the lists of image and label paths
from pinglib.files import get_file_list_recursive, purename
image_paths = get_file_list_recursive(image_folder, 'jpg')
self.image_paths = image_paths
self.purenames = [purename(path) for path in image_paths]
        # Do not resize pathology images; check whether anything special is needed here.
        # Probably unnecessary, since our image sizes are uniform.
if isinstance(target_size, str):
target_size = eval(target_size)
self.resizer = None
def __len__(self):
return len(self.image_paths)
def __getitem__(self, index):
img_path = self.image_paths[index]
img = np.array(Image.open(img_path))
if self.net_type == 'unet':
img = minmax_normalize(img)
img = meanstd_normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
else:
img = minmax_normalize(img, norm_range=(-1, 1))
img = img.transpose(2, 0, 1)
img = torch.FloatTensor(img)
return img, self.purenames[index]
def main():
from models.net import EncoderDecoderNet, SPPNet
from losses.multi import MultiClassCriterion
from logger.log import debug_logger
from logger.plot import history_ploter
from utils.optimizer import create_optimizer
from utils.metrics import compute_iou_batch
from pinglib.imgprocessing.basic import imwrite
gpu_id = 2
image_folder = r"D:\Projects\MARS-Stomach\Patches\Test_overlap"
save_folder = r"D:\Projects\pytorch-segmentation-pathology\predict\overlap_epoch_11"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    # Build the dataset
    # Load the model
parser = argparse.ArgumentParser()
parser.add_argument('config_path')
args = parser.parse_args()
config_path = Path(args.config_path)
    config = yaml.load(open(config_path), Loader=yaml.FullLoader)
net_config = config['Net']
train_config = config['Train']
batch_size = train_config['batch_size']
dataset = PredictDataset(image_folder=image_folder)
dataloader = DataLoader(dataset, batch_size=batch_size * 2,
shuffle=False, num_workers=4, pin_memory=True)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net_config['output_channels'] = 3
classes = np.arange(1, 3)
# Network
if 'unet' in net_config['dec_type']:
net_type = 'unet'
model = EncoderDecoderNet(**net_config)
else:
net_type = 'deeplab'
model = SPPNet(**net_config)
modelname = config_path.stem
output_dir = Path('../model') / modelname
log_dir = Path('../logs') / modelname
logger = debug_logger(log_dir)
logger.debug(config)
# To device
model = model.to(device)
# Restore model
model_path = output_dir.joinpath(f'model_tmp.pth')
logger.info(f'Resume from {model_path}')
param = torch.load(model_path)
model.load_state_dict(param)
del param
# Predict
model.eval()
with torch.no_grad():
# for batched in dataloader:
with tqdm(dataloader) as _tqdm:
for batched in _tqdm:
images, purenames = batched
images = images.to(device)
preds = model.tta(images, net_type=net_type)
                preds = F.softmax(preds, dim=1)  # softmax over the class dimension
                preds = preds.detach().cpu().numpy()  # e.g. shape (batch, 3, 512, 512)
for j in range(len(purenames)):
tumor_possibility = preds[j, 2, :, :]
target_path = os.path.join(save_folder, purenames[j] + '_mask.png')
imwrite((tumor_possibility * 255).astype(np.uint8), target_path)
if __name__ == "__main__":
# main()
    # Next, stitch the patches back together
from pinglib.toolkits.image_stitch import image_stitch
from pinglib.files import get_file_list, match_files, purename
from pinglib.imgprocessing.basic import imread, imwrite
#
# image_stitch(target_folder=r"D:\Projects\pytorch-segmentation-pathology\predict\overlap_epoch_11_stitch",
# result_folder=r"D:\Projects\pytorch-segmentation-pathology\predict\overlap_epoch_11",
# meta_paths=get_file_list(r"D:\Projects\MARS-Stomach\Patches\Test_overlap", 'pkl'),
    #              result_appendix='_mask',  # use '_mask' when results differ from the original names, e.g. a_0_0.jpg -> a_0_0_mask.jpg
# suffix='png',
    #              channels=1,  # number of channels in the result
# )
    # Finally, mask out the background and save as jpg
result_paths = get_file_list(r"D:\Projects\pytorch-segmentation-pathology\predict\overlap_epoch_11_stitch", 'jpg')
foreground_paths = get_file_list(r"E:\MARS-Stomach\Foregrounds_image", 'png')
[result_paths, foreground_paths] = match_files([result_paths, foreground_paths], ['_mask', '_foreground'])
for (result_path, foreground_path) in zip(result_paths, foreground_paths):
result_patch = imread(result_path)
foreground_patch = imread(foreground_path)
        # Binarise with a threshold of 128
result_patch[result_patch < 128] = 0
result_patch[result_patch >= 128] = 255
        # Zero out everything outside the foreground mask
result_patch[foreground_patch == 0] = 0
target_path = os.path.join(r"D:\Projects\pytorch-segmentation-pathology\predict\overlap_epoch_11_final",
purename(result_path) + '.jpg')
imwrite(result_patch, target_path)
``` |
{
"source": "jiaozhe/BData",
"score": 2
} |
#### File: programs/cmcc/broadband_file_cleaner.py
```python
import os
from pathlib import Path
from loguru import logger
from openpyxl import load_workbook
# Initial configuration
DATA_EXTS = [".xls", ".xlsx"]
DATA_DATE = "0212"
DATA_PATH = Path("C:/Users/John/Desktop/集运商机/预约单/" + DATA_DATE)
PROV_NAMES = ["安徽", "北京", "重庆", "福建", "甘肃", "广东", "广西",
"贵州", "海南", "河北", "河南", "黑龙江", "湖北", "湖南",
"吉林", "江苏", "江西", "辽宁", "内蒙古", "宁夏", "青海",
"山东", "山西", "陕西", "上海", "四川", "天津", "西藏",
"新疆", "云南", "浙江"]
PROV_INFO = dict()
for index, item in enumerate(PROV_NAMES):
# logger.info("%02d - %s" % (index, item))
PROV_INFO[item] = "%02d" % (index+1, )
# Switch the working directory
os.chdir(DATA_PATH)
if Path.cwd() != DATA_PATH:
logger.error("[Change Working Directory] Error!")
else:
logger.info("[Change Working Directory] Success!")
# Helper function
def change_name(_old_name, _suffix):
for _prov_name in PROV_INFO:
if _prov_name in _old_name:
_prov_number = PROV_INFO[_prov_name]
_new_name = "%s-%s-%s" % (_prov_number, _prov_name, DATA_DATE)
return _new_name + _suffix
else:
continue
# Iterate over and process the data files
for excel in DATA_PATH.iterdir():
if excel.is_file() and excel.suffix in DATA_EXTS:
        # Parse the Excel file name
excel_old_name = excel.name
excel_suffix = excel.suffix
excel_new_name = change_name(excel_old_name, excel_suffix)
logger.info("%s ==> %s" % (excel_old_name, excel_new_name))
        # Delete unneeded data columns from the Excel file
wb = load_workbook(excel_old_name)
ws = wb.active
ws.delete_cols(1, 4)
ws.delete_cols(2)
ws.delete_cols(3)
ws.delete_cols(4, 4)
wb.save(excel_new_name)
``` |
{
"source": "jiaozhe/MYZ",
"score": 2
} |
#### File: jiaozhe/MYZ/setup.py
```python
import os
import setuptools
# See Also:
# https://packaging.python.org/tutorials/packaging-projects/
# https://packaging.python.org/tutorials/distributing-packages/
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setuptools.setup(
name='MYZ',
version='0.2.0',
url='http://jiaozhe.me/',
author='<NAME>',
author_email='<EMAIL>',
description='A practical toolkit for Ming & Yun & Zhe.',
long_description=read('README.rst'),
license='BSD',
packages=setuptools.find_packages(),
# https://pypi.org/classifiers/
classifiers=[
"Development Status :: 5 - Production/Stable",
"Natural Language :: English",
"Natural Language :: Chinese (Simplified)",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
],
python_requires='>=3.7',
)
``` |
{
"source": "jiaozhentian/kubeflow-manifest-mirror",
"score": 2
} |
#### File: multi-user/pipelines-profile-controller/sync.py
```python
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import base64
def main():
settings = get_settings_from_env()
server = server_factory(**settings)
server.serve_forever()
def get_settings_from_env(controller_port=None,
visualization_server_image=None, frontend_image=None,
visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None,
minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None):
"""
Returns a dict of settings from environment variables relevant to the controller
Environment settings can be overridden by passing them here as arguments.
Settings are pulled from the all-caps version of the setting name. The
following defaults are used if those environment variables are not set
to enable backwards compatibility with previous versions of this script:
visualization_server_image: gcr.io/ml-pipeline/visualization-server
visualization_server_tag: value of KFP_VERSION environment variable
frontend_image: gcr.io/ml-pipeline/frontend
frontend_tag: value of KFP_VERSION environment variable
disable_istio_sidecar: Required (no default)
minio_access_key: Required (no default)
minio_secret_key: Required (no default)
"""
settings = dict()
settings["controller_port"] = \
controller_port or \
os.environ.get("CONTROLLER_PORT", "8080")
settings["visualization_server_image"] = \
visualization_server_image or \
os.environ.get("VISUALIZATION_SERVER_IMAGE", "registry.cn-zhangjiakou.aliyuncs.com/kubeflow-zhentian/visualization-server")
settings["frontend_image"] = \
frontend_image or \
os.environ.get("FRONTEND_IMAGE", "registry.cn-zhangjiakou.aliyuncs.com/kubeflow-zhentian/frontend")
# Look for specific tags for each image first, falling back to
# previously used KFP_VERSION environment variable for backwards
# compatibility
settings["visualization_server_tag"] = \
visualization_server_tag or \
os.environ.get("VISUALIZATION_SERVER_TAG") or \
os.environ["KFP_VERSION"]
settings["frontend_tag"] = \
frontend_tag or \
os.environ.get("FRONTEND_TAG") or \
os.environ["KFP_VERSION"]
settings["disable_istio_sidecar"] = \
disable_istio_sidecar if disable_istio_sidecar is not None \
else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
settings["minio_access_key"] = \
minio_access_key or \
base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
settings["minio_secret_key"] = \
minio_secret_key or \
base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
# KFP_DEFAULT_PIPELINE_ROOT is optional
settings["kfp_default_pipeline_root"] = \
kfp_default_pipeline_root or \
os.environ.get("KFP_DEFAULT_PIPELINE_ROOT")
return settings
def server_factory(visualization_server_image,
visualization_server_tag, frontend_image, frontend_tag,
disable_istio_sidecar, minio_access_key,
minio_secret_key, kfp_default_pipeline_root=None,
url="", controller_port=8080):
"""
Returns an HTTPServer populated with Handler with customized settings
"""
class Controller(BaseHTTPRequestHandler):
def sync(self, parent, children):
# parent is a namespace
namespace = parent.get("metadata", {}).get("name")
pipeline_enabled = parent.get("metadata", {}).get(
"labels", {}).get("pipelines.kubeflow.org/enabled")
if pipeline_enabled != "true":
return {"status": {}, "children": []}
desired_configmap_count = 1
desired_resources = []
if kfp_default_pipeline_root:
desired_configmap_count = 2
desired_resources += [{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "kfp-launcher",
"namespace": namespace,
},
"data": {
"defaultPipelineRoot": kfp_default_pipeline_root,
},
}]
# Compute status based on observed state.
desired_status = {
"kubeflow-pipelines-ready":
len(children["Secret.v1"]) == 1 and
len(children["ConfigMap.v1"]) == desired_configmap_count and
len(children["Deployment.apps/v1"]) == 2 and
len(children["Service.v1"]) == 2 and
len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and
len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and
"True" or "False"
}
# Generate the desired child object(s).
desired_resources += [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "metadata-grpc-configmap",
"namespace": namespace,
},
"data": {
"METADATA_GRPC_SERVICE_HOST":
"metadata-grpc-service.kubeflow",
"METADATA_GRPC_SERVICE_PORT": "8080",
},
},
# Visualization server related manifests below
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
},
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"image": f"{visualization_server_image}:{visualization_server_tag}",
"imagePullPolicy":
"IfNotPresent",
"name":
"ml-pipeline-visualizationserver",
"ports": [{
"containerPort": 8888
}],
"resources": {
"requests": {
"cpu": "50m",
"memory": "200Mi"
},
"limits": {
"cpu": "500m",
"memory": "1Gi"
},
}
}],
"serviceAccountName":
"default-editor",
},
},
},
},
{
"apiVersion": "networking.istio.io/v1alpha3",
"kind": "DestinationRule",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"host": "ml-pipeline-visualizationserver",
"trafficPolicy": {
"tls": {
"mode": "ISTIO_MUTUAL"
}
}
}
},
{
"apiVersion": "security.istio.io/v1beta1",
"kind": "AuthorizationPolicy",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
}
},
"rules": [{
"from": [{
"source": {
"principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"]
}
}]
}]
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"ports": [{
"name": "http",
"port": 8888,
"protocol": "TCP",
"targetPort": 8888,
}],
"selector": {
"app": "ml-pipeline-visualizationserver",
},
},
},
# Artifact fetcher related resources below.
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-ui-artifact"
}
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"name":
"ml-pipeline-ui-artifact",
"image": f"{frontend_image}:{frontend_tag}",
"imagePullPolicy":
"IfNotPresent",
"ports": [{
"containerPort": 3000
}],
"resources": {
"requests": {
"cpu": "10m",
"memory": "70Mi"
},
"limits": {
"cpu": "100m",
"memory": "500Mi"
},
}
}],
"serviceAccountName":
"default-editor"
}
}
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
"labels": {
"app": "ml-pipeline-ui-artifact"
}
},
"spec": {
"ports": [{
"name":
"http", # name is required to let istio understand request protocol
"port": 80,
"protocol": "TCP",
"targetPort": 3000
}],
"selector": {
"app": "ml-pipeline-ui-artifact"
}
}
},
]
print('Received request:\n', json.dumps(parent, indent=2, sort_keys=True))
print('Desired resources except secrets:\n', json.dumps(desired_resources, indent=2, sort_keys=True))
# Moved after the print argument because this is sensitive data.
desired_resources.append({
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mlpipeline-minio-artifact",
"namespace": namespace,
},
"data": {
"accesskey": minio_access_key,
"secretkey": minio_secret_key,
},
})
return {"status": desired_status, "children": desired_resources}
def do_POST(self):
# Serve the sync() function as a JSON webhook.
observed = json.loads(
self.rfile.read(int(self.headers.get("content-length"))))
desired = self.sync(observed["parent"], observed["children"])
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
return HTTPServer((url, int(controller_port)), Controller)
if __name__ == "__main__":
main()
``` |
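The controller is normally driven by Metacontroller, but the webhook can be exercised locally. The sketch below assumes the functions above are importable in the current scope, sets dummy values for the required environment variables, starts the server on a spare port, and POSTs a minimal parent/children payload; every name and value here is made up for the test.
```python
import json
import os
import threading
from urllib import request

# sync.py requires these at runtime (dummy values for a local test)
os.environ.setdefault("KFP_VERSION", "1.0.0")
os.environ.setdefault("MINIO_ACCESS_KEY", "minio")
os.environ.setdefault("MINIO_SECRET_KEY", "minio123")

settings = get_settings_from_env(controller_port="18080",
                                 disable_istio_sidecar=True)
server = server_factory(**settings)
threading.Thread(target=server.serve_forever, daemon=True).start()

payload = {
    "parent": {"metadata": {"name": "user-ns",
                            "labels": {"pipelines.kubeflow.org/enabled": "true"}}},
    "children": {"Secret.v1": [], "ConfigMap.v1": [],
                 "Deployment.apps/v1": [], "Service.v1": [],
                 "DestinationRule.networking.istio.io/v1alpha3": [],
                 "AuthorizationPolicy.security.istio.io/v1beta1": []},
}
req = request.Request("http://localhost:18080/sync",
                      data=json.dumps(payload).encode(),
                      headers={"Content-Type": "application/json"})
resp = json.loads(request.urlopen(req).read())
print(resp["status"], len(resp["children"]))
```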
{
"source": "jiaozihao18/md-analysis-tools",
"score": 3
} |
#### File: md-analysis-tools/frustration/combine-dihedral-hbond.py
```python
__description__ = \
"""
This determines the hydrogen bonds made by a set of important atoms over a set
of trajectories. It first reads all frames from all trajectories to generate
a list of all populated hydrogen bonds. It then goes through the trajectories
again and records, for a given trajectory, which hydrogen bonds are populated.
Three inputs are required:
1. output from hbond-recorder.tcl
2. output from water-recorder.tcl
3. gro file used for these scripts
It records unique donor-acceptor pairs for atoms specified in
global_important_atoms. It also records which class any waters involved in
hydrogen bonds fall into.
"""
__author__ = "<NAME>"
__usage__ = "./combind-dihedral-hbond.py [USER-SETTABLE PARAMS AT TOP OF SCRIPT]"
__date__ = "100426"
import sys, os
# -------------------- USER-SETTABLE PARAMETERS ----------------------------- #
global_important_atoms = [("NH1","82ARG"),
("NH2","82ARG"),
("SD" ,"75MET"),
("OE1","41GLN"),
("NE2","41GLN"),
("OE1","41GLU"),
("OE2","41GLU"),
("O3" ,"249N06"),
("O3" ,"249N15"),
("O3" ,"249NPR")]
atom_aliases = {("NH1","82ARG"): ("NH","82ARG"),
("NH2","82ARG"): ("NH","82ARG"),
("OE1","41GLU"): ("OE","41GLU"),
("OE2","41GLU"): ("OE","41GLU"),
("OE1","11GLU"): ("OE","11GLU"),
("OE2","11GLU"): ("OE","11GLU"),
("OE1","13GLU"): ("OE","13GLU"),
("OE2","13GLU"): ("OE","13GLU"),
("O3" ,"249N06"):("O3","249LIG"),
("O3" ,"249N15"):("O3","249LIG"),
("O3" ,"249NPR"):("O3","249LIG")}
water_type_list = ["ante","lower","upper","inter"]
file_list = ["SR2-Q41E-M75L_N0600_run000",
"SR2-Q41E-M75L_N0600_run001",
"SR2-Q41E-M75L_N0600_run002",
"SR2_N0600_run000",
"SR2_N0600_run001",
"SR2_N0600_run002",
"SR2_N0600_run003",
"SR2_N0600_run004",
"SR2-Q41E-M75L_N1500_run000",
"SR2-Q41E-M75L_N1500_run001",
"SR2-Q41E-M75L_N1500_run002",
"SR2_N1500_run000",
"SR2_N1500_run001",
"SR2_N1500_run002",
"SR2_NPR_run000",
"SR2_NPR_run001",
"SR2_NPR_run002"]
gro_location = "../../local-run-output/"
output_dir = "trajectory-data/"
# --------------------------------------------------------------------------- #
class BinConformationsError(Exception):
"""
General error class for this module.
"""
pass
def smoothSeries(series,smooth_window=5):
"""
Smooth a series with a window of fixed size.
"""
# Error check
if smooth_window <= 0:
err = "Smoothing window should be an odd integer >= 1!\n"
raise BinConformationsError(err)
# Force the smooth window to be an integer
smooth_window = int(smooth_window)
# make the smoothing window odd if the user specifies an even number
if smooth_window % 2 == 0:
smooth_window += 1
new_series = []
    half_window = int(smooth_window) // 2
# The first half window is not smoothed
new_series.extend(series[:half_window])
# The middle range is smoothed over smooth_window
indexes = range(half_window,len(series[half_window:-half_window])+half_window)
for i in indexes:
new_s = 0
for k in range(-half_window,half_window+1):
new_s += series[i+k]
new_s = float(new_s)/smooth_window
new_series.append(new_s)
# The last half-window is not smoothed
for i in range(len(series)-half_window,len(series)):
new_series.append(series[i])
for i, v in enumerate(new_series):
new_series[i] = int(round(new_series[i],0))
# A useful test hack for making sure that the smoothing is behaving as expected
#for i in range(len(series)):
# print i, (len(series[0])*"%5i") % tuple(series[i]),
# print (len(series[0])*"%5.2f") % tuple(new_series[i])
#sys.exit()
return new_series
def parseGroFile(gro_file):
"""
Parse a gro file, generating a set of dictionaries mapping index to an
named atom/residue pair and atom/residue pair to index.
"""
f = open(gro_file,'r')
lines = f.readlines()
f.close()
atom_to_index = {}
index_to_atom = []
for l in lines[2:]:
index = int(l[15:20]) - 1
atom = l[11:16].strip()
residue = l[0:8].strip()
index_to_atom.append((atom,residue))
atom_to_index[(atom,residue)] = index
return atom_to_index, index_to_atom
def binConformations(input_root,gro_file,specified_keys=None,
build_key_list=False):
"""
"""
important_atoms = global_important_atoms
# Read hbond file
f = open("%s_hbond.txt" % input_root,'r')
hbond_lines = f.readlines()
f.close()
hbond_lines = [l for l in hbond_lines if l.startswith("->")]
# Read water file
f = open("%s_water.txt" % input_root,'r')
water_lines = f.readlines()
f.close()
water_lines = [l for l in water_lines if l.startswith("->")]
# Parse the GRO file so we can link atom indexes to atom names
atom_to_index, index_to_atom = parseGroFile(gro_file)
# Go through the important atoms, grab their indexes, and discard
# important atoms that do not exist in this file.
to_remove = []
important_indexes = []
for a in important_atoms:
try:
important_indexes.append(atom_to_index[a])
except KeyError:
to_remove.append(a)
important_atoms = [a for a in important_atoms if a not in to_remove]
# Print out the important atoms (a useful way to make sure the script
# is working).
    print(important_atoms)
# If we're building a key list, just make a list of keys.
if build_key_list:
key_list = []
# If we already know the keys, make a dictionary using those keys.
# Otherwise, remain agnostic about which keys will be used.
if specified_keys == None:
obs = {}
else:
obs = dict([(k,[0 for j in range(len(hbond_lines))])
for k in specified_keys])
# Walk through the vmd output file and generate a dictionary containing
# all hydrogen bonds involving important atoms as a function of frame.
for line_index, l in enumerate(hbond_lines):
entry = l.split("|")[1:]
frame = int(entry[0])
# Create lists of donor and acceptor indexes from the vmd output
# line
if entry[1].startswith(" {"):
bonds = entry[1].split("}")
donors = [int(a) for a in bonds[0].strip(" {").split()]
acceptors = [int (a) for a in bonds[1].strip(" {").split()]
else:
bonds = entry[1].split()
donors = int(bonds[0])
acceptors = int(bonds[1])
# Go through all donor/acceptor pairs
for i in range(len(donors)):
# Only take hydrogen bonds with interesting atoms
if donors[i] in important_indexes or \
acceptors[i] in important_indexes:
# Unique donor/acceptor name
d = index_to_atom[donors[i]]
a = index_to_atom[acceptors[i]]
                # Skip spurious carbon hydrogen bonds caused by the sloppy
                # h-bond cutoff
if d[0][0] == "C" or a[0][0] == "C":
continue
# Collapse indistinguishable atoms (Arg NH1/NH2, etc.)
if d in atom_aliases.keys():
d = atom_aliases[d]
if a in atom_aliases.keys():
a = atom_aliases[a]
# Collapse solvent based on classification in water-recorder
if d[1][-3:] == "SOL" or a[1][-3:] == "SOL":
# Parse the water line
relevant_waters = water_lines[line_index]
water_entry = water_lines[line_index].split("|")[1:]
water_frame = int(entry[0])
if water_frame != frame:
err = "Mismatch between frames in water and hbond!\n"
raise BinConformationsError(err)
water_entry = water_entry[1:]
# Now assign a water type based on whether the donor or
# acceptor was assigned a type by water-recorder.tcl
water_type = None
for j in range(len(water_entry)):
waters = [int(w) for w in water_entry[j].split()]
if donors[i] in waters:
water_type = water_type_list[j]
break
if acceptors[i] in waters:
water_type = water_type_list[j]
break
# If no type was assigned, this is a bulk water
if water_type == None:
water_type = "bulk"
# Record the water type
if d[1][-3:] == "SOL":
d = ("SOL",water_type)
if a[1][-3:] == "SOL":
a = ("SOL",water_type)
# If we get here, we've officially got an interesting hydrogen
# bond! Record the donor/acceptor pair.
key = (d,a)
if build_key_list == True:
key_list.append(key)
else:
# Record that there is a hydrogen bond between d and a in
# this frame. If no d->a hydrogen bonds have been
# recorded so far, make a list of 0s that shows all frames
# beside this one have no hydrogen bond formed.
if key not in obs.keys():
if specified_keys != None:
err = "%r not in specified keys!\n" % key
raise BinConformationsError(err)
else:
obs[key] = [0 for j in range(len(hbond_lines))]
obs[key][frame] += 1
# If we're just building a key list, we're done at this point. Return the
# list of keys.
if build_key_list:
return key_list
# Smooth the observations
for k in obs.keys():
obs[k] = smoothSeries(obs[k])
# Make pretty (R-readable) output for each donor/acceptor pair.
out = ["%20s%20s" % (" ","frame")]
for k in obs.keys():
n = "%s.%s_%s.%s" % tuple([k[0][0],k[0][1],k[1][0],k[1][1]])
out.append("%20s" % n)
out.append("\n")
for i in range(len(hbond_lines)):
out.append("%20i%20i" % (i,i))
for k in obs.keys():
out.append("%20i" % obs[k][i])
out.append("\n")
return "".join(out)
def main(argv=None):
"""
Main function for this module.
"""
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# Build list of all unique hydrogen bonds observed in all trajectories
print "Pass 1: building list of all possible hydrogen bonds"
keys = []
for f in file_list:
        print(f)
gro = os.path.join(gro_location,("%s.gro" % f))
keys.extend(binConformations(f,gro,build_key_list=True))
keys = dict([(k,[]) for k in keys]).keys()
# Count those hydrogen bonds over all trajectories and write to files
print "Pass 2: recording hydrogen bonds for each trajectory"
for f in file_list:
        print(f)
gro = os.path.join(gro_location,("%s.gro" % f))
out = binConformations(f,gro,specified_keys=keys)
g = open(os.path.join(output_dir,"%s_hbond-processed.txt" % f),'w')
g.write(out)
g.close()
if __name__ == "__main__":
main()
```
#### File: md-analysis-tools/local-rmsd/perturb-pdb.py
```python
__description__ = """
Take a pdb file and an R-formatted file containing the columns x_mean, y_mean,
and z_mean, then shift the coordinates of the residues in the pdb file by x_mean,
y_mean, and z_mean. It loads the 10 x the total magnitude of the shift into the
b-factor column of the pdb. The script is kinda dumb: it applies the
perturbations sequentially to each residue in the pdb fie, so make sure that your
pdb and data file have exactly the same residues.
"""
__author__ = "<NAME>"
__date__ = "110520 (the day before the end of the world)"
__usage__ = "perturb-pdb.py pdb_file data_file"
import sys
from math import sqrt
class PerturbPdbError(Exception):
"""
General error class for this module.
"""
pass
def readDataFile(data_file):
"""
Read an R-formatted data file and return a list of tuples containing x,y,z
    perturbations.
"""
fmt_err = """
Data file does not have correct format! Data file should be R-formatted
and have x_mean, y_mean, and z_mean columns!
"""
f = open(data_file,'r')
lines = f.readlines()
f.close()
# Strip comments and blank lines
lines = [l for l in lines if (not l.startswith("#")) and l.strip() != ""]
# Figure out which columns to take
columns = lines[0].split()
try:
x = columns.index("x_mean") + 1
y = columns.index("y_mean") + 1
z = columns.index("z_mean") + 1
except ValueError:
err = "\n\n%s\n\n" % fmt_err
raise PerturbPdbError(err)
# Take data
out = []
for l in lines[1:]:
col = l.split()
out.append([float(col[x]),float(col[y]),float(col[z])])
return out
def perturbPdb(pdb_file,data_file):
"""
Take a pdb file and perturb its coordinates by the values in data file.
"""
f = open(pdb_file,'r')
pdb = f.readlines()
f.close()
data = readDataFile(data_file)
delta = [sqrt((d[0]**2+d[1]**2+d[2]**2)) for d in data]
delta = [10*d/sum(delta) for d in delta]
current_residue = None
out = []
for line in pdb:
if line[0:6] == "ATOM ":
if current_residue == None:
current_residue = line[21:26]
residue_index = 0
if current_residue != line[21:26]:
current_residue = line[21:26]
residue_index += 1
new_x = float(line[30:38]) + data[residue_index][0]
new_y = float(line[38:46]) + data[residue_index][1]
new_z = float(line[46:54]) + data[residue_index][2]
new_b = delta[residue_index]
new_line = "%s%8.3f%8.3f%8.3f%s%6.2f%s" % (line[:30],
new_x,new_y,new_z,
line[54:60],
new_b,line[66:])
out.append(new_line)
else:
out.append(line)
return out
def main(argv=None):
"""
"""
if argv == None:
argv = sys.argv[1:]
try:
pdb_file = argv[0]
data_file = argv[1]
except IndexError:
err = "Incorrect number of arguments!\n\n%s\n\n" % __usage__
raise PerturbPdbError(err)
out = perturbPdb(pdb_file,data_file)
return "".join(out)
if __name__ == "__main__":
    print(main())
```
#### File: md-analysis-tools/s-aromatic-pi/s-aromatic-pi.py
```python
__description__ =\
"""
"""
__author__ = "<NAME>"
__date__ = "101122"
__usage__ = "count-contacts.py vmd_output_file"
import sys
class CountContactsError(Exception):
"""
General error class for this module.
"""
pass
def parseVMDLine(line):
"""
"""
# Parse the line
split_line = line.split("|")
# Frame number
out = [int(split_line[1])]
out.extend([float(v) for v in split_line[2:]])
return tuple(out)
def main(argv=None):
"""
"""
# Parse command line
if argv == None:
argv = sys.argv[1:]
try:
input_file = argv[0]
except IndexError:
err = __usage__
raise CountContactsError(err)
# Read the input file
f = open(input_file,'r')
lines = f.readlines()
f.close()
lines = [l for l in lines if l.startswith("->")]
# Parse each line in the file
out = []
for l in lines:
out.append(("%10i%10.3f%10.3f\n") % parseVMDLine(l))
# Add line numbers and header
out = ["%10i%s" % (i,x) for i, x in enumerate(out)]
out.insert(0,"%10s%10s%10s%10s\n" % (" ","frame","dist206","dist209"))
print "".join(out)
if __name__ == "__main__":
main()
``` |
{
"source": "jiapei100/DLTK",
"score": 3
} |
#### File: dltk/io/augmentation.py
```python
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
def flip(imagelist, axis=1):
"""Randomly flip spatial dimensions
Args:
imagelist (np.ndarray or list or tuple): image(s) to be flipped
axis (int): axis along which to flip the images
Returns:
np.ndarray or list or tuple: same as imagelist but randomly flipped
along axis
"""
# Check if a single image or a list of images has been passed
was_singular = False
if isinstance(imagelist, np.ndarray):
imagelist = [imagelist]
was_singular = True
    # With a probability of 0.5, flip the image(s) across `axis`
do_flip = np.random.random(1)
if do_flip > 0.5:
for i in range(len(imagelist)):
imagelist[i] = np.flip(imagelist[i], axis=axis)
if was_singular:
return imagelist[0]
return imagelist
def add_gaussian_offset(image, sigma=0.1):
"""
Add Gaussian offset to an image. Adds the offset to each channel
independently.
Args:
image (np.ndarray): image to add noise to
sigma (float): stddev of the Gaussian distribution to generate noise
from
Returns:
np.ndarray: same as image but with added offset to each channel
"""
offsets = np.random.normal(0, sigma, ([1] * (image.ndim - 1) + [image.shape[-1]]))
image += offsets
return image
def add_gaussian_noise(image, sigma=0.05):
"""
Add Gaussian noise to an image
Args:
image (np.ndarray): image to add noise to
sigma (float): stddev of the Gaussian distribution to generate noise
from
Returns:
np.ndarray: same as image but with added offset to each channel
"""
image += np.random.normal(0, sigma, image.shape)
return image
def elastic_transform(image, alpha, sigma):
"""
Elastic deformation of images as described in [1].
[1] Simard, Steinkraus and Platt, "Best Practices for Convolutional
Neural Networks applied to Visual Document Analysis", in Proc. of the
International Conference on Document Analysis and Recognition, 2003.
Based on gist https://gist.github.com/erniejunior/601cdf56d2b424757de5
Args:
image (np.ndarray): image to be deformed
alpha (list): scale of transformation for each dimension, where larger
values have more deformation
sigma (list): Gaussian window of deformation for each dimension, where
smaller values have more localised deformation
Returns:
np.ndarray: deformed image
"""
assert len(alpha) == len(sigma), \
"Dimensions of alpha and sigma are different"
channelbool = image.ndim - len(alpha)
out = np.zeros((len(alpha) + channelbool, ) + image.shape)
# Generate a Gaussian filter, leaving channel dimensions zeroes
for jj in range(len(alpha)):
array = (np.random.rand(*image.shape) * 2 - 1)
out[jj] = gaussian_filter(array, sigma[jj],
mode="constant", cval=0) * alpha[jj]
# Map mask to indices
shapes = list(map(lambda x: slice(0, x, None), image.shape))
grid = np.broadcast_arrays(*np.ogrid[shapes])
indices = list(map((lambda x: np.reshape(x, (-1, 1))), grid + np.array(out)))
# Transform image based on masked indices
transformed_image = map_coordinates(image, indices, order=0,
mode='reflect').reshape(image.shape)
return transformed_image
def extract_class_balanced_example_array(image,
label,
example_size=[1, 64, 64],
n_examples=1,
classes=2,
class_weights=None):
"""Extract training examples from an image (and corresponding label) subject
to class balancing. Returns an image example array and the
corresponding label array.
Args:
image (np.ndarray): image to extract class-balanced patches from
label (np.ndarray): labels to use for balancing the classes
example_size (list or tuple): shape of the patches to extract
n_examples (int): number of patches to extract in total
classes (int or list or tuple): number of classes or list of classes
to extract
Returns:
np.ndarray, np.ndarray: class-balanced patches extracted from full
images with the shape [batch, example_size..., image_channels]
"""
assert image.shape[:-1] == label.shape, 'Image and label shape must match'
assert image.ndim - 1 == len(example_size), \
        'Example size does not fit image size'
assert all([i_s >= e_s for i_s, e_s in zip(image.shape, example_size)]), \
'Image must be larger than example shape'
rank = len(example_size)
if isinstance(classes, int):
classes = tuple(range(classes))
n_classes = len(classes)
assert n_examples >= n_classes, \
'n_examples need to be greater than n_classes'
if class_weights is None:
n_ex_per_class = np.ones(n_classes).astype(int) * int(np.round(n_examples / n_classes))
else:
assert len(class_weights) == n_classes, \
'Class_weights must match number of classes'
class_weights = np.array(class_weights)
n_ex_per_class = np.round((class_weights / class_weights.sum()) * n_examples).astype(int)
# Compute an example radius to define the region to extract around a
# center location
ex_rad = np.array(list(zip(np.floor(np.array(example_size) / 2.0),
np.ceil(np.array(example_size) / 2.0))),
                      dtype=int)  # plain int: np.int was removed in newer NumPy
class_ex_images = []
class_ex_lbls = []
min_ratio = 1.
for c_idx, c in enumerate(classes):
# Get valid, random center locations belonging to that class
idx = np.argwhere(label == c)
ex_images = []
ex_lbls = []
if len(idx) == 0 or n_ex_per_class[c_idx] == 0:
class_ex_images.append([])
class_ex_lbls.append([])
continue
# Extract random locations
r_idx_idx = np.random.choice(len(idx),
size=min(n_ex_per_class[c_idx], len(idx)),
replace=False).astype(int)
r_idx = idx[r_idx_idx]
# Shift the random to valid locations if necessary
r_idx = np.array(
[np.array([max(min(r[dim], image.shape[dim] - ex_rad[dim][1]),
ex_rad[dim][0]) for dim in range(rank)])
for r in r_idx])
for i in range(len(r_idx)):
# Extract class-balanced examples from the original image
slicer = [slice(r_idx[i][dim] -
ex_rad[dim][0], r_idx[i][dim] +
ex_rad[dim][1]) for dim in range(rank)]
ex_image = image[slicer][np.newaxis, :]
ex_lbl = label[slicer][np.newaxis, :]
# Concatenate them and return the examples
ex_images = np.concatenate((ex_images, ex_image), axis=0) \
if (len(ex_images) != 0) else ex_image
ex_lbls = np.concatenate((ex_lbls, ex_lbl), axis=0) \
if (len(ex_lbls) != 0) else ex_lbl
class_ex_images.append(ex_images)
class_ex_lbls.append(ex_lbls)
ratio = n_ex_per_class[c_idx] / len(ex_images)
min_ratio = ratio if ratio < min_ratio else min_ratio
indices = np.floor(n_ex_per_class * min_ratio).astype(int)
ex_images = np.concatenate([cimage[:idxs] for cimage, idxs in zip(class_ex_images, indices)
if len(cimage) > 0], axis=0)
ex_lbls = np.concatenate([clbl[:idxs] for clbl, idxs in zip(class_ex_lbls, indices)
if len(clbl) > 0], axis=0)
return ex_images, ex_lbls
def extract_random_example_array(image_list,
example_size=[1, 64, 64],
n_examples=1):
"""Randomly extract training examples from image (and a corresponding label).
Returns an image example array and the corresponding label array.
Args:
image_list (np.ndarray or list or tuple): image(s) to extract random
patches from
example_size (list or tuple): shape of the patches to extract
n_examples (int): number of patches to extract in total
Returns:
np.ndarray, np.ndarray: class-balanced patches extracted from full
images with the shape [batch, example_size..., image_channels]
"""
assert n_examples > 0
was_singular = False
if isinstance(image_list, np.ndarray):
image_list = [image_list]
was_singular = True
assert all([i_s >= e_s for i_s, e_s in zip(image_list[0].shape, example_size)]), \
'Image must be bigger than example shape'
assert (image_list[0].ndim - 1 == len(example_size) or
image_list[0].ndim == len(example_size)), \
        'Example size does not fit image size'
for i in image_list:
if len(image_list) > 1:
assert (i.ndim - 1 == image_list[0].ndim or
i.ndim == image_list[0].ndim or
i.ndim + 1 == image_list[0].ndim), \
            'Example size does not fit image size'
assert all([i0_s == i_s for i0_s, i_s in zip(image_list[0].shape, i.shape)]), \
'Image shapes must match'
rank = len(example_size)
# Extract random examples from image and label
valid_loc_range = [image_list[0].shape[i] - example_size[i] for i in range(rank)]
rnd_loc = [np.random.randint(valid_loc_range[dim], size=n_examples)
if valid_loc_range[dim] > 0
else np.zeros(n_examples, dtype=int) for dim in range(rank)]
examples = [[]] * len(image_list)
for i in range(n_examples):
slicer = [slice(rnd_loc[dim][i], rnd_loc[dim][i] + example_size[dim])
for dim in range(rank)]
for j in range(len(image_list)):
ex_image = image_list[j][slicer][np.newaxis]
# Concatenate and return the examples
examples[j] = np.concatenate((examples[j], ex_image), axis=0) \
if (len(examples[j]) != 0) else ex_image
if was_singular:
return examples[0]
return examples
```
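A quick sanity check of the helpers above on synthetic data; the array shape is arbitrary and the import path assumes the installed `dltk` package layout.
```python
import numpy as np
from dltk.io.augmentation import (flip, add_gaussian_offset,
                                  add_gaussian_noise, elastic_transform)

image = np.random.rand(1, 64, 64, 1)          # [depth, height, width, channels]

flipped = flip(image.copy(), axis=2)          # mirrored with probability 0.5
offset = add_gaussian_offset(image.copy(), sigma=0.1)
noisy = add_gaussian_noise(image.copy(), sigma=0.05)
warped = elastic_transform(image[0, ..., 0], alpha=[8, 8], sigma=[3, 3])
print(flipped.shape, offset.shape, noisy.shape, warped.shape)
```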
#### File: networks/segmentation/deepmedic.py
```python
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tensorflow as tf
from dltk.core.upsample import linear_upsample_3d
from dltk.core.activations import prelu, leaky_relu
def crop_central_block(x, size):
assert all([i >= s for i, s in zip(x.get_shape().as_list()[1:], size)]), \
'Output size must not be bigger than input size. But was {} compared ' \
'to {}'.format(x.get_shape().as_list()[1:], size)
slicer = [slice(None)] * len(x.get_shape().as_list())
for i in range(len(size)):
# use i + 1 to account for batch dimension
start = (x.get_shape().as_list()[i + 1] - size[i]) // 2
end = start + size[i]
slicer[i + 1] = slice(start, end)
return x[slicer]
def deepmedic_3d(inputs, num_classes,
normal_filters=(30, 30, 40, 40, 40, 40, 50, 50),
normal_strides=((1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1),
(1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1)),
normal_kernels=((3, 3, 3), (3, 3, 3), (3, 3, 3), (3, 3, 3),
(3, 3, 3), (3, 3, 3), (3, 3, 3), (3, 3, 3)),
normal_residuals=(4, 6, 8),
normal_input_shape=(25, 25, 25),
subsampled_filters=((30, 30, 40, 40, 40, 40, 50, 50),),
subsampled_strides=(((1, 1, 1), (1, 1, 1), (1, 1, 1),
(1, 1, 1), (1, 1, 1), (1, 1, 1),
(1, 1, 1), (1, 1, 1)),),
subsampled_kernels=(((3, 3, 3), (3, 3, 3), (3, 3, 3),
(3, 3, 3), (3, 3, 3), (3, 3, 3),
(3, 3, 3), (3, 3, 3)),),
subsampled_residuals=((4, 6, 8),),
subsampled_input_shapes=((57, 57, 57),),
subsample_factors=((3, 3, 3),),
fc_filters=(150, 150),
first_fc_kernel=(3, 3, 3),
fc_residuals=(2, ),
padding='VALID',
use_prelu=True,
mode=tf.estimator.ModeKeys.EVAL,
use_bias=True,
kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None):
"""
Image segmentation network based on a DeepMedic architecture [1, 2].
Downsampling of features is done via strided convolutions. The architecture
uses multiple processing paths with different resolutions. The different
pathways are concatenated and then fed to the convolutional fc layers.
[1] <NAME> al. Efficient Multi-Scale 3D CNN with Fully
Connected CRF for Accurate Brain Lesion Segmentation. Medical Image
Analysis, 2016.
[2] <NAME> et al. Multi-Scale 3D CNNs for segmentation of
brain Lesions in multi-modal MRI. ISLES challenge, MICCAI 2015.
Note: We are currently using bilinear upsampling whereas the original
implementation (https://github.com/Kamnitsask/deepmedic) uses repeat
upsampling.
Args:
inputs (tf.Tensor): Input feature tensor to the network (rank 5
required).
num_classes (int): Number of output classes.
normal_filters (array_like, optional): Number of filters for each layer
for normal path.
normal_strides (array_like, optional): Strides for each layer for
normal path.
normal_kernels (array_like, optional): Kernel size for each layer for
normal path.
normal_residuals (array_like, optional): Location of residual
connections for normal path.
normal_input_shape (array_like, optional): Shape of input to normal
path. Input to the network is center cropped to this shape.
subsampled_filters (array_like, optional): Number of filters for each
layer for each subsampled path.
subsampled_strides (array_like, optional): Strides for each layer for
each subsampled path.
subsampled_kernels (array_like, optional): Kernel size for each layer
for each subsampled path.
subsampled_residuals (array_like, optional): Location of residual
connections for each subsampled path.
subsampled_input_shapes (array_like, optional): Shape of input to
subsampled paths. Input to the network is downsampled and then
center cropped to this shape.
subsample_factors (array_like, optional): Downsampling factors for
each subsampled path.
fc_filters (array_like, optional): Number of filters for the fc layers.
first_fc_kernel (array_like, optional): Shape of the kernel of the
first fc layer.
fc_residuals (array_like, optional): Location of residual connections
for the fc layers.
padding (string, optional): Type of padding used for convolutions.
Standard is `VALID`
use_prelu (bool, optional): Flag to enable PReLU activation.
Alternatively leaky ReLU is used. Defaults to `True`.
mode (TYPE, optional): One of the tf.estimator.ModeKeys strings: TRAIN,
EVAL or PREDICT
use_bias (bool, optional): Boolean, whether the layer uses a bias.
kernel_initializer (TYPE, optional): An initializer for the convolution
kernel.
bias_initializer (TYPE, optional): An initializer for the bias vector.
If None, no bias will be applied.
kernel_regularizer (None, optional): Optional regularizer for the
convolution kernel.
bias_regularizer (None, optional): Optional regularizer for the bias
vector.
Returns:
dict: dictionary of output tensors
"""
outputs = {}
assert len(normal_filters) == len(normal_strides)
assert len(normal_filters) == len(normal_kernels)
assert len(inputs.get_shape().as_list()) == 5, \
'inputs are required to have a rank of 5.'
conv_params = {'use_bias': use_bias,
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'padding': padding}
def _residual_connection(x, prev_x):
# crop previous to current size:
prev_x = crop_central_block(prev_x, x.get_shape().as_list()[1:-1])
# add prev_x to first channels of x
to_pad = [[0, 0]] * (len(x.get_shape().as_list()) - 1)
to_pad += [[0, x.get_shape().as_list()[-1] -
prev_x.get_shape().as_list()[-1]]]
prev_x = tf.pad(prev_x, to_pad)
return x + prev_x
def _build_normal_pathway(x):
with tf.variable_scope('normal_pathway'):
tf.logging.info('Building normal pathway')
center_crop = crop_central_block(x, normal_input_shape)
tf.logging.info('Input is {}'.format(
center_crop.get_shape().as_list()))
layers = []
x = center_crop
for i in range(len(normal_filters)):
with tf.variable_scope('layer_{}'.format(i)):
layers.append(x)
if i > 0:
x = tf.layers.batch_normalization(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
x = prelu(x) if use_prelu else leaky_relu(x, 0.01)
x = tf.layers.conv3d(x,
normal_filters[i],
normal_kernels[i],
normal_strides[i],
**conv_params)
# TODO: add pooling and dropout?!
if i + 1 in normal_residuals:
x = _residual_connection(x, layers[i - 1])
tf.logging.info('Output of layer {} is {}'.format(
i, x.get_shape().as_list()))
tf.logging.info('Output is {}'.format(x.get_shape().as_list()))
return x
def _downsample(x, factor):
if isinstance(factor, int):
factor = [factor] * (len(x.get_shape().as_list()) - 2)
pool_func = tf.nn.avg_pool3d
factor = list(factor)
x = pool_func(x, [1, ] + factor + [1, ], [1, ] + factor + [1, ],
'VALID')
return x
def _build_subsampled_pathways(x):
pathways = []
for pathway in range(len(subsample_factors)):
with tf.variable_scope('subsampled_pathway_{}'.format(pathway)):
tf.logging.info(
'Building subsampled pathway {}'.format(pathway))
center_crop = crop_central_block(
x, subsampled_input_shapes[pathway])
tf.logging.info('Input is {}'.format(
center_crop.get_shape().as_list()))
layers = []
x = center_crop
x = _downsample(x, subsample_factors[pathway])
tf.logging.info('Downsampled input is {}'.format(
x.get_shape().as_list()))
for i in range(len(subsampled_filters[pathway])):
with tf.variable_scope('layer_{}'.format(i)):
layers.append(x)
if i > 0:
x = tf.layers.batch_normalization(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
x = prelu(x) if use_prelu else leaky_relu(x, 0.01)
x = tf.layers.conv3d(x, subsampled_filters[pathway][i],
subsampled_kernels[pathway][i],
subsampled_strides[pathway][i],
**conv_params)
# TODO: add pooling and dropout?!
if i + 1 in subsampled_residuals:
x = _residual_connection(x, layers[i - 1])
tf.logging.info('Output of layer {} is {}'.format(
i, x.get_shape().as_list()))
x = _upsample(x, subsample_factors[pathway])
tf.logging.info('Output is {}'.format(x.get_shape().as_list()))
pathways.append(x)
return pathways
def _upsample(x, factor):
if isinstance(factor, int):
factor = [factor] * (len(x.get_shape().as_list()) - 2)
# TODO: build repeat upsampling
x = linear_upsample_3d(x, strides=factor)
return x
x = inputs
normal = _build_normal_pathway(x)
pathways = _build_subsampled_pathways(x)
normal_shape = normal.get_shape().as_list()[1:-1]
paths = [normal]
for x in pathways:
paths.append(crop_central_block(x, normal_shape))
x = tf.concat(paths, -1)
layers = []
for i in range(len(fc_filters)):
with tf.variable_scope('fc_{}'.format(i)):
layers.append(x)
if i == 0 and any([k > 1 for k in first_fc_kernel]):
x_shape = x.get_shape().as_list()
# CAUTION: https://docs.python.org/2/faq/programming.html#how-do-i-create-a-multidimensional-list
x_pad = [[0, 0] for _ in range(len(x_shape))]
for j in range(len(first_fc_kernel)):
to_pad = (first_fc_kernel[j] - 1)
x_pad[j + 1][0] = to_pad // 2
x_pad[j + 1][1] = to_pad - x_pad[j + 1][0]
print(x_pad)
x = tf.pad(x, x_pad, mode='SYMMETRIC')
x = tf.layers.batch_normalization(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
x = prelu(x) if use_prelu else leaky_relu(x, 0.01)
x = tf.layers.conv3d(x, fc_filters[i],
first_fc_kernel if i == 0 else 1,
**conv_params)
if i + 1 in fc_residuals:
x = _residual_connection(x, layers[i - 1])
with tf.variable_scope('last'):
x = tf.layers.batch_normalization(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
x = prelu(x) if use_prelu else leaky_relu(x, 0.01)
conv_params['use_bias'] = True
x = tf.layers.conv3d(x, num_classes, 1, **conv_params)
outputs['logits'] = x
tf.logging.info('last conv shape %s', x.get_shape())
with tf.variable_scope('pred'):
y_prob = tf.nn.softmax(x)
outputs['y_prob'] = y_prob
y_ = tf.argmax(x, axis=-1)
outputs['y_'] = y_
return outputs
``` |
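A graph-construction smoke test for the defaults above, assuming a TensorFlow 1.x runtime (as used throughout DLTK) and the installed package import path. The input must be at least as large as `subsampled_input_shapes`, so a 57-cube is the smallest single-scale example; with VALID padding the logits should come out as a 9-cube.
```python
import tensorflow as tf
from dltk.networks.segmentation.deepmedic import deepmedic_3d

inputs = tf.placeholder(tf.float32, shape=[1, 57, 57, 57, 1])
net = deepmedic_3d(inputs, num_classes=4,
                   mode=tf.estimator.ModeKeys.PREDICT)
print(net['logits'].get_shape().as_list())   # expected: [1, 9, 9, 9, 4]
```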
{
"source": "jiapei100/T2F",
"score": 2
} |
#### File: T2F/implementation/train_network.py
```python
import datetime
import time
import torch as th
import numpy as np
import data_processing.DataLoader as dl
import argparse
import yaml
import os
import pickle
import timeit
from torch.backends import cudnn
# define the device for the training script
device = th.device("cuda" if th.cuda.is_available() else "cpu")
# set torch manual seed for consistent output
th.manual_seed(3)
# Start fast training mode:
cudnn.benchmark = True
def parse_arguments():
"""
command line arguments parser
:return: args => parsed command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("--config", action="store", type=str, default="configs/1.conf",
help="default configuration for the Network")
parser.add_argument("--start_depth", action="store", type=int, default=0,
help="Starting depth for training the network")
parser.add_argument("--encoder_file", action="store", type=str, default=None,
help="pretrained Encoder file (compatible with my code)")
parser.add_argument("--ca_file", action="store", type=str, default=None,
help="pretrained Conditioning Augmentor file (compatible with my code)")
parser.add_argument("--generator_file", action="store", type=str, default=None,
help="pretrained Generator file (compatible with my code)")
parser.add_argument("--discriminator_file", action="store", type=str, default=None,
help="pretrained Discriminator file (compatible with my code)")
args = parser.parse_args()
return args
def get_config(conf_file):
"""
parse and load the provided configuration
:param conf_file: configuration file
:return: conf => parsed configuration
"""
from easydict import EasyDict as edict
with open(conf_file, "r") as file_descriptor:
        data = yaml.load(file_descriptor, Loader=yaml.FullLoader)
# convert the data into an easyDictionary
return edict(data)
def create_grid(samples, scale_factor, img_file, real_imgs=False):
"""
utility function to create a grid of GAN samples
:param samples: generated samples for storing
:param scale_factor: factor for upscaling the image
:param img_file: name of file to write
:param real_imgs: turn off the scaling of images
:return: None (saves a file)
"""
from torchvision.utils import save_image
from torch.nn.functional import interpolate
samples = th.clamp((samples / 2) + 0.5, min=0, max=1)
# upsample the image
if not real_imgs and scale_factor > 1:
samples = interpolate(samples,
scale_factor=scale_factor)
# save the images:
save_image(samples, img_file, nrow=int(np.sqrt(len(samples))))
def create_descriptions_file(file, captions, dataset):
"""
utility function to create a file for storing the captions
:param file: file for storing the captions
:param captions: encoded_captions or raw captions
:param dataset: the dataset object for transforming captions
:return: None (saves a file)
"""
from functools import reduce
# transform the captions to text:
if isinstance(captions, th.Tensor):
captions = list(map(lambda x: dataset.get_english_caption(x.cpu()),
[captions[i] for i in range(captions.shape[0])]))
with open(file, "w") as filler:
for caption in captions:
filler.write(reduce(lambda x, y: x + " " + y, caption, ""))
filler.write("\n\n")
else:
with open(file, "w") as filler:
for caption in captions:
filler.write(caption)
filler.write("\n\n")
def train_networks(encoder, ca, c_pro_gan, dataset, epochs,
encoder_optim, ca_optim, fade_in_percentage,
batch_sizes, start_depth, num_workers, feedback_factor,
log_dir, sample_dir, checkpoint_factor,
save_dir, use_matching_aware_dis=True):
# required only for type checking
from networks.TextEncoder import PretrainedEncoder
# input assertions
assert c_pro_gan.depth == len(batch_sizes), "batch_sizes not compatible with depth"
assert c_pro_gan.depth == len(epochs), "epochs_sizes not compatible with depth"
assert c_pro_gan.depth == len(fade_in_percentage), "fip_sizes not compatible with depth"
# put all the Networks in training mode:
ca.train()
c_pro_gan.gen.train()
c_pro_gan.dis.train()
if not isinstance(encoder, PretrainedEncoder):
encoder.train()
print("Starting the training process ... ")
# create fixed_input for debugging
temp_data = dl.get_data_loader(dataset, batch_sizes[start_depth], num_workers=3)
    fixed_captions, fixed_real_images = next(iter(temp_data))
fixed_embeddings = encoder(fixed_captions)
fixed_embeddings = th.from_numpy(fixed_embeddings).to(device)
fixed_c_not_hats, _, _ = ca(fixed_embeddings)
fixed_noise = th.randn(len(fixed_captions),
c_pro_gan.latent_size - fixed_c_not_hats.shape[-1]).to(device)
fixed_gan_input = th.cat((fixed_c_not_hats, fixed_noise), dim=-1)
# save the fixed_images once:
fixed_save_dir = os.path.join(sample_dir, "__Real_Info")
os.makedirs(fixed_save_dir, exist_ok=True)
create_grid(fixed_real_images, None, # scale factor is not required here
os.path.join(fixed_save_dir, "real_samples.png"), real_imgs=True)
create_descriptions_file(os.path.join(fixed_save_dir, "real_captions.txt"),
fixed_captions,
dataset)
# create a global time counter
global_time = time.time()
# delete temp data loader:
del temp_data
for current_depth in range(start_depth, c_pro_gan.depth):
print("\n\nCurrently working on Depth: ", current_depth)
current_res = np.power(2, current_depth + 2)
print("Current resolution: %d x %d" % (current_res, current_res))
data = dl.get_data_loader(dataset, batch_sizes[current_depth], num_workers)
ticker = 1
for epoch in range(1, epochs[current_depth] + 1):
start = timeit.default_timer() # record time at the start of epoch
print("\nEpoch: %d" % epoch)
total_batches = len(iter(data))
fader_point = int((fade_in_percentage[current_depth] / 100)
* epochs[current_depth] * total_batches)
for (i, batch) in enumerate(data, 1):
# calculate the alpha for fading in the layers
alpha = ticker / fader_point if ticker <= fader_point else 1
# extract current batch of data for training
captions, images = batch
if encoder_optim is not None:
captions = captions.to(device)
images = images.to(device)
# perform text_work:
embeddings = th.from_numpy(encoder(captions)).to(device)
if encoder_optim is None:
# detach the LSTM from backpropagation
embeddings = embeddings.detach()
c_not_hats, mus, sigmas = ca(embeddings)
z = th.randn(
len(captions),
c_pro_gan.latent_size - c_not_hats.shape[-1]
).to(device)
gan_input = th.cat((c_not_hats, z), dim=-1)
# optimize the discriminator:
dis_loss = c_pro_gan.optimize_discriminator(gan_input, images,
embeddings.detach(),
current_depth, alpha,
use_matching_aware_dis)
# optimize the generator:
z = th.randn(
captions.shape[0] if isinstance(captions, th.Tensor) else len(captions),
c_pro_gan.latent_size - c_not_hats.shape[-1]
).to(device)
gan_input = th.cat((c_not_hats, z), dim=-1)
if encoder_optim is not None:
encoder_optim.zero_grad()
ca_optim.zero_grad()
gen_loss = c_pro_gan.optimize_generator(gan_input, embeddings,
current_depth, alpha)
# once the optimize_generator is called, it also sends gradients
# to the Conditioning Augmenter and the TextEncoder. Hence the
# zero_grad statements prior to the optimize_generator call
# now perform optimization on those two as well
# obtain the loss (KL divergence from ca_optim)
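                # closed-form KL( N(mu, sigma^2) || N(0, I) ) = 0.5 * sum(mu^2 + sigma^2 - log(sigma^2) - 1)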
kl_loss = th.mean(0.5 * th.sum((mus ** 2) + (sigmas ** 2)
- th.log((sigmas ** 2)) - 1, dim=1))
kl_loss.backward()
ca_optim.step()
if encoder_optim is not None:
encoder_optim.step()
# provide a loss feedback
if i % int(total_batches / feedback_factor) == 0 or i == 1:
elapsed = time.time() - global_time
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Elapsed [%s] batch: %d d_loss: %f g_loss: %f kl_los: %f"
% (elapsed, i, dis_loss, gen_loss, kl_loss.item()))
# also write the losses to the log file:
os.makedirs(log_dir, exist_ok=True)
log_file = os.path.join(log_dir, "loss_" + str(current_depth) + ".log")
with open(log_file, "a") as log:
log.write(str(dis_loss) + "\t" + str(gen_loss)
+ "\t" + str(kl_loss.item()) + "\n")
# create a grid of samples and save it
gen_img_file = os.path.join(sample_dir, "gen_" + str(current_depth) +
"_" + str(epoch) + "_" +
str(i) + ".png")
create_grid(
samples=c_pro_gan.gen(
fixed_gan_input,
current_depth,
alpha
),
scale_factor=int(np.power(2, c_pro_gan.depth - current_depth - 1)),
img_file=gen_img_file,
)
# increment the ticker:
ticker += 1
stop = timeit.default_timer()
print("Time taken for epoch: %.3f secs" % (stop - start))
if epoch % checkpoint_factor == 0 or epoch == 0:
# save the Model
encoder_save_file = os.path.join(save_dir, "Encoder_" +
str(current_depth) + ".pth")
ca_save_file = os.path.join(save_dir, "Condition_Augmentor_" +
str(current_depth) + ".pth")
gen_save_file = os.path.join(save_dir, "GAN_GEN_" +
str(current_depth) + ".pth")
dis_save_file = os.path.join(save_dir, "GAN_DIS_" +
str(current_depth) + ".pth")
os.makedirs(save_dir, exist_ok=True)
if encoder_optim is not None:
th.save(encoder.state_dict(), encoder_save_file, pickle)
th.save(ca.state_dict(), ca_save_file, pickle)
th.save(c_pro_gan.gen.state_dict(), gen_save_file, pickle)
th.save(c_pro_gan.dis.state_dict(), dis_save_file, pickle)
print("Training completed ...")
def main(args):
"""
Main function for the script
:param args: parsed command line arguments
:return: None
"""
from networks.TextEncoder import Encoder
from networks.ConditionAugmentation import ConditionAugmentor
from pro_gan_pytorch.PRO_GAN import ConditionalProGAN
print(args.config)
config = get_config(args.config)
print("Current Configuration:", config)
# create the dataset for training
if config.use_pretrained_encoder:
dataset = dl.RawTextFace2TextDataset(
annots_file=config.annotations_file,
img_dir=config.images_dir,
img_transform=dl.get_transform(config.img_dims)
)
from networks.TextEncoder import PretrainedEncoder
# create a new session object for the pretrained encoder:
text_encoder = PretrainedEncoder(
model_file=config.pretrained_encoder_file,
embedding_file=config.pretrained_embedding_file,
device=device
)
encoder_optim = None
else:
dataset = dl.Face2TextDataset(
pro_pick_file=config.processed_text_file,
img_dir=config.images_dir,
img_transform=dl.get_transform(config.img_dims),
captions_len=config.captions_length
)
text_encoder = Encoder(
embedding_size=config.embedding_size,
vocab_size=dataset.vocab_size,
hidden_size=config.hidden_size,
num_layers=config.num_layers,
device=device
)
encoder_optim = th.optim.Adam(text_encoder.parameters(),
lr=config.learning_rate,
betas=(config.beta_1, config.beta_2),
eps=config.eps)
# create the networks
if args.encoder_file is not None:
# Note this should not be used with the pretrained encoder file
print("Loading encoder from:", args.encoder_file)
text_encoder.load_state_dict(th.load(args.encoder_file))
condition_augmenter = ConditionAugmentor(
input_size=config.hidden_size,
latent_size=config.ca_out_size,
use_eql=config.use_eql,
device=device
)
if args.ca_file is not None:
print("Loading conditioning augmenter from:", args.ca_file)
condition_augmenter.load_state_dict(th.load(args.ca_file))
c_pro_gan = ConditionalProGAN(
embedding_size=config.hidden_size,
depth=config.depth,
latent_size=config.latent_size,
compressed_latent_size=config.compressed_latent_size,
learning_rate=config.learning_rate,
beta_1=config.beta_1,
beta_2=config.beta_2,
eps=config.eps,
drift=config.drift,
n_critic=config.n_critic,
use_eql=config.use_eql,
loss=config.loss_function,
use_ema=config.use_ema,
ema_decay=config.ema_decay,
device=device
)
if args.generator_file is not None:
print("Loading generator from:", args.generator_file)
c_pro_gan.gen.load_state_dict(th.load(args.generator_file))
if args.discriminator_file is not None:
print("Loading discriminator from:", args.discriminator_file)
c_pro_gan.dis.load_state_dict(th.load(args.discriminator_file))
# create the optimizer for Condition Augmenter separately
ca_optim = th.optim.Adam(condition_augmenter.parameters(),
lr=config.learning_rate,
betas=(config.beta_1, config.beta_2),
eps=config.eps)
print("Generator Config:")
print(c_pro_gan.gen)
print("\nDiscriminator Config:")
print(c_pro_gan.dis)
# train all the networks
train_networks(
encoder=text_encoder,
ca=condition_augmenter,
c_pro_gan=c_pro_gan,
dataset=dataset,
encoder_optim=encoder_optim,
ca_optim=ca_optim,
epochs=config.epochs,
fade_in_percentage=config.fade_in_percentage,
start_depth=args.start_depth,
batch_sizes=config.batch_sizes,
num_workers=config.num_workers,
feedback_factor=config.feedback_factor,
log_dir=config.log_dir,
sample_dir=config.sample_dir,
checkpoint_factor=config.checkpoint_factor,
save_dir=config.save_dir,
use_matching_aware_dis=config.use_matching_aware_discriminator
)
if __name__ == '__main__':
# invoke the main function of the script
main(parse_arguments())
``` |
{
"source": "jiapei100/tensorspace-converter",
"score": 3
} |
#### File: tensorspacejs/krs/keras_model.py
```python
from keras.models import Model, load_model, save_model, model_from_json
from tensorflowjs.converters.converter import dispatch_keras_h5_to_tfjs_layers_model_conversion
from utility.file_utility import remove_file, valid_file, valid_directory, show_invalid_message
TEMP_MODEL_NAME = '/enc_model.h5'
def show_keras_model_summary(path_model):
"""Present model summary by single model file
Load the model from a single model file, then present model summary
:param path_model: path to the single model file
:return: should not return anything
"""
if not valid_file(path_model):
show_invalid_message('input file', path_model)
return
print("show summary of keras saved model...")
model = load_from_saved_model(path_model)
model.summary()
def show_summary_weights(path_topology, path_weights):
"""Present model summary by model topology and weights
Load the model from topology and weights, then present model summary
:param path_topology: path to model topology file
:param path_weights: path to model weights file
:return: should not return anything
"""
if not valid_file(path_topology):
show_invalid_message('model topology file', path_topology)
return
if not valid_file(path_weights):
show_invalid_message('model weights file', path_weights)
return
print("show summary of keras saved topology + weights...")
model = load_from_saved_weights(path_topology, path_weights)
model.summary()
def preprocess_from_model(path_model, path_output_dir, output_node_names=None):
"""Preprocess a model built by Keras (from single .h5 file)
:param path_model:
:param path_output_dir:
:param output_node_names:
:return: should not return anything
"""
if not valid_file(path_model):
show_invalid_message('input file', path_model)
return
model = load_from_saved_model(path_model)
enc_model = generate_encapsulate_model(model, split_layer_name_list(output_node_names))
# Generate temp Keras enc_model for further processing
save_enc_model(path_output_dir, enc_model)
convert_tfjs(path_output_dir)
clean_temp_file(path_output_dir)
def preprocess_from_weights(path_topology, path_weights, path_output_dir, output_node_names=None):
"""""Preprocess a model built by Keras (from topology+weights)
:param path_topology:
:param path_weights:
:param path_output_dir:
:param output_node_names:
:return:
"""
if not valid_file(path_topology):
show_invalid_message('model topology file', path_topology)
return
if not valid_file(path_weights):
show_invalid_message('model weights file', path_weights)
return
model = load_from_saved_weights(path_topology, path_weights)
enc_model = generate_encapsulate_model(model, split_layer_name_list(output_node_names))
# Generate temp Keras enc_model for further processing
save_enc_model(path_output_dir, enc_model)
convert_tfjs(path_output_dir)
clean_temp_file(path_output_dir)
def load_from_saved_model(path_model):
model = load_model(path_model)
return model
def load_from_saved_weights(path_topology, path_weights):
with open(path_topology, 'r') as f:
model = model_from_json(f.read())
model.load_weights(path_weights)
return model
def generate_encapsulate_model(model, output_layer_names=None):
"""Generate an encapsulate model
The new encapsulate model includes:
1. model.inputs from original model as enc_model.inputs
2. enc_model.outputs has two parts:
2.1 transferred identity tensors from original inputs (implemented by Lambda)
2.2 tensors from original model
2.2.1 default to all tensors from original model
2.2.2 based on provided output_layer_names to look up specified layer.output tensors
"""
if output_layer_names is None:
transfer_outputs = list(map(lambda layer: layer.output, model.layers[0:]))
else:
transfer_outputs = list(map(lambda oln: model.get_layer(oln).output, output_layer_names))
enc_model = Model(
inputs=model.inputs,
outputs=transfer_outputs
)
return enc_model
def split_layer_name_list(output_node_names):
if output_node_names is None:
return None
else:
return output_node_names.split(",")
def save_enc_model(path_output_dir, enc_model):
print("Saving enc_model...")
save_model(enc_model, path_output_dir + TEMP_MODEL_NAME)
def convert_tfjs(path_output_dir):
print("Saving converted tfjs model...")
    dispatch_keras_h5_to_tfjs_layers_model_conversion(
path_output_dir + TEMP_MODEL_NAME,
path_output_dir
)
def clean_temp_file(path_output_dir):
print("Removing enc_model file...")
remove_file(path_output_dir + TEMP_MODEL_NAME)
```
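A minimal usage sketch (not from the repository) of the encapsulation helper documented above; the toy model, layer names and the import path are assumptions.
```python
# Hedged example: wrap a toy Keras model so that selected layer outputs are exposed.
from keras.models import Sequential
from keras.layers import Dense
from krs.keras_model import generate_encapsulate_model  # assumed import path

toy = Sequential([
    Dense(8, activation='relu', input_shape=(4,), name='hidden'),
    Dense(3, activation='softmax', name='out'),
])

# Expose only the 'hidden' and 'out' activations in the encapsulated model.
enc = generate_encapsulate_model(toy, ['hidden', 'out'])
print([t.shape for t in enc.outputs])
```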
#### File: tensorspacejs/tfjs/tfjs_conversion.py
```python
import os
import subprocess
from utility.file_utility import valid_file, valid_directory, show_invalid_message
MAIN_JS_PATH = os.path.abspath(
os.path.join(__file__, os.pardir, 'main.js')
)
def process_tfjs_model(path_input, path_output, output_names=None):
os.makedirs(path_output, exist_ok=True)
if not valid_file(path_input):
show_invalid_message('input model file', path_input)
return
if output_names is None:
subprocess.check_call(["node", MAIN_JS_PATH, path_input, path_output])
else:
output_names = "--output_layer_names=" + output_names
subprocess.check_call(["node", MAIN_JS_PATH, output_names, path_input, path_output])
print("Mission Complete!!!")
def show_tfjs_model_summary(path_input):
if not valid_file(path_input):
show_invalid_message('input model file', path_input)
return
subprocess.check_call(["node", MAIN_JS_PATH, "--summary", path_input])
```
#### File: tensorspace-converter/tensorspacejs/tsp_converters.py
```python
import argparse
import os
import sys
sys.path.append(
os.path.abspath(
os.path.join(
__file__, os.pardir
)
)
)
from tf.tensorflow_conversion import show_tf_model_summary, preprocess_tensorflow_model
from krs.keras_conversion import show_keras_model_summary, preprocess_keras_model
from tfjs.tfjs_conversion import show_tfjs_model_summary, process_tfjs_model
from install import install
from version import version, python_version, node_version, npm_version, tensorflow_version, keras_version, tensorflowjs_version
def print_hello_world():
print("Hello World from converter")
def main():
parser = argparse.ArgumentParser('TensorSpace.js model converter/preprocessor.')
parser.add_argument(
'input_path',
nargs='?',
type=str,
help='Path to the input file or directory. For input format "keras", '
'an HDF5 (.h5) file or structure (.json) and weight (.hdf5) directory '
'is expected.'
)
parser.add_argument(
'output_path', nargs='?', type=str, help='Path for all output artifacts.'
)
parser.add_argument(
'--input_model_from',
type=str,
required=False,
default='keras',
choices=set(['tensorflow', 'keras', 'tfjs']),
help='Input model type.\n'
'It could be "tensorflow", "keras", "tfjs"'
)
parser.add_argument(
'--input_model_format',
type=str,
required=False,
choices=set(['topology_weights_combined',
'topology_weights_separated',
'tf_saved',
'tf_frozen',
'tf_keras',
'tf_keras_separated']),
help='Input format.\n'
'For "topology_weights_combined", config for Keras model, input is .h5 saved by .save().\n'
'For "topology_weights_separated", config for Keras model, inputs are topology+weights.\n'
'For "tf_saved", config for TensorFlow model, input is TensorFlow saved model.\n'
'For "tf_frozen", config for TensorFlow model, input is TensorFlow frozen model.\n'
'For "tf_keras", config for TensorFlow model, input is .h5 model.\n'
'For "tf_keras_separated", config for TensorFlow model, input is topology+weights.'
)
parser.add_argument(
'--output_layer_names',
type=str,
help='The names of the output nodes, separated by slash. '
'E.g., "logits/activations".')
parser.add_argument(
'--version',
'-v',
'-V',
dest='show_version',
action='store_true',
help='Show versions of tensorspacejs and its dependencies'
)
parser.add_argument(
'-init',
dest='init',
action='store_true',
help='Init TensorSpace Converter'
)
parser.add_argument(
'--summary',
'-s',
dest='show_model_summary',
action='store_true',
        help='Show summary of loaded model'
)
flags = parser.parse_args()
if flags.show_version:
print('\ntensorspacejs %s\n' % version)
print('Dependency versions:')
print('python %s' % python_version)
print('node %s' % node_version)
print('npm %s' % npm_version)
print('tensorflow %s' % tensorflow_version)
print('keras %s' % keras_version)
print('tensorflowjs %s' % tensorflowjs_version)
return
if flags.init:
install()
return
if flags.input_path is None:
raise ValueError(
'Error: The input_path argument must be set. '
'Run with --help flag for usage information.')
if flags.input_model_from not in ('tensorflow', 'keras', 'tfjs'):
raise ValueError(
'The --input_model_from flag can only be set to '
'"tensorflow", "keras", "tfjs" '
'but the current input type is "%s".' % flags.input_model_from)
if flags.input_model_from == 'keras'\
and flags.input_model_format not in (
'topology_weights_combined',
'topology_weights_separated'):
raise ValueError(
'For input_model_from == "keras", the --input_model_format flag can only be set to'
'"topology_weights_combined" and "topology_weights_separated" '
'but the current input model format is "%s".' % flags.input_model_format)
if flags.input_model_from == 'tensorflow'\
and flags.input_model_format not in (
'tf_saved',
'tf_frozen',
'tf_keras',
'tf_keras_separated'):
raise ValueError(
'For input_model_from == "tensorflow", the --input_model_format flag can only be set to'
'"tf_saved", "tf_frozen", "tf_checkpoint_model", "tf_keras", "tf_keras_separated" '
'but the current input model format is "%s".' % flags.input_model_format)
if flags.show_model_summary:
if flags.input_model_from == 'keras':
show_keras_model_summary(
flags.input_model_format,
flags.input_path
)
return
if flags.input_model_from == 'tensorflow':
show_tf_model_summary(
flags.input_model_format,
flags.input_path
)
return
if flags.input_model_from == "tfjs":
show_tfjs_model_summary(flags.input_path)
return
return
if flags.input_model_from == 'tensorflow':
preprocess_tensorflow_model(
flags.input_model_format,
flags.input_path,
flags.output_path,
flags.output_layer_names
)
return
if flags.input_model_from == 'keras':
preprocess_keras_model(
flags.input_model_format,
flags.input_path,
flags.output_path,
flags.output_layer_names
)
return
if flags.input_model_from == 'tfjs':
process_tfjs_model(
flags.input_path,
flags.output_path,
flags.output_layer_names
)
return
print("Nothing happened...")
if __name__ == '__main__':
main()
```
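An illustrative invocation of the CLI defined above, using only flags declared in its argument parser; the model paths are placeholders.
```python
# Hypothetical command line for the converter above (paths are placeholders).
import subprocess

subprocess.check_call([
    "python", "tsp_converters.py",
    "--input_model_from", "keras",
    "--input_model_format", "topology_weights_combined",
    "--output_layer_names", "dense_1",
    "model.h5",      # input_path (positional)
    "converted/",    # output_path (positional)
])
```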
#### File: tensorspacejs/utility/file_utility.py
```python
import os
def remove_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
else:
print(file_path + ' does not exist.')
def valid_file(file_path):
if not os.path.exists(file_path):
print(file_path + ' does not exist.')
return False
if not os.path.isfile(file_path):
print(file_path + ' is not a file.')
return False
return True
def valid_directory(dir_path):
if not os.path.exists(dir_path):
print(dir_path + ' does not exist.')
return False
if not os.path.isdir(dir_path):
print(dir_path + ' is not a directory.')
return False
return True
def show_invalid_message(msg, invalid_parameter):
    print('Aborting conversion... INVALID ' + msg + ': ' + invalid_parameter)
``` |
{
"source": "jiapeijia/pkuseg-python",
"score": 2
} |
#### File: jiapeijia/pkuseg-python/setup.py
```python
import setuptools
from distutils.extension import Extension
import numpy as np
from Cython.Build import cythonize
def setup_package():
long_description = "pkuseg-python"
extensions = [
Extension(
"pkuseg.inference",
["pkuseg/inference.pyx"],
include_dirs=[np.get_include()],
language="c++"
),
Extension(
"pkuseg.feature_extractor",
["pkuseg/feature_extractor.pyx"],
include_dirs=[np.get_include()],
),
]
setuptools.setup(
name="pkuseg",
version="0.0.14",
author="Lanco",
author_email="<EMAIL>",
description="A small package for Chinese word segmentation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/lancopku/pkuseg-python",
packages=setuptools.find_packages(),
package_data={"": ["*.txt*", "*.pkl", "*.npz"]},
classifiers=[
"Programming Language :: Python :: 3",
"License :: Other/Proprietary License",
"Operating System :: OS Independent",
],
install_requires=["numpy"],
ext_modules=cythonize(extensions, annotate=True),
zip_safe=False,
)
if __name__ == "__main__":
setup_package()
``` |
{
"source": "JiaPeng1234/leetcode2020",
"score": 4
} |
#### File: BFS/542. 01 Matrix/Solution.py
```python
class Solution:
def updateMatrix(self, matrix: List[List[int]]) -> List[List[int]]:
"""
# step 1: create a full zero "map", all zeros in the map is set to 1
# step 2: use BFS, initialize the queue with all coordinate of zeros
# step 3: zeros begin to diffuse to its non-zero adjacent cells, update them with step
# step 4: the cells that have already been checked should be added to "map" created in step 1
"""
if len(matrix) == 0 or len(matrix[0]) == 0:
return matrix
mapp = [[0] * len(matrix[0]) for _ in range(len(matrix))]
q = collections.deque() # using deque can improve efficiency
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == 0:
q.append((i, j))
mapp[i][j] = 1
adjacent = [-1, 0, 1, 0, -1]
steps = 0
while q:
size = len(q)
while size:
size -= 1
node = q.popleft()
for i in range(4):
x = node[0] + adjacent[i]
y = node[1] + adjacent[i+1]
if x < 0 or y < 0 or x >= len(matrix) or y >= len(matrix[0]) or mapp[x][y] == 1:
continue
q.append((x, y))
mapp[x][y] = 1
matrix[x][y] = steps + 1
steps += 1
return matrix
```
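A quick check of the multi-source BFS above, assuming the `Solution` class is defined in the same session with `typing.List` and `collections` available as on LeetCode.
```python
import collections
from typing import List

matrix = [[0, 0, 0],
          [0, 1, 0],
          [1, 1, 1]]
print(Solution().updateMatrix(matrix))  # [[0, 0, 0], [0, 1, 0], [1, 2, 1]]
```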
#### File: DFS/22. Generate Parentheses/Solution.py
```python
class Solution:
def generateParenthesis(self, n: int) -> List[str]:
ans = []
self.dfs(n, n, "", ans)
return ans
def dfs(self, left, right, cur, ans):
        # Note 1: pruning condition. Depending on the problem, the pruning check can also be placed before
        # the exit condition; here the backtracking variables and the exit condition involve the same
        # variables (left and right), so putting the check first acts like a `continue` inside a for loop.
if left > right or left < 0 or right < 0: # don't forget to set the boundary condition left < 0 and right < 0
return
        if right == left == 0:  # Note 2: this is the exit (base) condition
ans.append(cur)
return
        # Note 3: no for loop here; the two possibilities are simply backtracked one by one
left -= 1
self.dfs(left, right, cur+"(", ans)
left += 1
right -= 1
self.dfs(left, right, cur+")", ans)
right += 1
```
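A small check of the backtracking above, assuming the `Solution` class is available in the session.
```python
from typing import List

print(Solution().generateParenthesis(2))  # ['(())', '()()']
print(Solution().generateParenthesis(3))  # ['((()))', '(()())', '(())()', '()(())', '()()()']
```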
#### File: DFS/47. Permutations II/Solution.py
```python
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
nums.sort()
ans = []
mask = [0] * len(nums)
        self.dfs(0, nums, ans, [], mask)  # Note 1: instead of a starter index, use a same-sized mask array to mark whether each element has been used
return ans
def dfs(self, d, nums, ans, cur, mask):
if d == len(nums):
ans.append(cur)
return
for i in range(0, len(nums)):
            # Note 2: pruning condition via `continue`;
            # compare with the `continue` pruning in https://github.com/XincredibleY/leetcode2020/blob/master/DFS/40.%20Combination%20Sum%20II/Solution.py
if i > 0 and nums[i] == nums[i-1] and mask[i-1] == 0:
continue
if mask[i] == 1:
continue
mask[i] = 1
self.dfs(d+1, nums, ans, cur+[nums[i]], mask)
mask[i] = 0
```
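A quick check of the duplicate-aware permutation above, assuming the `Solution` class is available in the session.
```python
from typing import List

print(Solution().permuteUnique([1, 1, 2]))  # [[1, 1, 2], [1, 2, 1], [2, 1, 1]]
```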
#### File: DFS/698. Partition to K Equal Sum Subsets/Solution.py
```python
class Solution:
def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:
if k == 1:
return True
add = sum(nums)
if add % k or k > len(nums):
return False
target = add / k
nums.sort(reverse=True)
if nums[0] > target: # pruning, if not implemented, time limit exceeded
return False
def dfs(target, rest, mask, k):
if rest == 0:
k -= 1
rest = target
if k == 0:
return True
for i in range(0, len(nums)):
if mask[i] or nums[i] > rest:
continue
mask[i] = 1
if dfs(target, rest-nums[i], mask, k):
return True
mask[i] = 0
return False
mask = [0] * len(nums)
return dfs(target, target, mask, k)
```
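A quick check of the partition search above, assuming the `Solution` class is available in the session.
```python
from typing import List

# 20 / 4 = 5 per subset: {5}, {4, 1}, {3, 2}, {3, 2}
print(Solution().canPartitionKSubsets([4, 3, 2, 3, 5, 2, 1], 4))  # True
print(Solution().canPartitionKSubsets([1, 2, 3, 4], 3))           # False (sum not divisible by 3)
```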
#### File: DFS/842. Split Array into Fibonacci Sequence/Solution.py
```python
class Solution:
def splitIntoFibonacci(self, S: str) -> List[int]:
if len(S) == 0:
return []
self.ans = []
self.dfs(S, 0, [])
return self.ans
def dfs(self, S, starter, cur):
if starter == len(S) and len(cur) >= 3:
self.ans = cur
return True
        for end in range(starter, min(len(S), starter + 10)):  # 2^31 has 10 digits, so each number is at most 10 digits long -- a crucial bound
if S[starter] == '0' and end > starter or int(S[starter:end+1]) >= math.pow(2, 31):
break
if len(cur) >= 2 and int(S[starter:end+1]) != cur[-1] + cur[-2]:
continue
if self.dfs(S, end + 1, cur + [int(S[starter:end+1])]):
return True
return False
```
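A quick check of the Fibonacci split above, assuming the `Solution` class is available and `math` is imported as on LeetCode.
```python
import math
from typing import List

print(Solution().splitIntoFibonacci("123456579"))  # [123, 456, 579]
print(Solution().splitIntoFibonacci("0123"))        # [] -- leading zeros rule out any valid split
```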
#### File: Graph/207. Course Schedule/Solution.py
```python
class Solution:
def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
visited = [0] * numCourses
Map = collections.defaultdict(list)
for key, item in prerequisites:
Map[key].append(item)
for i in range(numCourses):
if self.dfs(i, Map, visited):
return False
return True
def dfs(self, num, Map, visited):
if visited[num] == 1: # status == 1 indicates visiting nodes
return True
if visited[num] == 2: # important! use two status, if status == 2 indicates already make sure all its prerequisites nodes no rings
return False
visited[num] = 1
for i in Map[num]:
if self.dfs(i, Map, visited):
return True
visited[num] = 2
return False
```
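A quick check of the cycle detection above, assuming the `Solution` class is available with `collections` imported as on LeetCode.
```python
import collections
from typing import List

print(Solution().canFinish(2, [[1, 0]]))          # True  (take course 0, then course 1)
print(Solution().canFinish(2, [[1, 0], [0, 1]]))  # False (cyclic dependency)
```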
#### File: LinkedList/206. Reverse Linked List/Solution.py
```python
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
if not head or not head.next:
return head
A0 = None
A1 = head
A2 = head.next
while A1:
A1.next = A0
A0 = A1
A1 = A2
if A2:
A2 = A2.next
return A0
``` |
{
"source": "JiapengChi/EDSR-PyTorch",
"score": 2
} |
#### File: EDSR-PyTorch/src/trainer.py
```python
import os
import math
import mmap
from decimal import Decimal
import time
import utility
import imageio
import torch
import torch.nn.utils as utils
from tqdm import tqdm
def get_list_num_with_write(datalist, newlist, result_file):
with open(result_file, 'a') as f:
for i in datalist:
if isinstance(i, list):
get_list_num_with_write(i, newlist, result_file)
else:
newlist.append(i)
f.write(str(i))
f.write('\n')
f.close()
def get_list_num(datalist, newlist):
for i in datalist:
if isinstance(i, list):
get_list_num(i, newlist)
else:
newlist.append(i)
class Trainer():
def __init__(self, args, loader, my_model, my_loss, ckp):
self.args = args
self.scale = args.scale
self.ckp = ckp
self.loader_train = loader.loader_train
self.loader_test = loader.loader_test
self.model = my_model
self.loss = my_loss
self.optimizer = utility.make_optimizer(args, self.model)
if self.args.load != '':
self.optimizer.load(ckp.dir, epoch=len(ckp.log))
self.error_last = 1e8
def train(self):
self.loss.step()
epoch = self.optimizer.get_last_epoch() + 1
lr = self.optimizer.get_lr()
self.ckp.write_log(
'[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
)
self.loss.start_log()
self.model.train()
timer_data, timer_model = utility.timer(), utility.timer()
# TEMP
self.loader_train.dataset.set_scale(0)
for batch, (lr, hr, _,) in enumerate(self.loader_train):
lr, hr = self.prepare(lr, hr)
timer_data.hold()
timer_model.tic()
self.optimizer.zero_grad()
sr = self.model(lr, 0)
loss = self.loss(sr, hr)
loss.backward()
if self.args.gclip > 0:
utils.clip_grad_value_(
self.model.parameters(),
self.args.gclip
)
self.optimizer.step()
timer_model.hold()
if (batch + 1) % self.args.print_every == 0:
self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
(batch + 1) * self.args.batch_size,
len(self.loader_train.dataset),
self.loss.display_loss(batch),
timer_model.release(),
timer_data.release()))
timer_data.tic()
self.loss.end_log(len(self.loader_train))
self.error_last = self.loss.log[-1, -1]
self.optimizer.schedule()
def test(self):
d_gap = 128
width = 4096 / d_gap
height = 2048 / d_gap
list_visited = []
torch.set_grad_enabled(False)
epoch = self.optimizer.get_last_epoch()
self.ckp.write_log('\nEvaluation:')
self.ckp.add_log(
torch.zeros(1, len(self.loader_test), len(self.scale))
)
self.model.eval()
mmap_file = mmap.mmap(-1, 67108864, access=mmap.ACCESS_WRITE, tagname='sharemem')
sr_mmap_file = mmap.mmap(-1, 40960, access=mmap.ACCESS_WRITE, tagname='sr')
loop_count = '-1'
result_file_name = 'results.txt'
timer_test = utility.timer()
data_dic_lr = {}
data_dic_hr = {}
# if self.args.save_results: self.ckp.begin_background()
for idx_data, d in enumerate(self.loader_test):
for idx_scale, scale in enumerate(self.scale):
print(self.loader_test)
d.dataset.set_scale(idx_scale)
for lr_ori, hr_ori, filename_ori in d:
data_dic_lr[filename_ori[0]] = lr_ori
data_dic_hr[filename_ori[0]] = hr_ori
print("Preprocess Completed!")
# for lr, hr, filename in tqdm(d, ncols=200):
partition_mmap_file = mmap.mmap(-1, 40960, access=mmap.ACCESS_WRITE, tagname='partition')
while_loop = 1
while while_loop == 1:
partition_mmap_file.seek(0)
mmap_partition_num_0 = int(partition_mmap_file.read_byte())
# if mmap_partition_num_0 != 0:
# print(mmap_partition_num_0)
mmap_partition_num_1 = int(partition_mmap_file.read_byte())
# if mmap_partition_num_1 != 0:
# print(mmap_partition_num_1)
mmap_partition_num_2 = int(partition_mmap_file.read_byte())
# if mmap_partition_num_2 != 0:
# print(mmap_partition_num_2)
mmap_partition_num_3 = int(partition_mmap_file.read_byte())
# if mmap_partition_num_3 != 0:
# print(mmap_partition_num_3)
mmap_partition_num = str(
1000 * mmap_partition_num_0 + 100 * mmap_partition_num_1 + 10 * mmap_partition_num_2 + 1 * mmap_partition_num_3 - 1)
if mmap_partition_num != loop_count and mmap_partition_num != '-1':
list_cand = []
num_center = int(mmap_partition_num)
if num_center not in list_visited:
list_cand.append(num_center)
list_visited.append(num_center)
num_top = num_center - width
if num_top >= 0 and num_top not in list_visited:
list_cand.append(num_top)
list_visited.append(num_top)
num_bottom = num_center + width
if num_bottom < width * height and num_bottom not in list_visited:
list_cand.append(num_bottom)
list_visited.append(num_bottom)
num_left = num_center - 1
if num_left <= int(num_center / width) * width and num_left not in list_visited:
list_cand.append(num_left)
list_visited.append(num_left)
else:
new_partition_num = (int(num_center / width) + 1) * width - 1
if new_partition_num not in list_visited:
list_cand.append(new_partition_num)
list_visited.append(new_partition_num)
num_right = num_center + 1
if num_right < (int(num_center / width) + 1) * width and num_right not in list_visited:
list_cand.append(num_right)
list_visited.append(num_right)
else:
new_partition_num = int(num_center / width) * width
if new_partition_num not in list_visited:
list_cand.append(new_partition_num)
list_visited.append(new_partition_num)
num_top_left = num_center - width - 1
if num_top_left >= 0:
if num_top_left <= int(num_top / width) * width and num_top_left not in list_visited:
list_cand.append(num_top_left)
list_visited.append(num_top_left)
else:
new_partition_num = (int(num_top / width) + 1) * width - 1
if new_partition_num not in list_visited:
list_cand.append(new_partition_num)
list_visited.append(new_partition_num)
num_top_right = num_center - width + 1
if num_top_right >= 0:
if num_top_right < (int(num_top / width) + 1) * width and num_top_right not in list_visited:
list_cand.append(num_top_right)
list_visited.append(num_top_right)
else:
new_partition_num = int(num_top / width) * width
if new_partition_num not in list_visited:
list_cand.append(new_partition_num)
list_visited.append(new_partition_num)
num_bottom_left = num_center + width - 1
if num_bottom_left < width * height:
if num_bottom_left <= int(num_bottom / width) * width and num_bottom_left not in list_visited:
list_cand.append(num_bottom_left)
list_visited.append(num_bottom_left)
else:
new_partition_num = (int(num_bottom / width) + 1) * width - 1
if new_partition_num not in list_visited:
list_cand.append(new_partition_num)
list_visited.append(new_partition_num)
num_bottom_right = num_center + width + 1
if num_bottom_right < width * height:
if num_bottom_right >= 0:
if num_bottom_right < (int(num_bottom / width) + 1) * width and num_bottom_right not in list_visited:
list_cand.append(num_bottom_right)
list_visited.append(num_bottom_right)
else:
new_partition_num = int(num_bottom / width) * width
if new_partition_num not in list_visited:
list_cand.append(new_partition_num)
list_visited.append(new_partition_num)
for cand in list_cand:
mmap_partition_num = str(int(cand))
loop_count = mmap_partition_num
filename = 'Tile-' + mmap_partition_num
lr = data_dic_lr[filename]
hr = data_dic_hr[filename]
lr, hr = self.prepare(lr, hr)
sr = self.model(lr, idx_scale)
sr = utility.quantize(sr, self.args.rgb_range)
save_list = [sr]
self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
sr, hr, scale, self.args.rgb_range, dataset=d
)
if self.args.save_gt:
save_list.extend([lr, hr])
if self.args.save_results:
                                self.ckp.save_results(d, filename, save_list, scale)
postfix = ('SR', 'LR', 'HR')
for v, p in zip(save_list, postfix):
normalized = v[0].mul(255 / self.args.rgb_range)
tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
data_list = tensor_cpu.numpy().tolist()
new_list = []
get_list_num(data_list, new_list)
partitionBytesPosition = d_gap * d_gap * 3 * int(mmap_partition_num)
mmap_file.seek(partitionBytesPosition)
mmap_file.write(bytes(new_list))
# sr_list = []
# sr_list.append(int(mmap_partition_num) + 1)
# sr_mmap_file.write(bytes(sr_list))
sr_list = list(map(int, list(str(int(mmap_partition_num) + 1).zfill(4))))
sr_mmap_file.write(bytes(sr_list))
print("Partition " + mmap_partition_num)
self.ckp.log[-1, idx_data, idx_scale] /= len(d)
best = self.ckp.log.max(0)
self.ckp.write_log(
'[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
d.dataset.name,
scale,
self.ckp.log[-1, idx_data, idx_scale],
best[0][idx_data, idx_scale],
best[1][idx_data, idx_scale] + 1
)
)
time.sleep(60)
mmap_file.close()
self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
self.ckp.write_log('Saving...')
# if self.args.save_results:
# self.ckp.end_background()
if not self.args.test_only:
self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
self.ckp.write_log(
'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
)
torch.set_grad_enabled(True)
def prepare(self, *args):
device = torch.device('cpu' if self.args.cpu else 'cuda')
def _prepare(tensor):
if self.args.precision == 'half': tensor = tensor.half()
return tensor.to(device)
return [_prepare(a) for a in args]
def terminate(self):
if self.args.test_only:
self.test()
return True
else:
epoch = self.optimizer.get_last_epoch() + 1
return epoch >= self.args.epochs
``` |
{
"source": "JiapengLi/pqcom",
"score": 2
} |
#### File: pqcom/pqcom/pqcom_setup_ui.py
```python
from PySide import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(236, 218)
self.verticalLayout = QtGui.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.formLayout = QtGui.QFormLayout()
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName("formLayout")
self.label = QtGui.QLabel(Dialog)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
self.portComboBox = CustomComboBox(Dialog)
self.portComboBox.setEditable(True)
self.portComboBox.setObjectName("portComboBox")
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.portComboBox)
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.baudComboBox = QtGui.QComboBox(Dialog)
self.baudComboBox.setEditable(True)
self.baudComboBox.setObjectName("baudComboBox")
self.baudComboBox.addItem("")
self.baudComboBox.addItem("")
self.baudComboBox.addItem("")
self.baudComboBox.addItem("")
self.baudComboBox.addItem("")
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.baudComboBox)
self.label_3 = QtGui.QLabel(Dialog)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_3)
self.stopbitComboBox = QtGui.QComboBox(Dialog)
self.stopbitComboBox.setObjectName("stopbitComboBox")
self.stopbitComboBox.addItem("")
self.stopbitComboBox.addItem("")
self.stopbitComboBox.addItem("")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.stopbitComboBox)
self.dataComboBox = QtGui.QComboBox(Dialog)
self.dataComboBox.setEditable(True)
self.dataComboBox.setObjectName("dataComboBox")
self.dataComboBox.addItem("")
self.dataComboBox.addItem("")
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.dataComboBox)
self.label_5 = QtGui.QLabel(Dialog)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_5)
self.parityComboBox = QtGui.QComboBox(Dialog)
self.parityComboBox.setObjectName("parityComboBox")
self.parityComboBox.addItem("")
self.parityComboBox.addItem("")
self.parityComboBox.addItem("")
self.parityComboBox.addItem("")
self.parityComboBox.addItem("")
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.parityComboBox)
self.label_4 = QtGui.QLabel(Dialog)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_4)
self.verticalLayout.addLayout(self.formLayout)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Open)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "pqcom", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", "Port Name", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Dialog", "Baud Rate", None, QtGui.QApplication.UnicodeUTF8))
self.baudComboBox.setItemText(0, QtGui.QApplication.translate("Dialog", "115200", None, QtGui.QApplication.UnicodeUTF8))
self.baudComboBox.setItemText(1, QtGui.QApplication.translate("Dialog", "57600", None, QtGui.QApplication.UnicodeUTF8))
self.baudComboBox.setItemText(2, QtGui.QApplication.translate("Dialog", "38400", None, QtGui.QApplication.UnicodeUTF8))
self.baudComboBox.setItemText(3, QtGui.QApplication.translate("Dialog", "19200", None, QtGui.QApplication.UnicodeUTF8))
self.baudComboBox.setItemText(4, QtGui.QApplication.translate("Dialog", "9600", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Dialog", "Stop Bits", None, QtGui.QApplication.UnicodeUTF8))
self.stopbitComboBox.setItemText(0, QtGui.QApplication.translate("Dialog", "1", None, QtGui.QApplication.UnicodeUTF8))
self.stopbitComboBox.setItemText(1, QtGui.QApplication.translate("Dialog", "1.5", None, QtGui.QApplication.UnicodeUTF8))
self.stopbitComboBox.setItemText(2, QtGui.QApplication.translate("Dialog", "2", None, QtGui.QApplication.UnicodeUTF8))
self.dataComboBox.setItemText(0, QtGui.QApplication.translate("Dialog", "8", None, QtGui.QApplication.UnicodeUTF8))
self.dataComboBox.setItemText(1, QtGui.QApplication.translate("Dialog", "7", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("Dialog", "Data Bits", None, QtGui.QApplication.UnicodeUTF8))
self.parityComboBox.setItemText(0, QtGui.QApplication.translate("Dialog", "None", None, QtGui.QApplication.UnicodeUTF8))
self.parityComboBox.setItemText(1, QtGui.QApplication.translate("Dialog", "Even", None, QtGui.QApplication.UnicodeUTF8))
self.parityComboBox.setItemText(2, QtGui.QApplication.translate("Dialog", "Odd", None, QtGui.QApplication.UnicodeUTF8))
self.parityComboBox.setItemText(3, QtGui.QApplication.translate("Dialog", "Mark", None, QtGui.QApplication.UnicodeUTF8))
self.parityComboBox.setItemText(4, QtGui.QApplication.translate("Dialog", "Space", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("Dialog", "Parity", None, QtGui.QApplication.UnicodeUTF8))
class CustomComboBox(QtGui.QComboBox):
clicked = QtCore.Signal()
def __init__(self, parent):
super(CustomComboBox, self).__init__(parent)
def showPopup(self):
self.clicked.emit()
super(CustomComboBox, self).showPopup()
``` |
{
"source": "jiapinai/kale",
"score": 2
} |
#### File: kale/nbparser/parser.py
```python
import re
import warnings
import networkx as nx
warnings.filterwarnings("default", category=DeprecationWarning,
module=__name__)
_TAGS_LANGUAGE = [r'^imports$',
r'^functions$',
r'^pipeline-parameters$',
r'^skip$',
# Extension may end up with 'block:' as a tag. We handle
# that as if it was empty.
# TODO: Deprecate `block` tag in future release
r'^block:([_a-z]([_a-z0-9]*)?)?$',
# `step` has the same functionality as `block` and is
# supposed to be the new name
r'^step:([_a-z]([_a-z0-9]*)?)?$',
r'^prev:[_a-z]([_a-z0-9]*)?$']
def parse_metadata(metadata):
"""Parse a notebook's cell's metadata field.
The Kale UI writes Kale specific tags inside the 'tags' field, as a list
of string tags. Supported tags are defined by _TAGS_LANGUAGE.
Args:
metadata (dict): a dictionary containing a notebook's cell's metadata
Returns (dict): parsed tags based on Kale tagging language
"""
parsed_tags = dict()
# `step_names` is a list because a notebook cell might be assigned to more
# than one Pipeline step.
parsed_tags['step_names'] = list()
parsed_tags['prev_steps'] = list()
# the notebook cell was not tagged
if 'tags' not in metadata or len(metadata['tags']) == 0:
return parsed_tags
for t in metadata['tags']:
if not isinstance(t, str):
raise ValueError("Tags must be string. Found tag %s of type %s"
% (t, type(t)))
# Check that the tag is defined by the Kale tagging language
        if not any(re.match(_t, t) for _t in _TAGS_LANGUAGE):
raise ValueError("Unrecognized tag: {}".format(t))
# Special tags have a specific effect on the cell they belong to.
# Specifically:
# - skip: ignore the notebook cell
# - pipeline-parameters: use the cell to populate Pipeline
# parameters. The cell must contain only assignment expressions
# - imports: the code of the corresponding cell(s) will be prepended
# to every Pipeline step
# - functions: same as imports, but the corresponding code is placed
# **after** `imports`
special_tags = ['skip', 'pipeline-parameters', 'imports', 'functions']
if t in special_tags:
parsed_tags['step_names'] = [t]
return parsed_tags
# now only `block|step` and `prev` tags remain to be parsed.
tag_name, value = t.split(':')
# name of the future Pipeline step
# TODO: Deprecate `block` in future release
if tag_name in ["block", "step"] and value:
if tag_name == "block":
warnings.warn("`block` tag will be deprecated in a future"
" version, use `step` tag instead",
DeprecationWarning)
parsed_tags['step_names'].append(value)
# name(s) of the father Pipeline step(s)
if tag_name == "prev":
parsed_tags['prev_steps'].append(value)
if not parsed_tags['step_names'] and parsed_tags['prev_steps']:
raise ValueError("A cell can not provide `prev` annotations without "
"providing a `block` or `step` annotation as well")
return parsed_tags
def merge_code(nb_graph, dst, code):
"""Add a new code block to an existing graph node.
Note: Updates inplace the input graph.
Args:
nb_graph (nx.DiGraph): Pipeline graph
dst (str): Name id of the destination node
code (str): Python source code to be appended to dst node
"""
source_code = nb_graph.nodes(data=True)[dst]['source']
# update pipeline block source code
nx.set_node_attributes(nb_graph, {dst: {'source': source_code + [code]}})
def parse_notebook(notebook):
"""Creates a NetworkX graph based on the input notebook's tags.
Cell's source code are embedded into the graph as node attributes.
Args:
notebook: nbformat's notebook object
"""
# output graph
nb_graph = nx.DiGraph()
# will be assigned at the end of each for loop
prev_step_name = None
# All the code cells that have to be pre-pended to every pipeline step
# (i.e., imports and functions) are merged here
imports_block = list()
functions_block = list()
# Variables that will become pipeline parameters
pipeline_parameters = list()
# iterate over the notebook cells, from first to last
for c in notebook.cells:
# parse only source code cells
if c.cell_type != "code":
continue
tags = parse_metadata(c.metadata)
if len(tags['step_names']) > 1:
raise NotImplementedError("Kale does not yet support multiple "
"step names in a single notebook cell. "
"One notebook cell was found with %s "
"step names" % tags['step_names'])
step_name = tags['step_names'][0] \
if 0 < len(tags['step_names']) \
else None
if step_name == 'skip':
# when the cell is skipped, don't store `skip` as the previous
# active cell
continue
if step_name == 'pipeline-parameters':
pipeline_parameters.append(c.source)
prev_step_name = step_name
continue
if step_name == 'imports':
imports_block.append(c.source)
prev_step_name = step_name
continue
if step_name == 'functions':
functions_block.append(c.source)
prev_step_name = step_name
continue
# if none of the above apply, then we are parsing a code cell with
# a block names and (possibly) some dependencies
# if the cell was not tagged with a step name,
# add the code to the previous cell
if not step_name:
if prev_step_name == 'imports':
imports_block.append(c.source)
if prev_step_name == 'functions':
functions_block.append(c.source)
if prev_step_name == 'pipeline-parameters':
pipeline_parameters.append(c.source)
# current_block might be None in case the first cells of the
# notebooks have not been tagged.
if prev_step_name:
# this notebook cell will be merged to a previous one that
# specified a step name
merge_code(nb_graph, prev_step_name, c.source)
else:
# add node to DAG, adding tags and source code of notebook cell
if step_name not in nb_graph.nodes:
nb_graph.add_node(step_name, source=[c.source],
ins=set(), outs=set())
for _prev_step in tags['prev_steps']:
if _prev_step not in nb_graph.nodes:
raise ValueError("Step %s does not exist. It was "
"defined as previous step of %s"
% (_prev_step, tags['step_names']))
nb_graph.add_edge(_prev_step, step_name)
else:
merge_code(nb_graph, step_name, c.source)
prev_step_name = step_name
# Prepend any `imports` and `functions` cells to every Pipeline step
for step in nb_graph:
step_source = nb_graph.nodes(data=True)[step]['source']
step_source = imports_block + functions_block + step_source
nx.set_node_attributes(nb_graph, {step: {'source': step_source}})
# merge together pipeline parameters
pipeline_parameters = '\n'.join(pipeline_parameters)
# make the nodes' code a single multiline string
# NOTICE: this is temporary, waiting for the artifacts-viz-feature
for step in nb_graph:
step_source = nb_graph.nodes(data=True)[step]['source']
nx.set_node_attributes(nb_graph,
{step: {'source': '\n'.join(step_source)}})
return nb_graph, pipeline_parameters
```
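An illustrative call to `parse_metadata` showing the tag language described in its docstring; the tag values are made up.
```python
from kale.nbparser.parser import parse_metadata  # module defined above

meta = {'tags': ['step:train', 'prev:preprocess']}
print(parse_metadata(meta))
# {'step_names': ['train'], 'prev_steps': ['preprocess']}
```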
#### File: tests/assets/func02.out.py
```python
def test(arg1, arg2, arg3):
import os
import shutil
from kale.utils import pod_utils as _kale_pod_utils
from kale.marshal import resource_save as _kale_resource_save
from kale.marshal import resource_load as _kale_resource_load
_kale_data_directory = ""
if not os.path.isdir(_kale_data_directory):
os.makedirs(_kale_data_directory, exist_ok=True)
```
#### File: tests/assets/func05.out.py
```python
def test():
import os
import shutil
from kale.utils import pod_utils as _kale_pod_utils
from kale.marshal import resource_save as _kale_resource_save
from kale.marshal import resource_load as _kale_resource_load
_kale_data_directory = ""
if not os.path.isdir(_kale_data_directory):
os.makedirs(_kale_data_directory, exist_ok=True)
v1 = "Hello"
print(v1)
# -----------------------DATA SAVING START---------------------------------
if "v1" in locals():
_kale_resource_save(
v1, os.path.join(_kale_data_directory, "v1"))
else:
print("_kale_resource_save: `v1` not found.")
# -----------------------DATA SAVING END-----------------------------------
``` |
{
"source": "Jiaqi0602/adversarial-attack-from-leakage",
"score": 3
} |
#### File: adversarial-attack-from-leakage/inversefed/metrics.py
```python
import torch
import torchvision
from collections import defaultdict
class InceptionScore(torch.nn.Module):
"""Class that manages and returns the inception score of images."""
def __init__(self, batch_size=32, setup=dict(device=torch.device('cpu'), dtype=torch.float)):
"""Initialize with setup and target inception batch size."""
super().__init__()
self.preprocessing = torch.nn.Upsample(size=(299, 299), mode='bilinear', align_corners=False)
self.model = torchvision.models.inception_v3(pretrained=True).to(**setup)
self.model.eval()
self.batch_size = batch_size
def forward(self, image_batch):
"""Image batch should have dimensions BCHW and should be normalized.
B should be divisible by self.batch_size.
"""
B, C, H, W = image_batch.shape
batches = B // self.batch_size
scores = []
for batch in range(batches):
input = self.preprocessing(image_batch[batch * self.batch_size: (batch + 1) * self.batch_size])
scores.append(self.model(input))
prob_yx = torch.nn.functional.softmax(torch.cat(scores, 0), dim=1)
entropy = torch.where(prob_yx > 0, -prob_yx * prob_yx.log(), torch.zeros_like(prob_yx))
return entropy.mean()
def psnr(img_batch, ref_batch, batched=False, factor=1.0):
"""Standard PSNR."""
def get_psnr(img_in, img_ref):
mse = ((img_in - img_ref)**2).mean()
if mse > 0 and torch.isfinite(mse):
return (10 * torch.log10(factor**2 / mse))
elif not torch.isfinite(mse):
return img_batch.new_tensor(float('nan'))
else:
return img_batch.new_tensor(float('inf'))
if batched:
psnr = get_psnr(img_batch.detach(), ref_batch)
else:
[B, C, m, n] = img_batch.shape
psnrs = []
for sample in range(B):
psnrs.append(get_psnr(img_batch.detach()[sample, :, :, :], ref_batch[sample, :, :, :]))
psnr = torch.stack(psnrs, dim=0).mean()
return psnr.item()
def total_variation(x):
"""Anisotropic TV."""
dx = torch.mean(torch.abs(x[:, :, :, :-1] - x[:, :, :, 1:]))
dy = torch.mean(torch.abs(x[:, :, :-1, :] - x[:, :, 1:, :]))
return dx + dy
def activation_errors(model, x1, x2):
"""Compute activation-level error metrics for every module in the network."""
model.eval()
device = next(model.parameters()).device
hooks = []
data = defaultdict(dict)
inputs = torch.cat((x1, x2), dim=0)
separator = x1.shape[0]
def check_activations(self, input, output):
module_name = str(*[name for name, mod in model.named_modules() if self is mod])
try:
layer_inputs = input[0].detach()
residual = (layer_inputs[:separator] - layer_inputs[separator:]).pow(2)
se_error = residual.sum()
mse_error = residual.mean()
sim = torch.nn.functional.cosine_similarity(layer_inputs[:separator].flatten(),
layer_inputs[separator:].flatten(),
dim=0, eps=1e-8).detach()
data['se'][module_name] = se_error.item()
data['mse'][module_name] = mse_error.item()
data['sim'][module_name] = sim.item()
except (KeyboardInterrupt, SystemExit):
raise
except AttributeError:
pass
for name, module in model.named_modules():
hooks.append(module.register_forward_hook(check_activations))
try:
outputs = model(inputs.to(device))
for hook in hooks:
hook.remove()
except Exception as e:
for hook in hooks:
hook.remove()
raise
return data
```
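A quick sanity check of `psnr` and `total_variation` on random tensors; the shapes are arbitrary.
```python
import torch
from inversefed.metrics import psnr, total_variation  # module defined above

x = torch.rand(4, 3, 32, 32)
print(psnr(x, x.clone()))                  # identical batches -> inf
print(round(total_variation(x).item(), 4))
```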
#### File: adversarial-attack-from-leakage/utils/dataloader.py
```python
from inversefed import consts
import torch
from torchvision import datasets, transforms
class DataLoader:
def __init__(self, data, device):
self.data = data
self.device = device
def get_mean_std(self):
if self.data == 'cifar10':
mean, std = consts.cifar10_mean, consts.cifar10_std
elif self.data == 'cifar100':
mean, std = consts.cifar100_mean, consts.cifar100_std
elif self.data == 'mnist':
mean, std = consts.mnist_mean, consts.mnist_std
elif self.data == 'imagenet':
mean, std = consts.imagenet_mean, consts.imagenet_std
else:
raise Exception("dataset not found")
return mean, std
def get_data_info(self):
mean, std = self.get_mean_std()
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean, std)])
dm = torch.as_tensor(mean)[:, None, None].to(self.device)
ds = torch.as_tensor(std)[:, None, None].to(self.device)
data_root = 'data/cifar_data'
# data_root = '~/.torch'
if self.data == 'cifar10':
dataset = datasets.CIFAR10(root=data_root, download=True, train=False, transform=transform)
elif self.data == 'cifar100':
dataset = datasets.CIFAR100(root=data_root, download=True, train=False, transform=transform)
elif self.data == 'mnist':
dataset = datasets.MNIST(root=data_root, download=True, train=False, transform=transform)
        elif self.data == 'imagenet':
            # torchvision's ImageNet cannot be downloaded automatically; the archives must already be in data_root.
            dataset = datasets.ImageNet(root=data_root, split='val', transform=transform)
else:
raise Exception("dataset not found, load your own datasets")
data_shape = dataset[0][0].shape
classes = dataset.classes
return dataset, data_shape, classes, (dm, ds)
``` |
{
"source": "jiaqi0811/Weed_Detection",
"score": 2
} |
#### File: Weed_Detection/samples/Weed_retrain.py
```python
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import yaml
from PIL import Image
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
from mrcnn import model as modellib
# Directory to save logs and trained models
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
iter_num = 0
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mrcnn/mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Local path to weed-dataset
dataset_root_path = os.path.join(ROOT_DIR, "train_data")
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
class ShapesConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "shapes"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # background + 1 class
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 64
IMAGE_MAX_DIM = 512
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8 * 6, 16 * 6, 32 * 6, 64 * 6, 128 * 6) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
class WeedDataset(utils.Dataset):
# get the number of object
def get_obj_index(self, image):
n = np.max(image)
return n
# read yaml file to get the label of weed
def from_yaml_get_class(self, image_id):
info = self.image_info[image_id]
with open(info['yaml_path']) as f:
temp = yaml.load(f.read())
labels = temp['label_names']
del labels[0]
return labels
def draw_mask(self, num_obj, mask, image, image_id):
info = self.image_info[image_id]
for index in range(num_obj):
for i in range(info['width']):
for j in range(info['height']):
at_pixel = image.getpixel((i, j))
if at_pixel == index + 1:
mask[j, i, index] = 1
return mask
# yaml_pathdataset_root_path = "/weed_dateset/"
# img_folder = dataset_root_path + "pic"
# mask_folder = dataset_root_path + "cv2_mask"
# dataset_root_path = "/weed_dateset/"
def load_shapes(self, count, img_folder, mask_folder, imglist, dataset_root_path):
"""Generate the requested number of synthetic images.
count: number of images to generate.
height, width: the size of the generated images.
"""
# Add classes
self.add_class("shapes", 1, "Weed") # weed
for i in range(count):
# pic height and width
filestr = imglist[i].split(".")[0]
mask_path = mask_folder + "/" + filestr + ".png"
yaml_path = dataset_root_path + "/labelme_json/" + filestr + "_json/info.yaml"
cv_img = cv2.imread(dataset_root_path + "/labelme_json/" + filestr + "_json/img.png")
self.add_image("shapes", image_id=i, path=img_folder + "/" + imglist[i],
width=cv_img.shape[1], height=cv_img.shape[0], mask_path=mask_path, yaml_path=yaml_path)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
global iter_num
print("image_id", image_id)
info = self.image_info[image_id]
count = 1 # number of object
img = Image.open(info['mask_path'])
num_obj = self.get_obj_index(img)
mask = np.zeros([info['height'], info['width'], num_obj], dtype=np.uint8)
mask = self.draw_mask(num_obj, mask, img, image_id)
occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
for i in range(count - 2, -1, -1):
mask[:, :, i] = mask[:, :, i] * occlusion
occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
labels = []
labels = self.from_yaml_get_class(image_id)
labels_form = []
for i in range(len(labels)):
if labels[i].find("Weed") != -1:
# print "weed"
labels_form.append("Weed")
class_ids = np.array([self.class_names.index(s) for s in labels_form])
return mask.astype(np.bool), class_ids.astype(np.int32)
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
return ax
def train_model():
img_folder = os.path.join(dataset_root_path, "pic")
mask_folder = os.path.join(dataset_root_path, "cv2_mask")
imglist = os.listdir(img_folder)
count = len(imglist)
# train and val dataset
dataset_train = WeedDataset()
dataset_train.load_shapes(count, img_folder, mask_folder, imglist, dataset_root_path)
dataset_train.prepare()
dataset_val = WeedDataset()
dataset_val.load_shapes(7, img_folder, mask_folder, imglist, dataset_root_path)
dataset_val.prepare()
# Create models in training mode
config = ShapesConfig()
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR)
# first with coco and the with last
init_with = "coco" # imagenet, coco, or last
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last models you trained and continue training
checkpoint_file = model.find_last()
model.load_weights(checkpoint_file, by_name=True)
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=10,
layers='heads')
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=30,
layers="all")
class WeedConfig(ShapesConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
def predict():
import skimage.io
from mrcnn import visualize
    # Create model in inference mode
config = WeedConfig()
model = modellib.MaskRCNN(mode="inference", config=config, model_dir=MODEL_DIR)
model_path = model.find_last()
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
class_names = ['BG', 'Weed']
# Load a random image from the images folder
filename = os.path.join(IMAGE_DIR, '4.jpg')
image = skimage.io.imread(filename)
# Run detection
results = model.detect([image], verbose=1)
# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])
if __name__ == "__main__":
train_model()
#predict()
``` |
{
"source": "jiaqi61/AsySPA",
"score": 4
} |
#### File: AsySPA/asyspa/logistic_regression.py
```python
import time
import numpy as np
class LogisticRegression:
""" Multi-classes logistic regression problem. """
def __init__(self, samples, labels, reg = 1 ,dtype = np.float64):
"""
Initialize the problem with given data.
Parameters
----------
samples: 2-D array. Each column is an instance. The number of columns
is the number of samples, and the number of rows is the number
of features.
        labels: 2-D array. Each column is a one-hot vector of each sample. The
number of columns is the number of samples, and the number of
rows is the number of classes.
reg: Positive scalar. Regularization factor
        dtype: dtype, optional. The type of the data. Default: np.float64
"""
assert samples.ndim == 2 and labels.ndim ==2, "Samples and labels should be 2D arrays"
self.dtype = dtype
self.samples = np.asarray(samples, dtype = dtype)
        self.labels = np.asarray(labels, dtype = int)  # built-in int (np.int was removed in NumPy 1.24)
(self.n_f, self.n_s) = self.samples.shape # numbers of features and samples
self.n_c = self.labels.shape[0] # numbers of classes
assert self.n_s == self.labels.shape[1], "Samples and labels should have the same columns"
if reg < 0:
reg = 1
self.reg = reg
def obj_func(self, x):
"""
The objective function to be minimized.
Parameters
----------
x: 2-D array. The weights matrix to be estimated with
size (num_features, num_classes)
"""
num = np.exp(x.T.dot(self.samples)) # numerator
den = np.sum(num, axis = 0) # denominator
lr = np.sum(np.log(num.T[self.labels.astype(bool).T] / den))
# regularization term
r = 0.5 * self.reg * np.linalg.norm(x, ord='fro') ** 2
return (-lr + r)
def gradient(self, x):
"""
The gradient of the objective function w.r.t x.
Parameters
----------
x: 2-D array. The weights matrix to be estimated with
size (num_features, num_classes)
"""
temp0 = x.T.dot(self.samples)
temp0 = temp0 - np.max(temp0) # To avoid overflow
np.clip(temp0,-600,1,out=temp0) # To avoid underflow
temp = np.exp(temp0).T
grad_lr = self.samples.dot(self.labels.T - temp / np.sum(temp, axis = 1).reshape(-1,1))
grad_r = self.reg * x # the gradient of the regularization term
return (-grad_lr + grad_r)
def minimizer(self, x_start = None,
step_size = -1,
terminate_by_time = False,
termination_condition=1000,
epi = 1e-3,
log = False,
constant_stepsize = False):
"""
Minimize the logistic regression problem using a decaying stepsize
Parameters
----------
x_start: 2D array with size (num_features, num_classes). Initial point of x.
        step_size: Positive scalar. The initial stepsize (it is scaled by 1/num_samples); -1, the default, uses 1/num_samples.
terminate_by_time: Algorithm terminates by time or the number of iterations
        termination_condition: If terminate_by_time is True, the algorithm runs for at most termination_condition seconds.
                               Otherwise, it runs for at most termination_condition iterations.
        epi: Positive scalar. The algorithm stops immediately if the norm of the gradient is smaller than epi
log: Whether to log the history.
constant_stepsize: Whether to use a constant stepsize.
"""
if x_start is None:
x_start = np.random.randn(self.n_f, self.n_c)
if step_size == -1:
step_size = 1 / self.n_s # more instances, smaller stepsize
else:
step_size = step_size / self.n_s
x = x_start
x_history = np.asarray([x])
t = 0 # running time
itr = 0 # iterations
t_start = time.time()
condition = True # terminating condition
# start the optimization loop
while condition:
itr += 1
grad = self.gradient(x)
grad_norm = np.linalg.norm(grad) / self.n_s
if constant_stepsize is False:
x = x - step_size / (itr ** 0.5) * grad # Use the 1/sqrt(k) stepsize
else:
x = x - step_size * grad
# log the estimates
if log is True:
x_history = np.concatenate((x_history, [x]))
t = np.append(t, time.time() - t_start)
# print the averaged value of objective function and gradient
if itr % 20 == 0: # print every 20 iterations
obj = self.obj_func(x)
print('k='+str(itr),
'\ttime='+str(int(time.time() - t_start)),
'\tfunc='+str(obj / self.n_s),
'\tgrad='+str(grad_norm),
flush = True)
# update the terminating condition
if terminate_by_time is True:
                condition = (time.time() - t_start) < termination_condition  # wall-clock check; works even when log is False
else:
condition = itr < termination_condition
condition = condition and grad_norm > epi
if log is True:
return (x, x_history, t)
else:
return x
if __name__ == "__main__":
def demo(num_instances, num_features, num_classes):
"""
Run a demo
"""
# Create datasets
labels = np.zeros((num_classes, num_instances))
label_vec = np.random.randint(low = 0, high = num_classes, size = num_instances)
labels[label_vec, range(num_instances)] = 1 # one-hot labels
samples = np.random.randn(num_features, num_instances)
# Initialize the problem
lr = LogisticRegression(samples = samples, labels = labels)
# Solve the problem
x = lr.minimizer()
print(str(x))
return x
x = demo(4000, 10, 3)
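    def check_gradient(num_instances=50, num_features=4, num_classes=3, eps=1e-6):
        """
        Hedged sanity check (written for this edit, not part of the original file):
        compare the analytic gradient with a central finite difference of obj_func
        at a few random coordinates.
        """
        labels = np.zeros((num_classes, num_instances))
        label_vec = np.random.randint(low=0, high=num_classes, size=num_instances)
        labels[label_vec, range(num_instances)] = 1
        samples = np.random.randn(num_features, num_instances)
        lr = LogisticRegression(samples=samples, labels=labels)
        w = np.random.randn(num_features, num_classes)
        g = lr.gradient(w)
        for _ in range(5):
            i = np.random.randint(num_features)
            j = np.random.randint(num_classes)
            e = np.zeros_like(w)
            e[i, j] = eps
            fd = (lr.obj_func(w + e) - lr.obj_func(w - e)) / (2 * eps)
            print("dF/dw[{},{}]: analytic={:+.6f} finite-diff={:+.6f}".format(i, j, g[i, j], fd))
    check_gradient()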
``` |
{
"source": "jiaqianjing/ChineseBERT-Paddle",
"score": 3
} |
#### File: transformers/chinesebert/tokenizer.py
```python
import json
import os
from typing import List
import numpy as np
from pypinyin import Style, pinyin
from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer
__all__ = ['ChineseBertTokenizer']
class ChineseBertTokenizer(PretrainedTokenizer):
resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained
pretrained_resource_files_map = {}
pretrained_init_configuration = {}
padding_side = 'right'
def __init__(self,
bert_path,
max_seq_len=512,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]"):
vocab_file = os.path.join(bert_path, 'vocab.txt')
config_path = os.path.join(bert_path, 'config')
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the "
"vocabulary from a pretrained model please use "
"`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
.format(vocab_file))
self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
self.max_seq_len = max_seq_len
# load pinyin map dict
with open(os.path.join(config_path, 'pinyin_map.json'),
encoding='utf8') as fin:
self.pinyin_dict = json.load(fin)
# load char id map tensor
with open(os.path.join(config_path, 'id2pinyin.json'),
encoding='utf8') as fin:
self.id2pinyin = json.load(fin)
# load pinyin map tensor
with open(os.path.join(config_path, 'pinyin2tensor.json'),
encoding='utf8') as fin:
self.pinyin2tensor = json.load(fin)
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab,
unk_token=unk_token)
def tokenize_sentence(self, sentence):
# convert sentence to ids
tokenizer_output = self.encode(sentence)
input_ids = tokenizer_output['input_ids']
pinyin_ids = self.convert_sentence_to_pinyin_ids(sentence)
        # assert: the number of tokens should match the number of pinyin ids
# assert len(input_ids) <= self.max_seq_len
# assert len(input_ids) == len(pinyin_ids)
# convert list to tensor
# input_ids = paddle.to_tensor(input_ids)
# pinyin_ids = paddle.to_tensor(pinyin_ids).reshape([-1])
# convert list to np.array
input_ids = np.array(input_ids)
pinyin_ids = np.array(pinyin_ids).reshape([-1, 8])
return {"input_ids": input_ids, "pinyin_ids": pinyin_ids}
def convert_sentence_to_pinyin_ids(self, sentence: str, with_specail_token=True) -> List[List[int]]:
# get offsets
bert_tokens_offsets = self.get_offset_mapping(sentence)
if with_specail_token:
bert_tokens_offsets.insert(0, (0, 0))
bert_tokens_offsets.append((0, 0))
# get tokens
bert_tokens_tokens = self.tokenize(sentence)
if with_specail_token:
bert_tokens_tokens.insert(0, '[CLS]')
bert_tokens_tokens.append('[SEP]')
# get pinyin of a sentence
pinyin_list = pinyin(sentence,
style=Style.TONE3,
heteronym=True,
errors=lambda x: [['not chinese'] for _ in x])
pinyin_locs = {}
# get pinyin of each location
for index, item in enumerate(pinyin_list):
pinyin_string = item[0]
# not a Chinese character, pass
if pinyin_string == "not chinese":
continue
if pinyin_string in self.pinyin2tensor:
pinyin_locs[index] = self.pinyin2tensor[pinyin_string]
else:
ids = [0] * 8
for i, p in enumerate(pinyin_string):
if p not in self.pinyin_dict["char2idx"]:
ids = [0] * 8
break
ids[i] = self.pinyin_dict["char2idx"][p]
pinyin_locs[index] = ids
# find chinese character location, and generate pinyin ids
pinyin_ids = []
for idx, (token, offset) in enumerate(
zip(bert_tokens_tokens, bert_tokens_offsets)):
if offset[1] - offset[0] != 1:
                # token spanning more than one character, or the special [CLS] / [SEP] tokens
pinyin_ids.append([0] * 8)
continue
if offset[0] in pinyin_locs:
                # single-character token with a pinyin tensor
pinyin_ids.append(pinyin_locs[offset[0]])
else:
                # single-character token without a pinyin tensor
pinyin_ids.append([0] * 8)
return pinyin_ids
def convert_tokens_to_pinyin_ids(self,
tokens: List[str]) -> List[List[int]]:
"""
Example :
tokens: ['[CLS]', '你', '多', '大', '了', '?', '[SEP]', '我', '10', '岁', '了', '。', '[SEP]']
"""
pinyin_ids = []
for token in tokens:
if token == '[CLS]' or token == '[SEP]':
                # the [CLS] / [SEP] tokens
pinyin_ids.append([0] * 8)
continue
offset = self.get_offset_mapping(token)[0]
if offset[1] - offset[0] != 1:
                # token not made of a single character
pinyin_ids.append([0] * 8)
continue
pinyin_string = pinyin(token,
style=Style.TONE3,
heteronym=True,
errors=lambda x: [['not chinese']
for _ in x])[0][0]
if pinyin_string == "not chinese":
                # not a Chinese character
pinyin_ids.append([0] * 8)
else:
if pinyin_string in self.pinyin2tensor:
pinyin_ids.append(self.pinyin2tensor[pinyin_string])
else:
ids = [0] * 8
for i, p in enumerate(pinyin_string):
if p not in self.pinyin_dict["char2idx"]:
ids = [0] * 8
break
ids[i] = self.pinyin_dict["char2idx"][p]
pinyin_ids.append(ids)
return pinyin_ids
@property
def vocab_size(self):
"""
Return the size of vocabulary.
Returns:
int: The size of vocabulary.
"""
return len(self.vocab)
def _tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def tokenize(self, text):
return self._tokenize(text)
def convert_tokens_to_string(self, tokens):
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def num_special_tokens_to_add(self, pair=False):
token_ids_0 = []
token_ids_1 = []
return len(
self.build_inputs_with_special_tokens(
token_ids_0, token_ids_1 if pair else None))
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
_cls = [self.cls_token_id]
_sep = [self.sep_token_id]
return _cls + token_ids_0 + _sep + token_ids_1 + _sep
def build_offset_mapping_with_special_tokens(self,
offset_mapping_0,
offset_mapping_1=None):
if offset_mapping_1 is None:
return [(0, 0)] + offset_mapping_0 + [(0, 0)]
return [(0, 0)] + offset_mapping_0 + [(0, 0)
] + offset_mapping_1 + [(0, 0)]
def create_token_type_ids_from_sequences(self,
token_ids_0,
token_ids_1=None):
_sep = [self.sep_token_id]
_cls = [self.cls_token_id]
if token_ids_1 is None:
return len(_cls + token_ids_0 + _sep) * [0]
return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
_sep) * [1]
def get_special_tokens_mask(self,
token_ids_0,
token_ids_1=None,
already_has_special_tokens=False):
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(
map(
lambda x: 1
if x in [self.sep_token_id, self.cls_token_id] else 0,
token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + (
[0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
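if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file). Run this file as a module so the
    # relative imports resolve. The directory below is an assumption: it must contain
    # vocab.txt plus a config/ folder with pinyin_map.json, id2pinyin.json and
    # pinyin2tensor.json.
    import sys
    bert_path = sys.argv[1] if len(sys.argv) > 1 else "./ChineseBERT-base"
    if os.path.isdir(bert_path):
        tokenizer = ChineseBertTokenizer(bert_path=bert_path)
        out = tokenizer.tokenize_sentence("我喜欢自然语言处理")
        print(out["input_ids"].shape, out["pinyin_ids"].shape)  # (seq_len,), (seq_len, 8)
    else:
        print("ChineseBERT assets not found at", bert_path)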
``` |
{
"source": "jiaqianjing/fnet_paddle",
"score": 2
} |
#### File: transformers/fnet/tokenizer.py
```python
import os
import unicodedata
from shutil import copyfile
from typing import List, Optional
from paddle.utils import try_import
from .. import PretrainedTokenizer
__all__ = ['FNetTokenizer']
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
class FNetTokenizer(PretrainedTokenizer):
"""
Construct an FNet tokenizer. Adapted from :class:`~transformers.AlbertTokenizer`. Based on `SentencePiece
<https://github.com/google/sentencepiece>`__. This tokenizer inherits from
:class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this
superclass for more information regarding those methods.
Args:
vocab_file (str):
The vocabulary file (ends with '.spm') required to instantiate
a `SentencePiece <https://github.com/google/sentencepiece>`__ tokenizer.
do_lower_case (bool, optional):
Whether or not to lowercase the input when tokenizing. Defaults to `False` and
**does not** lowercase the input.
remove_space (bool, optional):
Whether or not to strip the text when tokenizing. Defaults to `True` and
removes excess spaces before and after the string.
        keep_accents (bool, optional):
            Whether or not to keep accents when tokenizing. Defaults to `True` and keeps accents.
unk_token (str, optional):
A special token representing the *unknown (out-of-vocabulary)* token.
            An out-of-vocabulary token is set to `unk_token` in order to be converted to an ID.
Defaults to `"<unk>"`.
sep_token (str, optional):
A special token separating two different sentences in the same input.
Defaults to `"[SEP]"`.
pad_token (str, optional):
A special token used to make arrays of tokens the same size for batching purposes.
Defaults to `"<pad>"`.
cls_token (str, optional):
A special token used for sequence classification. It is the last token
of the sequence when built with special tokens. Defaults to `"[CLS]"`.
mask_token (str, optional):
A special token representing a masked token. This is the token used
in the masked language modeling task which the model tries to predict the original unmasked ones.
Defaults to `"[MASK]"`.
Attributes:
sp_model (SentencePieceProcessor):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
resource_files_names = {"vocab_file": "spiece.model"}
pretrained_resource_files_map = {
"vocab_file": {
"fnet-base":
"https://paddlenlp.bj.bcebos.com/models/transformers/fnet/fnet-base/spiece.model",
"fnet-large":
"https://paddlenlp.bj.bcebos.com/models/transformers/fnet/fnet-large/spiece.model",
},
}
pretrained_init_configuration = {
"fnet-base": {
"do_lower_case": False
},
"fnet-large": {
"do_lower_case": False
},
}
pretrained_positional_embedding_sizes = {
"fnet-base": None,
"fnet-large": None
}
def __init__(self,
vocab_file,
do_lower_case=False,
remove_space=True,
keep_accents=True,
unk_token="<unk>",
sep_token="[SEP]",
pad_token="<pad>",
cls_token="[CLS]",
mask_token="[MASK]",
**kwargs):
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
spm = try_import("sentencepiece")
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(vocab_file)
@property
def vocab_size(self):
return len(self.sp_model)
def get_vocab(self):
vocab = {
self.convert_ids_to_tokens(i): i
for i in range(self.vocab_size)
}
return vocab
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
spm = try_import("sentencepiece")
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def preprocess_text(self, inputs):
if self.remove_space:
outputs = " ".join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace("``", '"').replace("''", '"')
if not self.keep_accents:
outputs = unicodedata.normalize("NFKD", outputs)
outputs = "".join(
[c for c in outputs if not unicodedata.combining(c)])
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize(self, text, sample=False):
"""Tokenize a string."""
text = self.preprocess_text(text)
if not sample:
pieces = self.sp_model.EncodeAsPieces(text)
else:
pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(
SPIECE_UNDERLINE, ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][
0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces
    def tokenize(self, text):
        """
        Converts a string to a list of tokens.

        Args:
            text (str):
                The text to be tokenized.
        Returns:
            List(str): A list of string representing converted tokens.
        """
        return self._tokenize(text)
def _convert_token_to_id(self, token):
"""Converts a token (str) to an id using the vocab. """
return self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) to a token (str) using the vocab."""
return self.sp_model.IdToPiece(index)
def convert_tokens_to_ids(self, tokens):
"""
Converts a token (or a sequence of tokens) to a single integer id (or a sequence of ids),
using the vocabulary.
Args:
tokens (str or List[str]):
One or several token(s) to convert to token id(s).
Returns:
int or List[int] or tuple(int): The token id or list of token ids or tuple of token ids.
"""
if not isinstance(tokens, (list, tuple)):
return self._convert_token_to_id(tokens)
else:
return [self._convert_token_to_id(token) for token in tokens]
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
"""
Converts a single index or a sequence of indices to a token or
a sequence of tokens, using the vocabulary and added tokens.
Args:
ids (int or List[int]):
The token id (or token ids) to be converted to token(s).
skip_special_tokens (bool, optional):
Whether or not to remove special tokens in the decoding.
Defaults to `False` and we do not remove special tokens.
Returns:
str or List[str]: The decoded token(s).
"""
if not isinstance(ids, (list, tuple)):
return self._convert_id_to_token(ids)
tokens = [self._convert_id_to_token(_id) for _id in ids]
if skip_special_tokens:
return [
token for token in tokens
if token not in self.all_special_tokens
]
return tokens
def convert_tokens_to_string(self, tokens):
return self.sp_model.decode(tokens)
def build_inputs_with_special_tokens(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An FNet sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0,
token_ids_1=token_ids_1,
already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + (
[0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def create_token_type_ids_from_sequences(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet sequence
pair mask has the following format: ::
            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_resources(self, save_directory):
"""
Saves `SentencePiece <https://github.com/google/sentencepiece>`__ file
(ends with '.spm') under `save_directory`.
Args:
save_directory (str):
Directory to save files into.
"""
for name, file_name in self.resource_files_names.items():
save_path = os.path.join(save_directory, file_name)
if os.path.abspath(self.vocab_file) != os.path.abspath(save_path):
copyfile(self.vocab_file, save_path)
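if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file). Run this file as a module so
    # the relative import resolves; "fnet-base" assumes the spiece.model can be fetched
    # through PaddleNLP's from_pretrained machinery.
    tokenizer = FNetTokenizer.from_pretrained("fnet-base")
    tokens = tokenizer.tokenize("FNet mixes tokens with Fourier transforms.")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    print(tokens)
    print(tokenizer.build_inputs_with_special_tokens(ids))  # [CLS] X [SEP]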
``` |
{
"source": "JiaqiaoZhang/Near_accident_car_driving_control_RL",
"score": 3
} |
#### File: Near_accident_car_driving_control_RL/envs/agents.py
```python
from .entities import RectangleEntity, CircleEntity
from .geometry import Point
# For colors, we use tkinter colors. See http://www.science.smith.edu/dftwiki/index.php/Color_Charts_for_TKinter
class Car(RectangleEntity):
def __init__(self, center: Point, heading: float, color: str = 'red'):
size = Point(4., 2.)
movable = True
friction = 0.06
super(Car, self).__init__(center, heading, size, movable, friction)
self.color = color
self.collidable = True
class Pedestrian(CircleEntity):
def __init__(self, center: Point, heading: float, color: str = 'LightSalmon2'):
radius = 0.4
movable = True
friction = 0.2
super(Pedestrian, self).__init__(center, heading, radius, movable, friction)
self.color = color
self.collidable = True
class Building(RectangleEntity):
def __init__(self, center: Point, size: Point, color: str = 'gray26'):
heading = 0.
movable = False
friction = 0.
super(Building, self).__init__(center, heading, size, movable, friction)
self.color = color
self.collidable = True
class Painting(RectangleEntity):
def __init__(self, center: Point, size: Point, color: str = 'gray26'):
heading = 0.
movable = False
friction = 0.
super(Painting, self).__init__(center, heading, size, movable, friction)
self.color = color
self.collidable = False
```
#### File: Near_accident_car_driving_control_RL/envs/entities.py
```python
import numpy as np
from .geometry import Point, Rectangle, Circle
from typing import Union
import copy
class Entity:
def __init__(self, center: Point, heading: float, movable: bool = True, friction: float = 0):
self.center = center # this is x, y
self.heading = heading
self.movable = movable
self.color = 'ghost white'
self.collidable = True
if movable:
self.friction = friction
self.velocity = Point(0,0) # this is xp, yp
self.acceleration = 0 # this is vp (or speedp)
self.angular_velocity = 0 # this is headingp
self.inputSteering = 0
self.inputAcceleration = 0
self.max_speed = np.inf
self.min_speed = 0
@property
def speed(self) -> float:
return self.velocity.norm(p = 2) if self.movable else 0
def set_control(self, inputSteering: float, inputAcceleration: float):
self.inputSteering = inputSteering
self.inputAcceleration = inputAcceleration
def tick(self, dt: float):
if self.movable:
speed = self.speed
heading = self.heading
new_angular_velocity = speed * self.inputSteering
new_acceleration = self.inputAcceleration - self.friction * speed
new_heading = heading + (self.angular_velocity + new_angular_velocity) * dt / 2.
new_speed = np.clip(speed + (self.acceleration + new_acceleration) * dt / 2., self.min_speed, self.max_speed)
new_velocity = Point(((speed + new_speed) / 2.) * np.cos((new_heading + heading) / 2.),
((speed + new_speed) / 2.) * np.sin((new_heading + heading) / 2.))
new_center = self.center + (self.velocity + new_velocity) * dt / 2.
self.center = new_center
self.heading = new_heading
self.velocity = new_velocity
self.acceleration = new_acceleration
self.angular_velocity = new_angular_velocity
self.buildGeometry()
    def collidesWith(self, other) -> bool:  # placeholder; shadowed by the typed version below
        raise NotImplementedError
    def buildGeometry(self):  # builds self.obj; implemented by RectangleEntity and CircleEntity
        raise NotImplementedError
def collidesWith(self, other: Union['Point','Entity']) -> bool:
if isinstance(other, Entity):
return self.obj.intersectsWith(other.obj)
elif isinstance(other, Point):
return self.obj.intersectsWith(other)
else:
raise NotImplementedError
def distanceTo(self, other: Union['Point','Entity']) -> float:
if isinstance(other, Entity):
return self.obj.distanceTo(other.obj)
elif isinstance(other, Point):
return self.obj.distanceTo(other)
else:
raise NotImplementedError
def copy(self):
return copy.deepcopy(self)
@property
def x(self):
return self.center.x
@property
def y(self):
return self.center.y
@property
def xp(self):
return self.velocity.x
@property
def yp(self):
return self.velocity.y
class RectangleEntity(Entity):
def __init__(self, center: Point, heading: float, size: Point, movable: bool = True, friction: float = 0):
super(RectangleEntity, self).__init__(center, heading, movable, friction)
self.size = size
self.buildGeometry()
@property
def edge_centers(self):
edge_centers = np.zeros((4,2), dtype=np.float32)
x = self.center.x
y = self.center.y
w = self.size.x
h = self.size.y
edge_centers[0] = [x + w / 2. * np.cos(self.heading), y + w / 2. * np.sin(self.heading)]
edge_centers[1] = [x - h / 2. * np.sin(self.heading), y + h / 2. * np.cos(self.heading)]
edge_centers[2] = [x - w / 2. * np.cos(self.heading), y - w / 2. * np.sin(self.heading)]
edge_centers[3] = [x + h / 2. * np.sin(self.heading), y - h / 2. * np.cos(self.heading)]
return edge_centers
@property
def corners(self):
ec = self.edge_centers
c = np.array([self.center.x, self.center.y])
corners = []
corners.append(Point(*(ec[1] + ec[0] - c)))
corners.append(Point(*(ec[2] + ec[1] - c)))
corners.append(Point(*(ec[3] + ec[2] - c)))
corners.append(Point(*(ec[0] + ec[3] - c)))
return corners
def buildGeometry(self):
C = self.corners
self.obj = Rectangle(*C[:-1])
class CircleEntity(Entity):
def __init__(self, center: Point, heading: float, radius: float, movable: bool = True, friction: float = 0):
super(CircleEntity, self).__init__(center, heading, movable, friction)
self.radius = radius
self.buildGeometry()
def buildGeometry(self):
self.obj = Circle(self.center, self.radius)
```
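A note on `Entity.tick()` above: it averages the previous and newly computed acceleration, heading and velocity over the step (a trapezoidal, Heun-style update) with a linear friction term, so a constant throttle drives the speed toward `inputAcceleration / friction`. The snippet below is a minimal 1-D re-derivation of just the speed update, written for this edit rather than taken from the repository; with the Car's friction of 0.06 and a throttle of 1.0 the speed settles near 16.7.
```python
def tick_1d(speed, accel_prev, input_accel, friction, dt):
    """One trapezoidal step of the speed update used in Entity.tick(), reduced to 1-D."""
    accel_new = input_accel - friction * speed
    speed_new = speed + (accel_prev + accel_new) * dt / 2.0
    return speed_new, accel_new

speed, accel = 0.0, 0.0
for _ in range(2000):  # 200 s of simulated time at dt = 0.1
    speed, accel = tick_1d(speed, accel, input_accel=1.0, friction=0.06, dt=0.1)
print(round(speed, 3), "vs. terminal speed", round(1.0 / 0.06, 3))  # both ~16.667
```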
#### File: Near_accident_car_driving_control_RL/envs/scenario3.py
```python
import gym
from gym.spaces import Box, Discrete
from gym.utils import seeding
import numpy as np
from .world import World
from .agents import Car, Building, Pedestrian, Painting
from .geometry import Point
import time
class Scenario3(gym.Env):
def __init__(self):
self.seed(0) # just in case we forget seeding
self.init_ego = Car(Point(22, 10), heading = np.pi/2)
self.init_ego.velocity = Point(0, 10.)
self.init_adv = Car(Point(22, 40), heading = np.pi/2, color='blue')
self.init_adv.velocity = Point(0, 8.)
self.slowdown_point = Point(22, 80)
self.stop_duration = 3.
self.target = Point(22, 120)
self.noise_adv_pos = 1.0
self.noise_adv_vel = 1.0
self.dt = 0.1
self.T = 40
self.initiate_world()
self.reset()
def initiate_world(self):
self.world = World(self.dt, width = 40, height = 120, ppm = 5)
self.world.add(Building(Point(8, 60), Point(16, 120)))
self.world.add(Building(Point(32, 60), Point(16, 120)))
def reset(self):
self.ego = self.init_ego.copy()
self.ego.min_speed = 0.
self.ego.max_speed = 20.
self.adv = self.init_adv.copy()
self.adv.min_speed = 0.
self.adv.max_speed = 10.
self.aggressive_safe_distance = 15.
self.add_noise()
self.slowdown_t = np.inf
self.world.reset()
self.world.add(self.ego)
self.world.add(self.adv)
return self._get_obs()
def close(self):
self.world.close()
def add_noise(self):
self.ego.center += Point(0, 20*self.np_random.rand() - 10)
self.adv.center += Point(0, 20*self.np_random.rand() - 10)
self.aggressive_safe_distance += self.np_random.rand()*4 - 2
@property
def observation_space(self):
low = np.array([0, self.ego.min_speed, 30, self.adv.min_speed - self.noise_adv_vel/2.])
high= np.array([self.target.y + self.ego.max_speed*self.dt, self.ego.max_speed, self.target.y + self.adv.max_speed*self.dt, self.adv.max_speed + self.noise_adv_vel/2.])
return Box(low=low, high=high)
@property
def action_space(self):
return Box(low=np.array([-3.5]), high=np.array([2.]))
def seed(self, seed):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def get_adv_control(self):
if self.adv.y < self.slowdown_point.y:
if self.adv.speed > self.init_adv.speed:
return np.array([0, 0.], dtype=np.float32)
else:
return np.array([0, 1. + self.np_random.rand()*0.4 - 0.2], dtype=np.float32)
elif self.world.t < self.slowdown_t + self.stop_duration: # the adversarial car has just passed the slowdown point
if self.slowdown_t > self.T:
self.slowdown_t = self.world.t
return np.array([0, 1. + self.np_random.rand()*0.4 - 0.2], dtype=np.float32)
else:
return np.array([0, 2. + self.np_random.rand()*0.4 - 0.2], dtype=np.float32)
def get_ego_control(self,policy_no=0):
if policy_no==0: # aggressive
if self.adv.y - self.ego.y > np.maximum(np.minimum(self.aggressive_safe_distance, 2*self.ego.speed), 1):
return np.array([0, 1.5 + self.np_random.rand()*0.4 - 0.2], dtype=np.float32)
elif self.ego.speed < 2.:
return np.array([0, 0.], dtype=np.float32)
else:
return np.array([0, -3.4 + self.np_random.rand()*0.2 - 0.1], dtype=np.float32)
elif policy_no==1: # cautious
if self.adv.y - self.ego.y > np.maximum(2*self.ego.speed, 1):
return np.array([0, 0.5 + self.np_random.rand()*0.4 - 0.2], dtype=np.float32)
elif self.ego.speed < 2.:
return np.array([0, 0.], dtype=np.float32)
else:
return np.array([0, -2.5 + self.np_random.rand()*0.4 - 0.2], dtype=np.float32)
@property
def target_reached(self):
return self.ego.y >= self.target.y
@property
def collision_exists(self):
return self.ego.collidesWith(self.adv)
def step(self, action):
while type(action) == list:
action = action[0]
action = np.clip(action, self.action_space.low, self.action_space.high)
ego_action = np.array([0, action], dtype=np.float32)
adv_action = self.get_adv_control()
self.ego.set_control(*ego_action)
self.adv.set_control(*adv_action)
self.world.tick()
return self._get_obs(), 0, self.collision_exists or self.target_reached or self.world.t >= self.T, self.world.t
def _get_obs(self):
return np.array([self.ego.center.y, self.ego.velocity.y, self.adv.center.y + self.noise_adv_pos*self.np_random.rand() - self.noise_adv_pos/2., self.adv.velocity.y + self.noise_adv_vel*self.np_random.rand() - self.noise_adv_vel/2.])
def render(self, mode='rgb'):
self.world.render()
```
#### File: JiaqiaoZhang/Near_accident_car_driving_control_RL/train_coil_pj.py
```python
import numpy as np
import tensorflow as tf1
import tensorflow.compat.v2 as tf
import argparse
from utils import *
tf1.compat.v1.enable_eager_execution()
class NN(tf.keras.Model):
def __init__(self, in_size, out_size):
super(NN, self).__init__()
# assert tf1.executing_eagerly()
######### Your code starts here #########
# We want to define and initialize the weights & biases of the CoIL network.
# - in_size is dim(O)
# - out_size is dim(A) = 2
# HINT 1: An example of this was given to you in Homework 1's Problem 1 in svm_tf.py. Now you will implement a multi-layer version.
# HINT 2: You should use either of the following for weight initialization:
# - tf1.contrib.layers.xavier_initializer (this is what we tried)
# - tf.keras.initializers.GlorotUniform (supposedly equivalent to the previous one)
# - tf.keras.initializers.GlorotNormal
# - tf.keras.initializers.he_uniform or tf.keras.initializers.he_normal
weights_initializer = tf1.contrib.layers.xavier_initializer()
# weights_initializer = tf.keras.initializers.GlorotUniform
self.w1 = tf1.compat.v1.get_variable(name="w1", shape=(in_size, 8), initializer=weights_initializer)
self.b1 = tf1.compat.v1.get_variable(name="b1", shape=8, initializer=weights_initializer)
self.w2 = tf1.compat.v1.get_variable(name="w2", shape=(8, 16), initializer=weights_initializer)
self.b2 = tf1.compat.v1.get_variable(name="b2", shape=16, initializer=weights_initializer)
self.w3 = tf1.compat.v1.get_variable(name="w3", shape=(16, out_size), initializer=weights_initializer)
self.b3 = tf1.compat.v1.get_variable(name="b3", shape=out_size, initializer=weights_initializer)
self.w4 = tf1.compat.v1.get_variable(name="w4", shape=(8, 16), initializer=weights_initializer)
self.b4 = tf1.compat.v1.get_variable(name="b4", shape=16, initializer=weights_initializer)
self.w5 = tf1.compat.v1.get_variable(name="w5", shape=(16, out_size), initializer=weights_initializer)
self.b5 = tf1.compat.v1.get_variable(name="b5", shape=out_size, initializer=weights_initializer)
self.w6 = tf1.compat.v1.get_variable(name="w6", shape=(8, 16), initializer=weights_initializer)
self.b6 = tf1.compat.v1.get_variable(name="b6", shape=16, initializer=weights_initializer)
self.w7 = tf1.compat.v1.get_variable(name="w7", shape=(16, out_size), initializer=weights_initializer)
self.b7 = tf1.compat.v1.get_variable(name="b7", shape=out_size, initializer=weights_initializer)
########## Your code ends here ##########
def call(self, x, u):
x = tf.cast(x, dtype=tf.float32)
u = tf.cast(u, dtype=tf.int8)
######### Your code starts here #########
# We want to perform a forward-pass of the network. Using the weights and biases, this function should give the network output for (x,u) where:
# - x is a (? x |O|) tensor that keeps a batch of observations
# - u is a (? x 1) tensor (a vector indeed) that keeps the high-level commands (goals) to denote which branch of the network to use
# FYI: For the intersection scenario, u=0 means the goal is to turn left, u=1 straight, and u=2 right.
# HINT 1: Looping over all data samples may not be the most computationally efficient way of doing branching
# HINT 2: While implementing this, we found tf.math.equal and tf.cast useful. This is not necessarily a requirement though.
        batch_size = len(x)
y_1 = tf.matmul(x, self.w1) - self.b1
y_1 = tf.math.tanh(y_1)
mask_0 = tf.math.equal(u, 0)
        mask_0 = tf.reshape(mask_0, [batch_size])
mask_1 = tf.math.equal(u, 1)
        mask_1 = tf.reshape(mask_1, [batch_size])
mask_2 = tf.math.equal(u, 2)
        mask_2 = tf.reshape(mask_2, [batch_size])
y_2 = tf.boolean_mask(y_1, mask_0)
y_3 = tf.boolean_mask(y_1, mask_1)
y_4 = tf.boolean_mask(y_1, mask_2)
y_5 = tf.matmul(y_2, self.w2) - self.b2
y_5 = tf.math.sigmoid(y_5)
y_6 = tf.matmul(y_5, self.w3) - self.b3
y_7 = tf.matmul(y_3, self.w4) - self.b4
y_7 = tf.math.sigmoid(y_7)
y_8 = tf.matmul(y_7, self.w5) - self.b5
y_9 = tf.matmul(y_4, self.w6) - self.b6
y_9 = tf.math.sigmoid(y_9)
y_10 = tf.matmul(y_9, self.w7) - self.b7
# y_est = tf.concat([y_6, y_8, y_10], 0)
indices_zero = tf.cast(mask_0, dtype=tf.float32)
diag_zero = tf1.linalg.tensor_diag(indices_zero)
final_zero = tf.transpose(tf.boolean_mask(diag_zero, mask_0))
final_zero = tf.cast(final_zero, tf.float32)
indices_one = tf.cast(mask_1, dtype=tf.float32)
diag_one = tf1.linalg.tensor_diag(indices_one)
final_one = tf.transpose(tf.boolean_mask(diag_one, mask_1))
final_one = tf.cast(final_one, tf.float32)
indices_two = tf.cast(mask_2, dtype=tf.float32)
diag_two = tf1.linalg.tensor_diag(indices_two)
final_two = tf.transpose(tf.boolean_mask(diag_two, mask_2))
final_two = tf.cast(final_two, tf.float32)
y_est = tf.matmul(final_zero, y_6) + tf.matmul(final_one, y_8) + tf.matmul(final_two, y_10)
# y_est = tf.matmul(final_zero, y_6) + tf.matmul(final_one, y_8)
return y_est
########## Your code ends here ##########
def loss(y_est, y):
y = tf.cast(y, dtype=tf.float32)
######### Your code starts here #########
# We want to compute the loss between y_est and y where
# - y_est is the output of the network for a batch of observations & goals,
# - y is the actions the expert took for the corresponding batch of observations & goals
# At the end your code should return the scalar loss value.
# HINT: Remember, you can penalize steering (0th dimension) and throttle (1st dimension) unequally
loss = tf.norm(y - y_est)
# loss = tf.norm(y_est - y)
return loss
########## Your code ends here ##########
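# Hedged variant of the loss above (written for this edit, not part of the original
# assignment): penalize the steering and throttle dimensions unequally, as the hint
# suggests, e.g.
#   w = tf.constant([2.0, 1.0])   # heavier weight on steering (dimension 0)
#   loss = tf.reduce_mean(tf.reduce_sum(w * tf.square(y - y_est), axis=1))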
def nn(data, args):
"""
Trains a feedforward NN.
"""
params = {
'train_batch_size': 4096,
}
in_size = data['x_train'].shape[-1]
out_size = data['y_train'].shape[-1]
nn_model = NN(in_size, out_size)
if args.restore:
nn_model.load_weights('./policies/' + args.scenario.lower() + '_' + args.goal.lower() + '_CoIL')
optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)
train_loss = tf.keras.metrics.Mean(name='train_loss')
@tf.function
def train_step(x, y, u):
######### Your code starts here #########
# We want to perform a single training step (for one batch):
# 1. Make a forward pass through the model (note both x and u are inputs now)
# 2. Calculate the loss for the output of the forward pass
# 3. Based on the loss calculate the gradient for all weights
# 4. Run an optimization step on the weights.
# Helpful Functions: tf.GradientTape(), tf.GradientTape.gradient(), tf.keras.Optimizer.apply_gradients
# HINT: You did the exact same thing in Homework 1! It is just the networks weights and biases that are different.
with tf.GradientTape() as tape:
y_est = nn_model.call(x, u)
current_loss = loss(y_est, y)
varss = [nn_model.w1, nn_model.b1, nn_model.w2, nn_model.b2, nn_model.w3, nn_model.b3, nn_model.w4, nn_model.b4,
nn_model.w5, nn_model.b5, nn_model.w6, nn_model.b6, nn_model.w7, nn_model.b7]
grads = tape.gradient(current_loss, varss)
optimizer.apply_gradients(zip(grads, varss))
########## Your code ends here ##########
train_loss(current_loss)
@tf.function
def train(train_data):
for x, y, u in train_data:
train_step(x, y, u)
train_data = tf.data.Dataset.from_tensor_slices((data['x_train'], data['y_train'], data['u_train'])).shuffle(
100000).batch(params['train_batch_size'])
for epoch in range(args.epochs):
# Reset the metrics at the start of the next epoch
train_loss.reset_states()
train(train_data)
template = 'Epoch {}, Loss: {}'
print(template.format(epoch + 1, train_loss.result()))
nn_model.save_weights('./policies/' + args.scenario.lower() + '_' + args.goal.lower() + '_CoIL')
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--scenario', type=str, help="intersection, circularroad", default="intersection")
parser.add_argument("--epochs", type=int, help="number of epochs for training", default=1000)
parser.add_argument("--lr", type=float, help="learning rate for Adam optimizer", default=5e-3)
parser.add_argument("--restore", action="store_true", default=False)
args = parser.parse_args()
args.goal = 'all'
maybe_makedirs("./policies")
data = load_data(args)
nn(data, args)
``` |
{
"source": "Jiaqi-beep/cs451-practicals",
"score": 3
} |
#### File: Jiaqi-beep/cs451-practicals/dataset_poetry.py
```python
import numpy as np
from shared import (
dataset_local_path,
)
from typing import Tuple, Dict
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import make_pipeline
# start off by seeding random number generators:
RANDOM_SEED = 12345
df: pd.DataFrame = pd.read_json(dataset_local_path("poetry_id.jsonl"), lines=True)
features = pd.json_normalize(df.features)
features = features.join([df.poetry, df.words])
tv_f, test_f = train_test_split(features, test_size=0.25, random_state=RANDOM_SEED)
train_f, vali_f = train_test_split(tv_f, test_size=0.25, random_state=RANDOM_SEED)
textual = TfidfVectorizer(max_df=0.75, min_df=2, dtype=np.float32)
numeric = make_pipeline(DictVectorizer(sparse=False), StandardScaler())
def split(
df: pd.DataFrame, fit: bool = False
) -> Tuple[np.ndarray, Dict[str, np.ndarray]]:
global numeric, textual
y = np.array(df.pop("poetry").values)
text = df.pop("words")
if fit:
textual.fit(text)
numeric.fit(df.to_dict("records"))
x_text = textual.transform(text)
x_num = numeric.transform(df.to_dict("records"))
x_merged = np.asarray(np.hstack([x_num, x_text.todense()]))
return (y, {"textual": x_text, "numeric": x_num, "merged": x_merged})
y_train, Xd_train = split(train_f, fit=True)
y_vali, Xd_vali = split(vali_f)
y_test, Xd_test = split(test_f)
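if __name__ == "__main__":
    # Hedged sanity check (not part of the original file): report the split sizes and
    # the dimensionality of the merged feature matrix.
    for name, y_split, Xd in [("train", y_train, Xd_train), ("vali", y_vali, Xd_vali), ("test", y_test, Xd_test)]:
        print(name, "n =", len(y_split), "merged shape =", Xd["merged"].shape)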
```
#### File: Jiaqi-beep/cs451-practicals/fishing.py
```python
import csv
import arrow
num_var = ["length_m_gfw","engine_power_kw_gfw", "tonnage_gt_gfw"]
str_var = ["date","flag_gfw","vessel_class_gfw"]
examples = [] # training data collected from 2018
ys = []  # label is cumulative fishing hours for 2019
with open("data/2018_West_Africa.csv") as fp:
rows = csv.reader(fp)
header = next(rows)
for row in rows:
entry = dict(zip(header, row)) # glue them into a dict
if entry["fishing_hours_2018"] == "NA" or entry["fishing_hours_2019"] == "NA":
continue
ys.append(float(entry["fishing_hours_2019"]))
#print(entry) # print that dict
geometry = entry["geometry"].split(', ')
lat = float(geometry[0][2:])
lon = float(geometry[1].replace(")", ""))
temp = {}
temp["lat"] = lat
temp["lon"] = lon
#date = arrow.get(entry["date"], 'MM-DD-YYYY')
#temp["date"] = date
for name in str_var:
if entry[name] == 'NA':
continue
else:
temp[name] = entry[name]
for name in num_var:
if entry[name] == 'NA':
continue
else:
temp[name] = float(entry[name])
#temp["self_reported_fishing_vessel"] = entry["self_reported_fishing_vessel"] == "TRUE"
examples.append(temp)
#%% vectorize:
from sklearn.feature_extraction import DictVectorizer
feature_numbering = DictVectorizer(sort=True, sparse=False)
feature_numbering.fit(examples)
X = feature_numbering.transform(examples)
print("Features as {} matrix.".format(X.shape))
del examples
#%% Split data
from sklearn.model_selection import train_test_split
import numpy as np
RANDOM_SEED = 12345678
y = np.array(ys)
# split off 10% for train/validate (tv) pieces.
X_tv, rX_test, y_tv, y_test = train_test_split(
X, y, train_size=0.1, shuffle=True, random_state=RANDOM_SEED
)
# split off 50% train, validate from (tv) pieces.
rX_train, rX_vali, y_train, y_vali = train_test_split(
X_tv, y_tv, train_size=0.5, shuffle=True, random_state=RANDOM_SEED
)
#%% use MinMaxScaler to scale down X
from sklearn.preprocessing import StandardScaler, MinMaxScaler
scaling = StandardScaler()
X_train = scaling.fit_transform(rX_train)
X_vali = scaling.transform(rX_vali)
X_test = scaling.transform(rX_test)
del ys, X, y, y_tv, X_tv, rX_train, rX_vali, rX_test
print(X_train.shape, X_vali.shape, X_test.shape)
#%% Out-of-the-box model performances. Do I need a linear or nonlinear model?
#%% model Experiments
print("\n##### Do I need a linear or nonlinear model? #####")
from sklearn.utils import resample
# sample a much smaller subset from the training data to test different models
# and their hyperparameters
X_temp, y_temp = resample(
X_train, y_train, n_samples=1500, replace=False
)
print("training size for models: ", X_temp.shape)
# stdlib:
from dataclasses import dataclass
import json
from typing import Dict, Any, List
from sklearn.base import RegressorMixin
#%% Define & Run Experiments
@dataclass
class ExperimentResult:
vali_acc: float
params: Dict[str, Any]
model: RegressorMixin
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import SGDRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.neighbors import KNeighborsRegressor
def consider_decision_trees():
print("Consider Decision Tree")
performances: List[ExperimentResult] = []
for rnd in range(3):
for crit in ["poisson", "mse", "friedman_mse", "mae"]:
for d in range(15, 25, 1):
params = {
"criterion": crit,
"max_depth": d,
"random_state": rnd,
}
f = DecisionTreeRegressor(**params)
f.fit(X_temp, y_temp)
vali_acc = f.score(X_vali, y_vali)
result = ExperimentResult(vali_acc, params, f)
performances.append(result)
return max(performances, key=lambda result: result.vali_acc)
def consider_knn():
print("Consider knn")
performances: List[ExperimentResult] = []
for weight in ["uniform", "distance"]:
for n in range(5, 13):
params = {
"n_neighbors": n,
"weights": weight,
}
f = KNeighborsRegressor(**params)
f.fit(X_temp, y_temp)
vali_acc = f.score(X_vali, y_vali)
result = ExperimentResult(vali_acc, params, f)
performances.append(result)
return max(performances, key=lambda result: result.vali_acc)
def consider_neural_net() -> ExperimentResult:
print("Consider Multi-Layer Perceptron")
performances: List[ExperimentResult] = []
for rnd in range(5):
for solver in ["lbfgs", "sgd", "adam"]:
for alpha in range(1, 5):
params = {
"hidden_layer_sizes": (500,),
"random_state": rnd,
"solver": solver,
"alpha": alpha*0.0001,
"max_iter": 5000,
}
f = MLPRegressor(**params)
f.fit(X_temp, y_temp)
vali_acc = f.score(X_vali, y_vali)
result = ExperimentResult(vali_acc, params, f)
print(result)
performances.append(result)
return max(performances, key=lambda result: result.vali_acc)
dtree = consider_decision_trees()
knn = consider_knn()
#mlp = consider_neural_net() ## <- mlp is too slow to run and sgd spits out negative scores
print("\nBest DTree:\n", dtree)
print("Best knn:\n", knn)
#print("Best MLP:\n", mlp)
## result:
#Best DTree:
# ExperimentResult(vali_acc=0.911741239003447, params={'criterion': 'friedman_mse', 'max_depth': 21, 'random_state': 0}, model=DecisionTreeRegressor(criterion='friedman_mse', max_depth=21, random_state=0))
#Best knn:
# ExperimentResult(vali_acc=0.7033403492268693, params={'n_neighbors': 8, 'weights': 'uniform'}, model=KNeighborsRegressor(n_neighbors=8))
# Linear models do not work well for this dataset. The features are highly correlated, which makes a linear model
# very unstable, and a large number of iterations is needed for the optimizer to converge,
# which makes linear models too slow to train here.
del X_temp, y_temp
from shared import simple_boxplot, bootstrap_regressor
simple_boxplot(
{
"Decision Tree": bootstrap_regressor(dtree.model, X_vali, y_vali),
"knn": bootstrap_regressor(knn.model, X_vali, y_vali),
#"MLP/NN": bootstrap_accuracy(mlp.model, X_vali, y_vali),
},
title="Validation Accuracy",
xlabel="Model",
ylabel="Mean Squared Error",
save="graphs/project/model-cmp.png",
)
## Decision tree performs better than knn for this dataset. The bootstrapped boxplot shows
# that this dataset has rather high quality without many outliers and much variance.
del dtree, knn
#%% Is my dataset large enough?
#%% Compute performance for each % of training data
print("\n##### Is my dataset large enough? #####")
N = len(y_train)
print(N)
num_trials = 80
percentages = list(range(20, 100, 20)) ## <- quite possibly this amount is enough
percentages.append(100)
scores = {}
acc_mean = []
acc_std = []
for train_percent in percentages:
n_samples = int((train_percent / 100) * N)
print("{}% == {} samples...".format(train_percent, n_samples))
label = "{}".format(n_samples, train_percent)
# So we consider num_trials=100 subsamples, and train a model on each.
scores[label] = []
for i in range(num_trials):
X_sample, y_sample = resample(
X_train, y_train, n_samples=n_samples, replace=False
) # type:ignore
clf = DecisionTreeRegressor(max_depth = 19)
#clf = SGDRegressor(random_state=RANDOM_SEED + n_samples + i)
clf.fit(X_sample, y_sample)
scores[label].append(clf.score(X_vali, y_vali))
acc_mean.append(np.mean(scores[label]))
acc_std.append(np.std(scores[label]))
# line plot with shaded variance regions
import matplotlib.pyplot as plt
# convert our list of means/std to numpy arrays to add & subtract them.
means = np.array(acc_mean)
std = np.array(acc_std)
# plot line from means
plt.plot(percentages, acc_mean, "o-")
# plot area from means & stddev
plt.fill_between(percentages, means - std, means + std, alpha=0.2)
# Manage axes/show:
plt.xlabel("Percent Training Data")
plt.ylabel("Mean Accuracy")
plt.xlim([0, 100])
plt.title("Shaded Accuracy Plot")
plt.savefig("graphs/project/fishing-area-Accuracy.png")
plt.show()
#%% boxplot to show the learning curve training data
simple_boxplot(
scores,
"Learning Curve",
xlabel="Percent Training Data",
ylabel="Accuracy",
save="graphs/project/fishing-boxplots-Accuracy.png",
)
# As the size of the dataset gets bigger, the accuracy of the decision tree model becomes a lot
# higher.
print("\n##### Thoughts on feature design / usefulness #####")
print("see comments")
# I do not have a lot of features so it is suitable to use methods in p10 to figure out the importances
# of each feature with a smaller subset of the training data. Further datapoints to add could include ecological
# variables. I don't have much insight as to which variables are more valuable than others, since I don't know
# fishing patterns.
# (1) should I group data by week to make the time series more reliable?
# The fact that my dataset spans the entire year seems to really assist the accuracy of the model
# (which also intuitively makes sense). However, splitting the data by week/month would confirm this intuition.
# (2) what are the opportunities for train/test splitting on this data?
# (3) Should I be using K-fold cross-validation?
# This is definitely possible for my dataset, because of the large size of my data. I trained my models above
# on a very small subset of the dataset. The next step could be attempting k-fold cross-validation on the entire
# dataset to see if the hyperparameters derived still hold.
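# Hedged sketch of that k-fold idea (written for this edit, not part of the original
# experiments): score the chosen decision-tree depth across 5 folds of the training split.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(
    DecisionTreeRegressor(max_depth=19), X_train, y_train, cv=5, scoring="r2"
)
print("5-fold R^2: mean={:.3f} std={:.3f}".format(cv_scores.mean(), cv_scores.std()))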
```
#### File: Jiaqi-beep/cs451-practicals/Jiaqi-explore.py
```python
from dataclasses import dataclass
from typing import List
from abc import ABC, abstractmethod
import pandas as pd
#%%
# Create dummy training data
d = {'x': [66, 106, 51, 150, 288, 319, 368, 437],
'y': [140, 166, 241, 263, 169, 130, 213, 141],
'y_actual': [True, True, True, True, False, False, False, False]}
data = pd.DataFrame(data=d)
print(data)
#%% Decision Tree class
class DTreeNode(ABC):
""" DTreeNode is an abstract class. All nodes in the Decision Tree can 'predict'. """
@abstractmethod
def predict(self, x: List[float]) -> float:
raise ValueError("This is an 'abstract' class!")
@dataclass
class DTreeBranch(DTreeNode):
""" We've decided to split based on 'feature' at the value 'split_at'. """
feature: int
split_at: float
less_than: DTreeNode
greater_than: DTreeNode
def predict(self, x: pd.DataFrame) -> float:
if x[self.feature] < self.split_at:
return self.less_than.predict(x)
else:
return self.greater_than.predict(x)
@dataclass
class DTreeLeaf(DTreeNode):
""" We've decided to stop splitting; here's our estimate of the answer. """
estimate: float
def predict(self, x: List[float]) -> float:
return self.estimate
def gini_impurity(data: pd.DataFrame) -> float:
"""
The standard version of gini impurity sums over the classes
"""
p_true = sum(data["y_actual"]) / len(data)
p_false = 1.0 - p_true
return p_true * (1 - p_true) + p_false * (1 - p_false)
def impurity_of_split(data: pd.DataFrame, feature: str, split: float) -> float:
""" Find gini index with given split """
# if we split on this feature at split, we get these two leaves:
j = k = 0
smaller = pd.DataFrame(columns = ["x", "y", "y_actual"])
bigger = pd.DataFrame(columns = ["x", "y", "y_actual"])
for i in range(len(data)):
#print(i)
if data.iloc[i][feature] < split:
smaller.loc[j] = data.iloc[i]
j += 1
else:
bigger.loc[k] = data.iloc[i]
k += 1
# weight impurity of left and right by size of dataset; this makes tiny splits less awesome.
wSmaller = len(smaller) / len(data)
wBigger = len(bigger) / len(data)
return wSmaller*gini_impurity(smaller) + wBigger*gini_impurity(bigger)
def find_candidate_splits(data: pd.DataFrame, feature: str) -> List[float]:
midpoints = []
for i in range(len(data) - 1):
first = data.iloc[i][feature]
second = data.iloc[i+1][feature]
mid = (first + second) * 0.5
midpoints.append(mid)
return midpoints
def make_leaf_node(data: pd.DataFrame):
countTrue = sum(data['y_actual'])
countFalse = len(data) - countTrue
prediction = False
if countTrue > countFalse:
prediction = True
return DTreeLeaf(prediction)
def train(data: pd.DataFrame):
countTrue = sum(data['y_actual'])
countFalse = len(data) - countTrue
if len(data) == 0 or countTrue == 0 or countFalse == 0:
return make_leaf_node(data)
best_score = 10
best_feature = ""
best_split = 0.0
for feature in ["x", "y"]:
splits = find_candidate_splits(data, feature)
for split in splits:
score = impurity_of_split(data, feature, split)
if best_score == 10 or score <= best_score:
best_score = score
best_feature = feature
best_split = split
j = k = 0
left = pd.DataFrame(columns = ["x", "y", "y_actual"])
right = pd.DataFrame(columns = ["x", "y", "y_actual"])
#print("score is {}, data size is {}, best_split is {}, best_feature is {}".format(score, len(data), best_split, best_feature))
for i in range(len(data)):
if data.iloc[i][best_feature] < best_split:
left.loc[j] = data.iloc[i]
j += 1
else:
right.loc[k] = data.iloc[i]
k += 1
if len(left) == 0 or len(right) == 0:
return make_leaf_node(data)
return DTreeBranch(best_feature, best_split, train(left), train(right))
#m = DTreeBranch(0, 0.5, DTreeLeaf(1.0), DTreeLeaf(0.0))
m = train(data)
assert True == m.predict({"x": 1, "y": 10})
assert True == m.predict({"x": 123, "y": 200})
assert False == m.predict({"x": 500, "y": 200})
assert False == m.predict({"x": 432, "y": 200})
print("no assertion errors")
```
#### File: Jiaqi-beep/cs451-practicals/p02-dtree-sk.py
```python
from sklearn.tree import DecisionTreeClassifier
import json # standard python
from shared import dataset_local_path, TODO # helper functions I made
#%% load up the data
examples = []
feature_names = set([])
with open(dataset_local_path("poetry_id.jsonl")) as fp:
for line in fp:
info = json.loads(line)
# Note: the data contains a whole bunch of extra stuff; we just want numeric features for now.
keep = info["features"]
# make a big list of all the features we have:
for name in keep.keys():
feature_names.add(name)
# whether or not it's poetry is our label.
keep["y"] = info["poetry"]
# hold onto this single dictionary.
examples.append(keep)
print(examples[16])
#%% Convert data to 'matrices'
# NOTE: there are better ways to do this, built-in to scikit-learn. We will see them soon.
# turn the set of 'string' feature names into a list (so we can use their order as matrix columns!)
feature_order = sorted(feature_names)
# Set up our ML problem:
train_y = []
train_X = []
# Put every other point in a 'held-out' set for testing...
test_y = []
test_X = []
for i, row in enumerate(examples):
# grab 'y' and treat it as our label.
example_y = row["y"]
# create a 'row' of our X matrix:
example_x = []
for feature_name in feature_order:
example_x.append(float(row[feature_name]))
# put every fourth page into the test set:
if i % 4 == 0:
test_X.append(example_x)
test_y.append(example_y)
else:
train_X.append(example_x)
train_y.append(example_y)
print(
"There are {} training examples and {} testing examples.".format(
len(train_y), len(test_y)
)
)
#%% Now actually train the model...
# Create a regression-tree object:
f = DecisionTreeClassifier(
splitter="random",
max_features=None,
criterion="entropy",
max_depth=8,
random_state=777,
) # type:ignore
# train the tree!
f.fit(train_X, train_y)
# did it memorize OK?
print("Score on Training: {:.3f}".format(f.score(train_X, train_y)))
print("Score on Testing: {:.3f}".format(f.score(test_X, test_y)))
########### Actual 'practical' assignment ############
#splitter {"best", "random"}, default="best" --- strategy used to choose the split at each node
#max_features --- number of features to consider when looking for the best split
#criterion --- "gini" or "entropy" --- function that measures the quality of a split
#max_depth --- maximum depth of the tree
#random_state --- controls the randomness of the estimator
# Consult the documentation: https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
#2. Pick one parameter, vary it, and find some version of the 'best' setting
import random
params = {
"criterion": random.choice(["gini","entropy"]),
"splitter": random.choice(["best","random"]),
}
def train_and_test():
f = DecisionTreeClassifier(
splitter="best",
max_features=None,
criterion="gini",
max_depth=None,
random_state=13,
) # type:ignore
# train the tree!
f.fit(train_X, train_y)
# did it memorize OK?
print("Score on Training: {:.3f}".format(f.score(train_X, train_y)))
print("Score on Testing: {:.3f}".format(f.score(test_X, test_y)))
# Default performance:
# There are 2079 training examples and 693 testing examples.
# Score on Training: 1.000
# Score on Testing: 0.889
TODO("3. Leave clear code for running your experiment!")
``` |
{
"source": "jiaqi-jiang/photonics-opt-testbed",
"score": 3
} |
#### File: photonics-opt-testbed/RGB_metalens/ruler.py
```python
import numpy as np
# Some code is copied or adapted from the Meep code at https://github.com/smartalecH/meep/blob/jax_rebase/python/adjoint/filters.py
class morph:
def __init__(self,size,margin_size=np.zeros((2,2)),proj_strength=10**6):
self.size = size # physical size in the row and column directions
self.margin_size = np.reshape(margin_size,(2,2)) # widths of marginal regions to be ignored
#in margin_size, the two elements in the first row correspond to the top and bottom, and the two elements in the second row correspond to the left and right
self.proj_strength = proj_strength # This is a parameter for the functions heaviside_erosion and heaviside_dilation
def cylindrical_filter(self,arr,radius):
# Construct the grid over the entire design region
xv, yv = np.meshgrid(np.linspace(
-self.size[0]/2,self.size[0]/2,arr.shape[0]), np.linspace(-self.size[1]/2,self.size[1]/2,arr.shape[1]), sparse=True, indexing='ij')
# Calculate the kernel
kernel = np.where(np.abs(xv ** 2 + yv ** 2) <= radius**2,1,0)#.T
# Normalize the kernel
kernel = kernel / np.sum(kernel.flatten()) # Normalize the filter
# Filter the response
arr_out = simple_2d_filter(arr,kernel,arr.shape[0],arr.shape[1])
return arr_out
def heaviside_erosion(self,arr,radius):
beta = self.proj_strength
arr_hat = self.cylindrical_filter(arr,radius)
return np.exp(-beta*(1-arr_hat)) + np.exp(-beta)*(1-arr_hat)
def heaviside_dilation(self,arr,radius):
beta = self.proj_strength
arr_hat = self.cylindrical_filter(arr,radius)
return 1 - np.exp(-beta*arr_hat) + np.exp(-beta)*arr_hat
def open_operator(self,arr,radius):
# erosion and then dilation
he = self.heaviside_erosion(arr,radius)
hdhe = self.heaviside_dilation(he,radius)
return hdhe
def close_operator(self,arr,radius):
# dilation and then erosion
hd = self.heaviside_dilation(arr,radius)
hehd = self.heaviside_erosion(hd,radius)
return hehd
def margin(self,arr): # compute the numbers of pixels corresponding to the marginal widths
margin_number = np.round(np.vstack((self.margin_size[0,:]/self.size[0]*arr.shape[0],self.margin_size[1,:]/self.size[1]*arr.shape[1])))
if np.sum(margin_number[0,:])+3>arr.shape[0] or np.sum(margin_number[1,:])+3>arr.shape[1]:
raise ValueError("Too wide margin or too narrow design region!")
for ii in range(margin_number.shape[0]):
for jj in range(margin_number.shape[1]):
if margin_number[ii,jj]==0:
margin_number[ii,jj] = 1
if margin_number[ii,jj]<0:
raise ValueError("Margin widths should be positive!")
return margin_number.astype(int)
def minimum_length(self,arr,len_arr=None):
arr = binarize(arr)
margin_number = self.margin(arr)
if np.array(len_arr).any(): # search the minimum length scale within a length array "len_arr"
radius_list = sorted(list(np.abs(len_arr)/2))
for radius in radius_list:
diff_image = np.abs(self.open_operator(arr,radius)-self.close_operator(arr,radius)) # difference between open and close
pixel_in = in_pixel_count(diff_image,margin_number=margin_number)
if pixel_in>0:
print("The minimum length scale is ",radius*2)
return radius*2
print("The minimum length scale is not in this array of lengths.")
return
else: # find the minimum length scale via binary search if "len_arr" is not provided
radius_ub = min(self.size[0],self.size[1])/2 # largest meaningful radius of open and close operations
diff_image_ub = np.abs(self.open_operator(arr,radius_ub)-self.close_operator(arr,radius_ub)) # difference between open and close
pixel_in_ub = in_pixel_count(diff_image_ub,margin_number=margin_number)
if pixel_in_ub>0:
radii = [0,radius_ub/2,radius_ub]
while np.abs(radii[0]-radii[2])>min(np.array(self.size)/np.array(arr.shape))/2:
radius = radii[1]
diff_image = np.abs(self.open_operator(arr,radius)-self.close_operator(arr,radius)) # difference between open and close
pixel_in = in_pixel_count(diff_image,margin_number=margin_number)
if pixel_in==0: radii[0],radii[1] = radius,(radius+radii[2])/2
else: radii[1],radii[2] = (radius+radii[0])/2,radius
return radii[1]*2
else:
print("The minimum length scale is at least ", radius_ub*2)
return
def binarize(arr,demarcation=0.5):
arr_normalized = (arr-min(arr.flatten()))/(max(arr.flatten())-min(arr.flatten())) # normalize the data of the image
arr_binarized = np.sign(arr_normalized-demarcation)/2+0.5 # binarize the data of the image with the threshold 0.5
return arr_binarized
def simple_2d_filter(arr,kernel,Nx,Ny):
# Get 2d parameter space shape
(kx,ky) = kernel.shape
# Ensure the input is 2D
arr = arr.reshape(Nx,Ny)
# pad the kernel and input to avoid circular convolution and
# to ensure boundary conditions are met.
kernel = _zero_pad(kernel,((kx,kx),(ky,ky)))
arr = _edge_pad(arr,((kx,kx),(ky,ky)))
# Transform to frequency domain for fast convolution
K = np.fft.fft2(kernel)
A = np.fft.fft2(arr)
# Convolution (multiplication in frequency domain)
KA = K * A
    # We need to fftshift since we padded both sides of each dimension of our input and kernel.
arr_out = np.fft.fftshift(np.real(np.fft.ifft2(KA)))
# Remove all the extra padding
arr_out = _centered(arr_out,(kx,ky))
return arr_out
def _centered(arr, newshape):
'''Helper function that reformats the padded array of the fft filter operation.
Borrowed from scipy:
https://github.com/scipy/scipy/blob/v1.4.1/scipy/signal/signaltools.py#L263-L270
'''
# Return the center newshape portion of the array.
newshape = np.asarray(newshape)
currshape = np.array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _edge_pad(arr, pad):
# fill sides
left = np.tile(arr[0,:],(pad[0][0],1)) # left side
right = np.tile(arr[-1,:],(pad[0][1],1)) # right side
top = np.tile(arr[:,0],(pad[1][0],1)).transpose() # top side
    bottom = np.tile(arr[:,-1],(pad[1][1],1)).transpose() # bottom side
# fill corners
top_left = np.tile(arr[0,0], (pad[0][0],pad[1][0])) # top left
top_right = np.tile(arr[-1,0], (pad[0][1],pad[1][0])) # top right
bottom_left = np.tile(arr[0,-1], (pad[0][0],pad[1][1])) # bottom left
bottom_right = np.tile(arr[-1,-1], (pad[0][1],pad[1][1])) # bottom right
out = np.concatenate((
np.concatenate((top_left,top,top_right)),
np.concatenate((left,arr,right)),
np.concatenate((bottom_left,bottom,bottom_right))
),axis=1)
return out
def _zero_pad(arr, pad):
# fill sides
left = np.tile(0,(pad[0][0],arr.shape[1])) # left side
right = np.tile(0,(pad[0][1],arr.shape[1])) # right side
top = np.tile(0,(arr.shape[0],pad[1][0])) # top side
bottom = np.tile(0,(arr.shape[0],pad[1][1])) # bottom side
# fill corners
top_left = np.tile(0, (pad[0][0],pad[1][0])) # top left
top_right = np.tile(0, (pad[0][1],pad[1][0])) # top right
bottom_left = np.tile(0, (pad[0][0],pad[1][1])) # bottom left
bottom_right = np.tile(0, (pad[0][1],pad[1][1])) # bottom right
out = np.concatenate((
np.concatenate((top_left,top,top_right)),
np.concatenate((left,arr,right)),
np.concatenate((bottom_left,bottom,bottom_right))
),axis=1)
return out
def in_pixel_count(arr,margin_number=np.ones((2,2),dtype=int),threshold=0.5):
# if the value of a pixel exceeds the threshold, it is regarded as nonzero
pixel_int = 0 # number of interior pixels with nonzero values
for ii in range(margin_number[0,0],arr.shape[0]-margin_number[0,1]):
for jj in range(margin_number[1,0],arr.shape[1]-margin_number[1,1]):
if arr[ii-1,jj]>threshold and arr[ii+1,jj]>threshold and arr[ii,jj-1]>threshold and arr[ii,jj+1]>threshold:
pixel_int += 1
return pixel_int # numbers of interior pixels with nonzero values
def pixel_count(arr,threshold): # if the value of a pixel exceeds the threshold, it is regarded as nonzero
pixel_tot,pixel_int = 0,0 # number of pixels with nonzero values, and among which the number of interior pixels
for ii in range(arr.shape[0]):
for jj in range(arr.shape[1]):
if arr[ii,jj]>threshold:
pixel_tot += 1
if ii>0 and ii<arr.shape[0]-1 and jj>0 and jj<arr.shape[1]-1:
if arr[ii-1,jj]>threshold and arr[ii+1,jj]>threshold and arr[ii,jj-1]>threshold and arr[ii,jj+1]>threshold:
pixel_int += 1
return pixel_tot-pixel_int,pixel_int # numbers of exterior and interior pixels with nonzero values
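# --- Hypothetical usage sketch (not part of the original file) ---
# The pattern below is an illustrative placeholder rather than real optimization output:
# a 100x100 design spanning a 1.0 x 1.0 physical region containing a bar of width 0.2.
if __name__ == "__main__":
    pattern = np.zeros((100, 100))
    pattern[40:60, :] = 1.0                  # horizontal bar, 0.2 physical units wide
    ruler = morph(size=(1.0, 1.0))           # physical size of the design region
    min_len = ruler.minimum_length(pattern)  # binary search over open/close radii
    print("Estimated minimum length scale:", min_len)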
``` |
{
"source": "JiaqiLiu/PyTorch-NLP",
"score": 3
} |
#### File: encoders/text/test_spacy_encoder.py
```python
import pickle
import pytest
from torchnlp.encoders.text import SpacyEncoder
@pytest.fixture
def input_():
return ('This is a sentence')
@pytest.fixture
def encoder(input_):
return SpacyEncoder([input_])
def test_spacy_encoder(encoder, input_):
tokens = encoder.encode(input_)
assert encoder.decode(tokens) == input_
def test_spacy_encoder_issue_44():
# https://github.com/PetrochukM/PyTorch-NLP/issues/44
encoder = SpacyEncoder(["This ain't funny."])
assert 'ai' in encoder.vocab
assert 'n\'t' in encoder.vocab
def test_spacy_encoder_batch(encoder, input_):
tokens, _ = encoder.batch_encode([input_, input_])
assert encoder.decode(tokens[0]) == input_
assert encoder.decode(tokens[1]) == input_
def test_spacy_encoder_not_installed_language():
error_message = ''
try:
SpacyEncoder([], language='fr')
except Exception as e:
error_message = str(e)
assert error_message.startswith("Language 'fr' not found.")
def test_spacy_encoder_unsupported_language():
error_message = ''
try:
SpacyEncoder([], language='python')
except Exception as e:
error_message = str(e)
assert error_message.startswith("No tokenizer available for language " + "'python'.")
def test_is_pickleable(encoder):
pickle.dumps(encoder)
```
#### File: encoders/text/test_text_encoder.py
```python
import pytest
import torch
from torchnlp.encoders.text import DEFAULT_PADDING_INDEX
from torchnlp.encoders.text import stack_and_pad_tensors
from torchnlp.encoders.text import pad_tensor
def test_pad_tensor():
padded = pad_tensor(torch.LongTensor([1, 2, 3]), 5, DEFAULT_PADDING_INDEX)
assert padded.tolist() == [1, 2, 3, DEFAULT_PADDING_INDEX, DEFAULT_PADDING_INDEX]
def test_pad_tensor_multiple_dim():
padded = pad_tensor(torch.LongTensor(1, 2, 3), 5, DEFAULT_PADDING_INDEX)
assert padded.size() == (5, 2, 3)
assert padded[1].sum().item() == pytest.approx(0)
def test_pad_tensor_multiple_dim_float_tensor():
padded = pad_tensor(torch.FloatTensor(778, 80), 804, DEFAULT_PADDING_INDEX)
assert padded.size() == (804, 80)
assert padded[-1].sum().item() == pytest.approx(0)
assert padded.type() == 'torch.FloatTensor'
def test_stack_and_pad_tensors():
batch = [torch.LongTensor([1, 2, 3]), torch.LongTensor([1, 2]), torch.LongTensor([1])]
padded, lengths = stack_and_pad_tensors(batch, DEFAULT_PADDING_INDEX)
padded = [r.tolist() for r in padded]
assert padded == [[1, 2, 3], [1, 2, DEFAULT_PADDING_INDEX],
[1, DEFAULT_PADDING_INDEX, DEFAULT_PADDING_INDEX]]
assert lengths.tolist() == [3, 2, 1]
def test_stack_and_pad_tensors__dim():
batch_size = 3
batch = [torch.LongTensor([1, 2, 3, 4]), torch.LongTensor([1, 2, 3]), torch.LongTensor([1, 2])]
padded, lengths = stack_and_pad_tensors(batch, DEFAULT_PADDING_INDEX, dim=1)
assert padded.shape == (4, batch_size)
assert lengths.shape == (1, batch_size)
assert lengths.tolist() == [[4, 3, 2]]
assert padded.tolist() == [[1, 1, 1], [2, 2, 2], [3, 3, DEFAULT_PADDING_INDEX],
[4, DEFAULT_PADDING_INDEX, DEFAULT_PADDING_INDEX]]
```
#### File: tests/samplers/test_bptt_batch_sampler.py
```python
import pickle
import string
import pytest
from torchnlp.samplers import BPTTBatchSampler
from torchnlp.utils import sampler_to_iterator
@pytest.fixture
def alphabet():
return list(string.ascii_lowercase)
@pytest.fixture
def sampler(alphabet):
return BPTTBatchSampler(alphabet, bptt_length=2, batch_size=4, drop_last=True)
def test_bptt_batch_sampler_drop_last(sampler, alphabet):
# Test samplers iterate over chunks similar to:
# https://github.com/pytorch/examples/blob/c66593f1699ece14a4a2f4d314f1afb03c6793d9/word_language_model/main.py#L112
list_ = list(sampler_to_iterator(alphabet, sampler))
assert list_[0] == [['a', 'b'], ['g', 'h'], ['m', 'n'], ['s', 't']]
assert len(sampler) == len(list_)
def test_bptt_batch_sampler(alphabet):
sampler = BPTTBatchSampler(alphabet, bptt_length=2, batch_size=4, drop_last=False)
list_ = list(sampler_to_iterator(alphabet, sampler))
assert list_[0] == [['a', 'b'], ['h', 'i'], ['o', 'p'], ['u', 'v']]
assert len(sampler) == len(list_)
def test_bptt_batch_sampler_example():
sampler = BPTTBatchSampler(range(100), bptt_length=2, batch_size=3, drop_last=False)
assert list(sampler)[0] == [slice(0, 2), slice(34, 36), slice(67, 69)]
sampler = BPTTBatchSampler(
range(100), bptt_length=2, batch_size=3, drop_last=False, type_='target')
assert list(sampler)[0] == [slice(1, 3), slice(35, 37), slice(68, 70)]
def test_is_pickleable(sampler):
pickle.dumps(sampler)
```
#### File: tests/samplers/test_bptt_sampler.py
```python
import pickle
import random
import pytest
from torchnlp.samplers import BPTTSampler
@pytest.fixture
def sampler():
return BPTTSampler(range(5), 2)
def test_bptt_sampler_odd(sampler):
assert list(sampler) == [slice(0, 2), slice(2, 4)]
assert len(sampler) == 2
def test_bptt_sampler_even():
sampler = BPTTSampler(range(6), 2, type_='target')
assert list(sampler) == [slice(1, 3), slice(3, 5), slice(5, 6)]
assert len(sampler) == 3
def test_bptt_sampler_length():
for i in range(1, 1000):
sampler = BPTTSampler(range(i), random.randint(1, 17))
assert len(sampler) == len(list(sampler))
def test_is_pickleable(sampler):
pickle.dumps(sampler)
```
#### File: PyTorch-NLP/tests/test_download.py
```python
import urllib.request
from tqdm import tqdm
from torchnlp.download import _get_filename_from_url
from torchnlp.download import _reporthook
def test_get_filename_from_url():
assert 'aclImdb_v1.tar.gz' in _get_filename_from_url(
'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz')
assert 'SimpleQuestions_v2.tgz' in _get_filename_from_url(
'https://www.dropbox.com/s/tohrsllcfy7rch4/SimpleQuestions_v2.tgz?raw=1')
def test_reporthook():
# Check that reporthook works with URLLIB
with tqdm(unit='B', unit_scale=True, miniters=1) as t:
urllib.request.urlretrieve('http://google.com', reporthook=_reporthook(t))
```
#### File: tests/word_to_vector/test_fast_text.py
```python
import os
import mock
from torchnlp.word_to_vector import FastText
from tests.word_to_vector.utils import urlretrieve_side_effect
@mock.patch('urllib.request.urlretrieve')
def test_fasttext_simple(mock_urlretrieve):
directory = 'tests/_test_data/fast_text/'
# Make sure URL has a 200 status
mock_urlretrieve.side_effect = urlretrieve_side_effect
# Attempt to parse a subset of FastText
vectors = FastText(language='simple', cache=directory)
assert len(vectors['the']) == 300
assert len(vectors) > 1
# Test cache and `is_include`
vectors = FastText(language='simple', is_include=lambda w: w == 'the', cache=directory)
assert 'the' in vectors.stoi
assert len(vectors) == 1
# Test implementation of __contains()__
assert 'the' in vectors
# Test with the unknown characters
assert len(vectors['漢字']) == 300
# Clean up
os.remove(os.path.join(directory, 'wiki.simple.vec.pt'))
@mock.patch('urllib.request.urlretrieve')
def test_fasttext_list_arguments(mock_urlretrieve):
directory = 'tests/_test_data/fast_text/'
# Make sure URL has a 200 status
mock_urlretrieve.side_effect = urlretrieve_side_effect
# Load subset of FastText
vectors = FastText(language='simple', cache=directory)
# Test implementation of __getitem()__ for token list and tuple
    assert list(vectors[['the', 'of']].shape) == [2, 300]
    assert list(vectors[('the', 'of')].shape) == [2, 300]
# Clean up
os.remove(os.path.join(directory, 'wiki.simple.vec.pt'))
@mock.patch('urllib.request.urlretrieve')
def test_fasttext_non_list_or_tuple_raises_type_error(mock_urlretrieve):
directory = 'tests/_test_data/fast_text/'
# Make sure URL has a 200 status
mock_urlretrieve.side_effect = urlretrieve_side_effect
# Load subset of FastText
vectors = FastText(language='simple', cache=directory)
# Test implementation of __getitem()__ for invalid type
error_class = None
try:
vectors[None]
except Exception as e:
error_class = e.__class__
assert error_class is TypeError
# Clean up
os.remove(os.path.join(directory, 'wiki.simple.vec.pt'))
@mock.patch('urllib.request.urlretrieve')
def test_aligned_fasttext(mock_urlretrieve):
directory = 'tests/_test_data/fast_text/'
# Make sure URL has a 200 status
mock_urlretrieve.side_effect = urlretrieve_side_effect
# Parse the aligned FastText embeddings
vectors = FastText(aligned=True, cache=directory)
# Assert the embeddings' dimensionality
assert len(vectors['the']) == 300
# Our test file contains only five words to keep the file size small
assert len(vectors) == 5
# Clean up
os.remove(os.path.join(directory, 'wiki.en.align.vec.pt'))
```
#### File: torchnlp/datasets/trec.py
```python
import os
from torchnlp.download import download_files_maybe_extract
from torchnlp.datasets.dataset import Dataset
def trec_dataset(directory='data/trec/',
train=False,
test=False,
train_filename='train_5500.label',
test_filename='TREC_10.label',
check_files=['train_5500.label'],
urls=[
'http://cogcomp.org/Data/QA/QC/train_5500.label',
'http://cogcomp.org/Data/QA/QC/TREC_10.label'
],
fine_grained=False):
"""
Load the Text REtrieval Conference (TREC) Question Classification dataset.
TREC dataset contains 5500 labeled questions in training set and another 500 for test set. The
dataset has 6 labels, 50 level-2 labels. Average length of each sentence is 10, vocabulary size
of 8700.
References:
* https://nlp.stanford.edu/courses/cs224n/2004/may-steinberg-project.pdf
* http://cogcomp.org/Data/QA/QC/
* http://www.aclweb.org/anthology/C02-1150
**Citation:**
<NAME>, <NAME>, Learning Question Classifiers. COLING'02, Aug., 2002.
Args:
directory (str, optional): Directory to cache the dataset.
train (bool, optional): If to load the training split of the dataset.
test (bool, optional): If to load the test split of the dataset.
train_filename (str, optional): The filename of the training split.
test_filename (str, optional): The filename of the test split.
check_files (str, optional): Check if these files exist, then this download was successful.
urls (str, optional): URLs to download.
Returns:
:class:`tuple` of :class:`torchnlp.datasets.Dataset` or :class:`torchnlp.datasets.Dataset`:
Returns between one and all dataset splits (train, dev and test) depending on if their
respective boolean argument is ``True``.
Example:
>>> from torchnlp.datasets import trec_dataset # doctest: +SKIP
>>> train = trec_dataset(train=True) # doctest: +SKIP
>>> train[:2] # doctest: +SKIP
[{
'label': 'DESC',
'text': 'How did serfdom develop in and then leave Russia ?'
}, {
'label': 'ENTY',
'text': 'What films featured the character Popeye Doyle ?'
}]
"""
download_files_maybe_extract(urls=urls, directory=directory, check_files=check_files)
ret = []
splits = [(train, train_filename), (test, test_filename)]
splits = [f for (requested, f) in splits if requested]
for filename in splits:
full_path = os.path.join(directory, filename)
examples = []
for line in open(full_path, 'rb'):
# there is one non-ASCII byte: sisterBADBYTEcity; replaced with space
label, _, text = line.replace(b'\xf0', b' ').strip().decode().partition(' ')
label, _, label_fine = label.partition(':')
if fine_grained:
examples.append({'label': label_fine, 'text': text})
else:
examples.append({'label': label, 'text': text})
ret.append(Dataset(examples))
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
```
#### File: encoders/text/text_encoder.py
```python
import torch
from torchnlp.encoders import Encoder
from torchnlp.encoders.text.default_reserved_tokens import DEFAULT_PADDING_INDEX
def pad_tensor(tensor, length, padding_index=DEFAULT_PADDING_INDEX):
""" Pad a ``tensor`` to ``length`` with ``padding_index``.
Args:
tensor (torch.Tensor [n, ...]): Tensor to pad.
length (int): Pad the ``tensor`` up to ``length``.
padding_index (int, optional): Index to pad tensor with.
Returns
(torch.Tensor [length, ...]) Padded Tensor.
"""
n_padding = length - tensor.shape[0]
assert n_padding >= 0
if n_padding == 0:
return tensor
padding = tensor.new(n_padding, *tensor.shape[1:]).fill_(padding_index)
return torch.cat((tensor, padding), dim=0)
def stack_and_pad_tensors(batch, padding_index=DEFAULT_PADDING_INDEX, dim=0):
""" Pad a :class:`list` of ``tensors`` (``batch``) with ``padding_index``.
Args:
batch (:class:`list` of :class:`torch.Tensor`): Batch of tensors to pad.
padding_index (int, optional): Index to pad tensors with.
dim (int, optional): Dimension on to which to concatenate the batch of tensors.
Returns
torch.Tensor, torch.Tensor: Padded tensors and original lengths of tensors.
"""
lengths = [tensor.shape[0] for tensor in batch]
max_len = max(lengths)
padded = [pad_tensor(tensor, max_len, padding_index) for tensor in batch]
lengths = torch.tensor(lengths)
padded = torch.stack(padded, dim=dim).contiguous()
for _ in range(dim):
lengths = lengths.unsqueeze(0)
return padded, lengths
class TextEncoder(Encoder):
def batch_encode(self, iterator, *args, dim=0, **kwargs):
"""
Args:
iterator (iterator): Batch of text to encode.
*args: Arguments passed onto ``Encoder.__init__``.
dim (int, optional): Dimension along which to concatenate tensors.
**kwargs: Keyword arguments passed onto ``Encoder.__init__``.
Returns
torch.Tensor, list of int: Encoded and padded batch of sequences; Original lengths of
sequences.
"""
return stack_and_pad_tensors(
super().batch_encode(iterator), padding_index=self.padding_index, dim=dim)
def batch_decode(self, tensor, lengths, dim=0, *args, **kwargs):
"""
Args:
batch (list of :class:`torch.Tensor`): Batch of encoded sequences.
lengths (list of int): Original lengths of sequences.
dim (int, optional): Dimension along which to split tensors.
*args: Arguments passed to ``decode``.
**kwargs: Key word arguments passed to ``decode``.
Returns:
list: Batch of decoded sequences.
"""
return super().batch_decode(
[t.squeeze(0)[:l] for t, l in zip(tensor.split(1, dim=dim), lengths)])
```
#### File: torchnlp/samplers/bptt_batch_sampler.py
```python
import math
from torchnlp.samplers.bptt_sampler import BPTTSampler
class BPTTBatchSampler(object):
"""Samples sequentially a batch of source and target slices of size ``bptt_length``.
Typically, such a sampler, is used for language modeling training with backpropagation through
time (BPTT).
**Reference:**
https://github.com/pytorch/examples/blob/c66593f1699ece14a4a2f4d314f1afb03c6793d9/word_language_model/main.py#L61
Args:
data (iterable): Iterable data.
bptt_length (int): Length of the slice.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if its size would be
less than ``batch_size``.
type_ (str, optional): Type of batch ['source'|'target'] to load where a target batch is one
timestep ahead
Example:
>>> sampler = BPTTBatchSampler(range(100), bptt_length=2, batch_size=3, drop_last=False)
>>> list(sampler)[0] # First Batch
[slice(0, 2, None), slice(34, 36, None), slice(67, 69, None)]
"""
def __init__(self, data, bptt_length, batch_size, drop_last, type_='source'):
self.data = data
self.batch_size = batch_size
self.drop_last = drop_last
# For each row in the batch, we iterate over a chunk of size `chunk_size`
# Our chunks are similar to the columns in this PyTorch example:
# https://github.com/pytorch/examples/blob/c66593f1699ece14a4a2f4d314f1afb03c6793d9/word_language_model/main.py#L61
chunk_sizes = [math.floor(len(data) / batch_size)] * batch_size
# Distribute the remaining elements to some chunks
if not self.drop_last:
remainder = len(data) - sum(chunk_sizes)
for i in range(remainder):
chunk_sizes[i] += 1
self.samplers = [{
'offset': sum(chunk_sizes[:i]),
'sampler': BPTTSampler(range(chunk_sizes[i]), bptt_length, type_=type_)
} for i in range(batch_size)]
def __iter__(self):
# Samplers iterate over chunks similar to:
# https://github.com/pytorch/examples/blob/c66593f1699ece14a4a2f4d314f1afb03c6793d9/word_language_model/main.py#L112
self.iterators = [iter(value['sampler']) for value in self.samplers]
while True:
batch = []
for i, iterator in enumerate(self.iterators):
try:
# Adjust the sampler indices to the offset
offset = self.samplers[i]['offset']
slice_ = next(iterator)
batch.append(slice(slice_.start + offset, slice_.stop + offset))
except StopIteration:
pass
# Samplers are all empty
if (len(batch) == 0):
break
yield batch
def __len__(self):
return len(self.samplers[0]['sampler'])
``` |
{
"source": "JiaqiLiZju/NvTK",
"score": 3
} |
#### File: NvTK/NvTK/config.py
```python
import json
import logging
from copy import deepcopy
import NvTK
from NvTK.Data.Dataset import generate_dataloader_from_datasets
from torch import nn
from torch import optim
def load_config_from_json(fname):
with open(fname) as f:
config = json.load(f)
return config
def dump_config_to_json(config, fname='config_dump.json'):
with open(fname, 'w') as f:
f.write(json.dumps(config, indent=4))
def generate_dataloader_from_config(config):
data_type = config['data'].get('type', 'h5Dataset')
fname = config['data']['fpath']
train_loader, validate_loader, test_loader = generate_dataloader_from_datasets(fname, batch_size = 16)
return train_loader, validate_loader, test_loader
def parse_model_args(args):
'''help-function of get_model_from_config'''
args = deepcopy(args)
for k, v in args.items():
if k in ["pool", "activation"]:
if hasattr(nn, v):
args[k] = getattr(nn, v)
elif hasattr(NvTK, v):
args[k] = getattr(NvTK, v)
else:
logging.error("model args[%s]=%s not valid!"%(k,v))
raise ValueError
return args
def get_model_from_config(config):
model_type = config['model'].get('type', 'CNN')
model_args = config['model']['args']
model_args = parse_model_args(model_args)
if hasattr(NvTK, model_type):
model = getattr(NvTK, model_type)(**model_args)
return model
def get_optimizer_from_config(config, model):
if 'optimizer' in config:
optimizer_type = config['optimizer']['type']
args = config['optimizer'].get('args', {"lr":1e-4})
else:
optimizer_type = 'Adam'
args = {"lr":1e-4}
if hasattr(optim, optimizer_type):
optimizer = getattr(optim, optimizer_type)(model.parameters(), **args)
return optimizer
def get_criterion_from_config(config):
if 'criterion' in config:
criterion_type = config['criterion']['type']
args = config['criterion'].get('args', {})
else:
if config['model']['tasktype'] == 'regression':
criterion_type = 'MSELoss'
args = {}
else:
criterion_type = 'BCELoss'
args = {}
if hasattr(nn, criterion_type):
criterion = getattr(nn, criterion_type)(**args)
elif hasattr(NvTK.Modules.Loss, criterion_type):
criterion = getattr(NvTK.Modules.Loss, criterion_type)(**args)
return criterion
def parse_trainer_args(config):
trainer_args = {}
if "trainer" in config:
if "args" in config["trainer"]:
trainer_args = config["trainer"]["args"]
return trainer_args
def parse_modes_from_config(config):
if 'modes' in config:
return [k for k in config['modes']]
else:
return ["hpo", "train", "evaluate", "explain"]
```
#### File: NvTK/Data/Augment.py
```python
import numpy as np
def seq_rc_augment(X, y):
X_rc = np.array([seq[::-1,::-1] for seq in X]) # reverse_comp
y_rc = y # keep same label
return np.vstack((X_rc, X)), np.vstack((y_rc, y))
def shift_sequence(seq, shift, pad_value=0.25):
"""Shift a sequence left or right by shift_amount.
Args:
seq: [batch_size, seq_depth, seq_length] sequence
shift: signed shift value (int)
pad_value: value to fill the padding (primitive or scalar)
"""
if len(seq.shape) != 3:
raise ValueError('input sequence should be rank 3')
input_shape = seq.shape
pad = pad_value * np.ones_like(seq[:, :, 0:np.abs(shift)])
def _shift_right(_seq):
# shift is positive
sliced_seq = _seq[:, :, :-shift:]
return np.concatenate([pad, sliced_seq], axis=-1)
def _shift_left(_seq):
# shift is negative
sliced_seq = _seq[:, :, -shift:]
return np.concatenate([sliced_seq, pad], axis=-1)
if shift > 0:
sseq = _shift_right(seq)
else:
sseq = _shift_left(seq)
return sseq
def seq_shift_augment(X, y, shift=None):
if shift is None:
shift = X.shape[-1] // 4
X_rc_left = shift_sequence(X, shift) # reverse_comp
X_rc_right = shift_sequence(X, -shift) # reverse_comp
return np.vstack((X_rc_left, X_rc_right, X)), np.vstack([y]*3)
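# --- Hypothetical usage sketch (not part of the original file) ---
# X: one-hot sequences shaped (N, 4, L); y: labels shaped (N, n_tasks)
# X_aug, y_aug = seq_rc_augment(X, y)     # doubles the data with reverse complements
# X_aug, y_aug = seq_shift_augment(X, y)  # triples the data with left/right shifted copies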
```
#### File: NvTK/Data/Target.py
```python
import numpy as np
def onehot_encode(label):
from sklearn.preprocessing import label_binarize
return label_binarize(label, classes=range(np.max(label)+1))
def map_prob2label(y_pred_prob, map_fn=np.argmax):
assert isinstance(y_pred_prob, np.ndarray)
return np.array(list(map(map_fn, y_pred_prob)))
```
#### File: NvTK/Explainer/Gradiant.py
```python
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from captum.attr import Saliency, LayerConductance
def contribution_input_grad(model, input_tensor, multiply_by_inputs=True):
input_tensor.requires_grad_()
y = model(input_tensor).sum()
y.backward()
grad = input_tensor.grad
if multiply_by_inputs:
grad = grad * input_tensor
return grad.data.cpu().numpy()
def deep_explain_saliancy(model, input_tensor, n_class=1, use_abs=True):
saliency = Saliency(model)
saliency_val_l = []
for i_class in range(n_class):
attribution = saliency.attribute(input_tensor, target=i_class)
saliency_vals = attribution.cpu().data.numpy()
if use_abs:
saliency_vals = np.abs(saliency_vals)
saliency_val_l.append(saliency_vals)
return np.array(saliency_val_l)
def input_saliancy_location(model, input_tensor, n_class=3, use_abs=True):
saliency_val_l = deep_explain_saliancy(model, input_tensor, n_class=n_class, use_abs=use_abs)
saliency_val = saliency_val_l.mean(0).mean(0).mean(0)
saliency_length = pd.DataFrame(enumerate(saliency_val), columns=["location","saliancy"])
return saliency_length
def plot_saliancy_location(model, input_tensor, n_class=3, use_abs=True):
saliency_length = input_saliancy_location(model, input_tensor, n_class=n_class, use_abs=use_abs)
plt.figure(figsize=(30,4))
ax = sns.lineplot(x="location", y="saliancy", data=saliency_length)
plt.show()
plt.close()
def deep_explain_layer_conductance(model, model_layer, input_tensor, n_class=1):
layer_cond = LayerConductance(model, model_layer)
cond_val_l = []
for i_class in range(n_class):
attribution = layer_cond.attribute(input_tensor, target=i_class, internal_batch_size=32)
cond_vals = attribution.data.cpu().numpy()
cond_val_l.append(cond_vals)
return np.array(cond_val_l)
def label_neuron_importance(model, model_layer, input_tensor, label):
n_class = len(label)
imp = deep_explain_layer_conductance(model, model_layer, input_tensor, n_class=n_class)
imp = imp.max(-1).mean(1)
df = pd.DataFrame(imp, index=label)
return df
def plot_label_neuron_importance(model, model_layer, input_tensor, label):
df = label_neuron_importance(model, model_layer, input_tensor, label)
plt.figure(figsize=(30,4))
ax = sns.heatmap(df, cmap="Greys")
plt.savefig("label_neuron_importance.pdf")
plt.show()
plt.close()
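# --- Hypothetical usage sketch (not part of the original file) ---
# `model`, a one-hot `input_tensor` of shape (batch, 4, seq_len), a `model_layer`
# (any nn.Module inside the model) and a `label` list are assumed to exist:
# grads = contribution_input_grad(model, input_tensor)               # input-x-gradient attribution
# sal = deep_explain_saliancy(model, input_tensor, n_class=3)        # per-class saliency maps
# imp = label_neuron_importance(model, model_layer, input_tensor, label)  # conductance per label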
```
#### File: NvTK/Explainer/Motif.py
```python
import logging
import numpy as np
def trim_ic(motif, cutoff=0.4, background=0.25):
"""Trim motif based on IC(Bernouli)"""
H = (motif * np.log2(motif / background + 1e-6)).sum(0)
where = np.where(H > cutoff)[0]
motif = motif[:, where.min():where.max()+1]
return motif
def calc_motif_IC(motif, background=0.25):
"""Motif IC Bernouli"""
H = (motif * np.log2(motif / background + 1e-6)).sum()
logging.info("Motif IC(Bernouli): %.4f" % H)
return H
def info_content(pwm, bg=0.5):
"""Motif IC Bernouli"""
pseudoc = 1e-6
bg_pwm = [1-bg, bg, bg, 1-bg]
ic = 0
for i in range(pwm.shape[0]):
for j in range(4):
ic += -bg_pwm[j]*np.log2(bg_pwm[j]) + pwm[i][j]*np.log2(pseudoc + pwm[i][j])
return ic
def calc_motif_entropy(motif, background=0.25):
'''Motif Entropy'''
H = -(motif * np.log2(motif / background + 1e-6)).sum()
logging.info("Motif Entropy: %.4f" % H)
return H
def calc_motif_frequency(motif_IC):
'''Motif Frequency'''
f = np.power(2, -(motif_IC - 1))
logging.info("Motif Frequency: %.4f" % f)
return f
def calc_frequency_W(W, background=0.25):
'''Calculate motif Frequency in pwms'''
motif_frequency_l, motif_IC_l = [], []
for pwm in W:
pwm = normalize_pwm(pwm)
motif_IC = calc_motif_IC(pwm)
motif_freq = calc_motif_frequency(motif_IC)
motif_IC_l.append(motif_IC); motif_frequency_l.append(motif_freq)
return motif_frequency_l, motif_IC_l
def normalize_pwm(pwm, factor=None, max=None):
'''Normalize pwm'''
if not max:
max = np.max(np.abs(pwm))
pwm = pwm/max
if factor:
pwm = np.exp(pwm*factor)
norm = np.outer(np.ones(pwm.shape[0]), np.sum(np.abs(pwm), axis=0))
pwm = pwm/norm
pwm[np.isnan(pwm)] = 0.25 # background
return pwm
def meme_generate(W, output_file='meme.txt', prefix='Motif_'):
'''Generate meme file for pwms'''
# background frequency
nt_freqs = [1./4 for i in range(4)]
# open file for writing
f = open(output_file, 'w')
# print intro material
f.write('MEME version 4\n')
f.write('\n')
f.write('ALPHABET= ACGT\n')
f.write('\n')
f.write('strands: + -\n')
f.write('\n')
f.write('Background letter frequencies:\n')
f.write('A %.4f C %.4f G %.4f T %.4f \n' % tuple(nt_freqs))
f.write('\n')
for j in range(len(W)):
pwm = normalize_pwm(W[j])
f.write('MOTIF %s%d %d\n' % (prefix, j, j))
f.write('\n')
f.write('letter-probability matrix: alength= 4 w= %d nsites= %d E= 0\n' % (pwm.shape[1], pwm.shape[1]))
for i in range(pwm.shape[1]):
f.write(' %.4f\t %.4f\t %.4f\t %.4f\t\n' % tuple(pwm[:,i]))
f.write('\n')
f.close()
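# --- Hypothetical usage sketch (not part of the original file) ---
# W is assumed to be an array of first-layer filters shaped (n_filters, 4, width);
# the random array below is only a placeholder for real learned filters.
# W = np.random.rand(8, 4, 19)
# freqs, ics = calc_frequency_W(W)          # per-motif information content and frequency
# meme_generate(W, output_file='meme.txt')  # write a MEME-format motif file (e.g. for Tomtom)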
```
#### File: NvTK/Modules/MultiTask.py
```python
import logging
from copy import copy
import numpy as np
import torch
from torch import nn
import networkx as nx
LAYER_KEY = 'layers'
NAME_KEY = 'name'
ANCHOR_KEY = 'anchor_layer'
LOSS_KEY = 'loss'
LOSS_REG_KEY = 'loss_weight'
AUTO_WEIGHT_KEY = 'auto'
WEIGHT_INIT_KEY = 'loss_init_val'
MISSING_WEIGHT_MSG = "Expect {0} for task {1} but none provided."
class MTLModel(nn.Module):
"""
A torch.nn.Module built from a set of shared and task specific layers
Attributes
----------
g : networkx.Graph
The meta-computation graph
task_layers : list
A list which holds the layers for which to build the computation graph
output_tasks : list
A list which holds the tasks for which the output should be returned
layer_names : list
A list of the names of each layer
losses : dict
A dictionary which maps the name of a layer to its loss function
loss_weights : dict
A dictionary which maps the name of a layer to the weight of its loss
function
"""
def __init__(self, task_layers, output_tasks):
super(MTLModel, self).__init__()
self.task_layers = task_layers
self.output_tasks = output_tasks
self.layer_names = [t[NAME_KEY] for t in task_layers]
self._initialize_graph()
self._initialize_losses()
self._initialize_loss_weights()
def _initialize_losses(self):
self.losses = {task[NAME_KEY]: task[LOSS_KEY] for task in self.task_layers if LOSS_KEY in task.keys()}
def _initialize_loss_weights(self):
self.loss_weights = {}
for task in self.task_layers:
self._set_loss_weight(task)
def _set_loss_weight(self, task):
task_name = task[NAME_KEY]
if LOSS_REG_KEY in task.keys():
if task[LOSS_REG_KEY] == AUTO_WEIGHT_KEY:
assert WEIGHT_INIT_KEY in task.keys(),\
MISSING_WEIGHT_MSG.format(WEIGHT_INIT_KEY, task_name)
loss_weight = task[WEIGHT_INIT_KEY]
loss_name = f'{task_name}_loss'
loss_weight = torch.nn.Parameter(torch.full((1,), loss_weight))
setattr(self, loss_name, loss_weight)
self.loss_weights[task_name] = getattr(self, loss_name)
else:
self.loss_weights[task_name] = task[LOSS_REG_KEY]
def _initialize_graph(self):
self.g = nx.DiGraph()
self.g.add_node('root')
self._build_graph()
def _bfs_forward(self, start_node):
''' Here we iteratore through the graph in a BFS-fashion starting from
`start_node`, typically this is the `root` node. This node is skipped
and we pass the input data and resulting outputs from all layers foward.
'''
visited = {node: False for node in self.layer_names}
# First node is visited
queue = [start_node]
visited[start_node] = True
while queue:
node = queue.pop(0)
if node != start_node:
input_nodes = self.g.predecessors(node)
if logging.getLogger().level == logging.DEBUG:
l = copy(input_nodes)
print(f"Feeding output from {list(l)} into {node}")
cur_layer = getattr(self, node)
# Get the output from the layers that serve as input
output_pre_layers = []
output_complete = True
for n in input_nodes:
# If an output is not ready yet, because that node has not
# been computed, we put the current node back into the queue
if n not in self.outputs.keys():
if logging.getLogger().level == logging.DEBUG:
print(f"No output for layer {n} yet")
output_complete = False
break
else:
output_pre_layers.append(self.outputs[n])
if not output_complete:
if logging.getLogger().level == logging.DEBUG:
print(f"Putting {node} back into the queue.")
queue.append(node)
else:
cur_output = cur_layer(*output_pre_layers)
self.outputs[node] = cur_output
for i in self.g.successors(node):
if visited[i] == False:
queue.append(i)
visited[i] = True
losses, loss_weights = self._get_losses()
return [self.outputs[t] for t in self.output_tasks], losses, loss_weights
def _get_losses(self):
losses = []
loss_weights = []
for t in self.output_tasks:
losses.append(self.losses.get(t))
loss_weights.append(self.loss_weights.get(t))
return losses, loss_weights
def _build_graph(self):
for layer in self.task_layers:
self._add_layer(layer)
self._add_to_graph(layer)
def _add_to_graph(self, layer):
layer_name = layer[NAME_KEY]
self._add_node(layer_name)
if 'anchor_layer' not in layer.keys():
# If there is no anchor layer, we expect it to be a layer which
# receives data inputs and is hence connected to the root node
self.g.add_edge('root', layer_name)
else:
anchor_layer = layer[ANCHOR_KEY]
if isinstance(anchor_layer, list):
for a_l_name in anchor_layer:
self._add_node(a_l_name)
self.g.add_edge(a_l_name, layer_name)
else:
self._add_node(anchor_layer)
self.g.add_edge(anchor_layer, layer_name)
def _add_node(self, layer):
if isinstance(layer, str):
layer_name = layer
self.g.add_node(layer_name)
else:
layer_name = layer[NAME_KEY]
self.g.add_node(layer_name)
if 'anchor_layer' not in layer.keys():
self.g.add_edge('root', layer_name)
def _add_layer(self, layer):
layer_modules = layer[LAYER_KEY]
layer_name_main = layer[NAME_KEY]
setattr(self, layer_name_main, layer_modules)
def forward(self, input):
self.outputs = {'root': input}
return self._bfs_forward('root')
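# --- Hypothetical usage sketch (not part of the original file) ---
# Builds a tiny two-task model: one shared encoder feeding two task-specific heads.
# All names, sizes and losses below are illustrative assumptions.
# task_layers = [
#     {'name': 'encoder', 'layers': nn.Sequential(nn.Linear(16, 8), nn.ReLU())},
#     {'name': 'task_a', 'layers': nn.Linear(8, 1), 'anchor_layer': 'encoder',
#      'loss': nn.MSELoss(), 'loss_weight': 1.0},
#     {'name': 'task_b', 'layers': nn.Linear(8, 1), 'anchor_layer': 'encoder',
#      'loss': nn.BCEWithLogitsLoss(), 'loss_weight': 0.5},
# ]
# model = MTLModel(task_layers, output_tasks=['task_a', 'task_b'])
# (y_a, y_b), losses, loss_weights = model(torch.rand(4, 16))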
class ConcatenateTask(nn.Module):
def forward(self, x):
return torch.cat(x, dim=1)
class MultiTaskWrapper(nn.Module):
def __init__(self, sub_models, concat_dim=1):
"""
The multi-model wrapper class can be used to concatenate the
outputs of multiple models along a pre-specified axis. The wrapper
can be used to load and run multiple trained models during prediction
functions. This class should not be used for training.
Parameters
----------
sub_models : list(torch.nn.Module)
The 'sub-models' that are used in this multi-model wrapper class.
concat_dim : int, optional
Default is 1. The dimension along which to concatenate the models'
predictions.
"""
super().__init__()
self.sub_models = sub_models
self._concat_dim = concat_dim
def cuda(self):
for sm in self.sub_models:
sm.cuda()
def eval(self):
for sm in self.sub_models:
sm.eval()
def forward(self, x):
return torch.cat([sm(x) for sm in self.sub_models], self._concat_dim)
class MTLTrainer(object):
def __init__(self, model, criterion, optimizer, device):
super().__init__()
# train one epoch
self.model = model.to(device)
self.device = device
self.criterion = criterion
self.optimizer = optimizer
def train_per_epoch(self, train_loader, epoch, verbose_step=5):
batch_losses = []
self.model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(self.device), target.to(self.device)
self.optimizer.zero_grad()
# Our model will return a list of predictions (from the layers specified in `output_tasks`),
# loss functions, and regularization parameters (as defined in the tasks variable)
y_hat, l_funcs, l_weights = self.model(data)
loss = 0
idx = 0
            # We can now iterate over the tasks and accumulate the losses.
            # NOTE: `anno_cnt` (the per-task target-column counts, e.g. a pandas Series) is
            # assumed to be defined in the surrounding scope; it is not created in this module.
for i, cnt in enumerate(anno_cnt.values):
loss += l_weights[i] * l_funcs[i](y_hat[i], target[:,idx:idx+cnt])
idx += cnt
# self.criterion(output, target)
loss.backward()
self.optimizer.step()
if verbose_step:
if batch_idx % verbose_step == 0:
logging.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data))
batch_losses.append(loss.cpu().item())
average_loss = np.average(batch_losses)
return average_loss
def evaluate(self, data):
eval_loader = data
batch_losses, all_predictions, all_targets = [], [], []
self.model.eval()
with torch.no_grad():
for inputs, targets in eval_loader:
inputs, targets = inputs.to(self.device), targets.to(self.device)
y_hat, l_funcs, l_weights = self.model(inputs)
loss = 0
idx = 0
# We can now iterate over the tasks and accumulate the losses
for i, cnt in enumerate(anno_cnt.values):
loss += l_weights[i] * l_funcs[i](y_hat[i], targets[:,idx:idx+cnt])
idx += cnt
test_loss = loss
batch_losses.append(test_loss.cpu().item())
output = torch.cat(y_hat, dim=1)
all_predictions.append(output.cpu().data.numpy())
all_targets.append(targets.cpu().data.numpy())
average_loss = np.average(batch_losses)
all_predictions = np.vstack(all_predictions)
all_targets = np.vstack(all_targets)
return average_loss, all_predictions, all_targets
def get_current_model(self):
return self.model
```
#### File: NvTK/NvTK/Trainer.py
```python
import os, time, shutil, logging, pickle, random
import numpy as np
import torch
from .Evaluator import calculate_correlation, calculate_roc
class Trainer(object):
"""Model Trainer in NvTK.
Trainer class could train and validate a NvTK or pytorch-based model.
Trainer saved the model parameters to `Log` dirs, overwriting it
if the latest validation performance is better than
the previous best-performing model.
Trainer maintain a Log dict to record the loss and metric
during training, Trainer will save the Log to `Figures`.
Parameters
----------
model : NvTK.model, torch.nn.module
The NvTK model architecture,
or torch module container of parameters and methods.
criterion : torch.nn.module
The criterion of loss function,
criterion was optimized during training.
optimizer : torch.optim
The optimizer to update the weights of Parameter,
for minimizing the criterion during training.
device : torch.device
Context-manager that changes the selected device,
representing the device on which the model is or will be allocated,
('cuda' or 'cpu').
tasktype : str, optional
Specify the task type, Default is "regression".
(e.g. `tasktype="binary_classification"`)
metric_sample : int, optional
The number of sampled tasks to metric. Default is 100.
For multi-task learning with more than `metric_sample` task,
we will sample tasks when calculating the metric values.
item_sample : int, optional
The number of sampled items to metric. Default is 50000.
For dataset with more than `item_sample` items,
we will sample items when calculating the metric values.
patience : int, optional
The number of patience in early stopping methods, Default is 10.
Early stopping could not work with a very large patience,
set `patience=np.inf` to omit early stopping.
resume : bool, optional
Whether to resume model training, Default is False.
resume_dirs : str, optional
The directory in which to resume model training, Default is False.
use_tensorboard : bool, optional
Whether to use tensorboard. Default is False.
tensorboard_args : dict, optional
tensorboard arguments.
Attributes
----------
model :
The container of parameters and methods.
device : str
An object representing the device on which the model is or will be allocated.
('cuda' or 'cpu').
criterion :
The criterion the model aims to minimize.
optimizer :
The algorithm that updates the weights of Parameter during the backward step.
patience : int
The number of epochs to be trained after activating early stopping.
pred_prob : bool
To judge the task type.
metric_sample : int
The number of metric sample.
item_sample : int
The number of item sample.
logs: dict
logs maintains loss and metric information during training.
tensorboard : bool
Whether to use tensorboard.
writer :
Write tensorboard arguments to the summary.
"""
def __init__(self, model, criterion, optimizer, device, tasktype="regression",
metric_sample=100, item_sample=50000, patience=10,
resume=False, resume_dirs=None,
use_tensorbord=False, tensorbord_args={}):
super().__init__()
self.model = model.to(device)
self.device = device
self.criterion = criterion
self.optimizer = optimizer
self.patience = patience
# task related
self.pred_prob = tasktype != "regression"
# sample tasks for calculate metric
self.metric_sample = metric_sample
self.item_sample = item_sample
# Trainer will maintain logs information
self.logs = {
"train_batch_loss_list":[],
"val_batch_loss_list":[],
"test_batch_loss_list":[],
"train_loss_list":[],
"val_loss_list":[],
"test_loss_list":[],
"val_metric_list":[],
"test_metric_list":[],
"train_metric_list":[],
"lrs":[],
"best_val_loss":np.inf,
"best_val_r":-np.inf,
"best_val_epoch":0,
}
# TODO resume model training
if resume and resume_dirs:
pass
# TODO capture error when loading tensorbord in lower version
if use_tensorbord:
from torch.utils.tensorboard import SummaryWriter
self.tensorbord = True
self.writer = SummaryWriter(**tensorbord_args)
else:
self.tensorbord = False
def train_until_converge(self, train_loader, validate_loader, test_loader, EPOCH, resume=False, verbose_step=5):
"""
Train until converge.
Parameters
----------
train_loader :
The data loader defined by using training dataset.
validate_loader :
The data loader defined by using validation dataset.
test_loader :
The data loader defined by using testing dataset.
EPOCH : int
An adjustable hyperparameter, The number of times to train the model until converge.
resume : bool
Whether to resume the model training. Default is False.
verbose_step : int
The number of steps to print the loss value. Default is 5.
Attributes
----------
tensorboard : bool
Whether to use tensorboard.
add_graph :
To iterate and visualize some samples in the training dataset.
train_per_epoch :
Train each epoch.
predict :
To predict based on the input data
evaluate :
To evaluate the performance of the model.
save_checkpoint :
save checkpoint.
writer :
To add scalar or graph to the summary.
load_best_model :
load the best model.
"""
# graph
if self.tensorbord:
try:
self.add_graph(next(iter(train_loader))[0][:2].shape)
except BaseException as e:
logging.warning("tensorbord cannot added graph")
logging.warning(e)
for epoch in range(EPOCH):
# train
            train_batch_loss, train_loss = self.train_per_epoch(train_loader, epoch, verbose_step=verbose_step)
# train metrics
train_batch_loss, train_loss, train_pred_prob, train_target_prob = self.predict(train_loader)
train_metric = self.evaluate(train_pred_prob, train_target_prob)
# validation metrics
val_batch_loss, val_loss, val_pred_prob, val_target_prob = self.predict(validate_loader)
val_metric = self.evaluate(val_pred_prob, val_target_prob)
# test metrics
test_batch_loss, test_loss, test_pred_prob, test_target_prob = self.predict(test_loader)
test_metric = self.evaluate(test_pred_prob, test_target_prob)
# lr_scheduler
_lr = self.optimizer.param_groups[0]['lr']
# lr_scheduler.step(val_loss)
# logs
logging.info("Train\t Accuracy: %.4f\t Loss: %.4f\t\n" % (train_metric, train_loss))
logging.info("Eval\t Accuracy: %.4f\t Loss: %.4f\t\n" % (val_metric, val_loss))
self.logs['train_batch_loss_list'].extend(train_batch_loss)
self.logs['val_batch_loss_list'].extend(val_batch_loss)
self.logs['test_batch_loss_list'].extend(test_batch_loss)
self.logs['train_loss_list'].append(train_loss)
self.logs['val_loss_list'].append(val_loss)
self.logs['test_loss_list'].append(test_loss)
self.logs['train_metric_list'].append(train_metric)
self.logs['val_metric_list'].append(val_metric)
self.logs['test_metric_list'].append(test_metric)
self.logs['lrs'].append(_lr)
self.show_trainer_log()
if self.tensorbord:
self.writer.add_scalar('Loss/train', train_loss, epoch)
self.writer.add_scalar('Loss/test', test_loss, epoch)
self.writer.add_scalar('Accuracy/train', train_metric, epoch)
self.writer.add_scalar('Accuracy/test', test_metric, epoch)
try:
self.writer.add_histogram('Embedding.conv.bias', self.model.Embedding.conv.bias, epoch)
self.writer.add_histogram('Embedding.conv.weight', self.model.Embedding.conv.weight, epoch)
except:
pass
# update best
if val_loss < self.logs["best_val_loss"] or val_metric > self.logs["best_val_r"]:
self.logs["best_val_loss"] = val_loss
self.logs["best_val_r"] = val_metric
self.logs["best_val_epoch"] = epoch
logging.info("Eval\t Best Eval Accuracy: %.4f\t Loss: %.4f\t at Epoch: %d\t lr: %.8f\n" % (val_metric, val_loss, epoch, _lr))
logging.info("Eval\t Test Accuracy: %.4f\t Loss: %.4f\n" % (test_metric, test_loss))
self.save_checkpoint(best=True)
# or checkpoint
elif epoch % 20 == 1:
self.save_checkpoint()
# early stop
if epoch >= self.logs["best_val_epoch"] + self.patience: #patience_epoch:
break
# final
fname = time.strftime("./Log/best_model@" + '%m%d_%H:%M:%S')
shutil.copyfile("./Log/best_model.pth", fname+".params.pth")
pickle.dump(self.logs, open(fname+".chekc_train_log.p", 'wb'))
# final logs
self.show_trainer_log(final=True)
logging.info("\n"+self.model.__str__()+'\n')
logging.info("Best Val Loss\t%.8f\t@epoch%d" % (self.logs['best_val_loss'], self.logs["best_val_epoch"]))
logging.info("Best Val Metric\t%.8f\t@epoch%d" % (self.logs['best_val_r'], self.logs["best_val_epoch"]))
logging.info("Best Test Metric\t%.8f\t@epoch%d" % (np.max(self.logs['test_metric_list']), np.argmax(self.logs['test_metric_list'])))
logging.info("Best Test Loss\t%.8f\t@epoch%d" % (np.min(self.logs['test_loss_list']), np.argmin(self.logs['test_loss_list'])))
self.load_best_model()
def train_per_epoch(self, train_loader, epoch, verbose_step=5):
"""
Train each epoch.
Parameters
----------
train_loader :
The data loader defined by using training dataset.
epoch : int
An adjustable hyperparameter, The number of times to train the model until converge.
verbose_step : int
The number of steps to print the loss value. Default is 5.
Attributes
----------
train_batch :
To train the batch fetched from the train_loader.
Returns
-------
batch_losses : list
The total loss values of all batches.
average_loss : list
The average of the losses of batches.
"""
# train one epoch
batch_losses = []
self.model.train()
for batch_idx, (data, target) in enumerate(train_loader):
loss = self.train_batch(data, target)
if batch_idx % verbose_step == 0:
logging.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss))
batch_losses.append(loss)
average_loss = np.average(batch_losses)
return batch_losses, average_loss
def train_batch(self, data, target):
"""
        To measure the loss of a single batch.
Parameters
----------
data : numpy.ndarray
The input data.
target : numpy.ndarray
True value that the user model was trying to predict.
Attributes
----------
model :
The container of parameters and methods.
device : str
An object representing the device on which the model is or will be allocated.
('cuda' or 'cpu').
criterion :
The criterion the model aims to minimize.
optimizer :
The algorithm that updates the weights of Parameter during the backward step.
Returns
-------
loss : float
The loss value.
"""
self.model.train()
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.cpu().item()
def predict(self, data_loader):
"""
To predict based on the input data.
parameters
----------
data_loader :
The object to prepare the data for training, which retrieves batches of features and labels from the dataset iteratively.
Attributes
----------
model :
The container of parameters and methods.
device : str
An object representing the device on which the model is or will be allocated.
('cuda' or 'cpu').
criterion :
The criterion the model aims to minimize.
Returns
-------
batch_losses : list
The total loss values of all batches.
average_loss : list
The average of the losses of batches.
all_predictions : array
All the predictions of the input data organized in arrays.
all_targets : array
All the targets organized in arrays.
"""
batch_losses, all_predictions, all_targets = [], [], []
self.model.eval()
with torch.no_grad():
for inputs, targets in data_loader:
inputs, targets = inputs.to(self.device), targets.to(self.device)
output = self.model(inputs)
test_loss = self.criterion(output, targets)
batch_losses.append(test_loss.cpu().item())
all_predictions.append(output.cpu().data.numpy())
all_targets.append(targets.cpu().data.numpy())
average_loss = np.average(batch_losses)
all_predictions = np.vstack(all_predictions)
all_targets = np.vstack(all_targets)
return batch_losses, average_loss, all_predictions, all_targets
def evaluate(self, predict_prob, target_prob, sample_tasks=True, sample_items=True):
"""
To evaluate the performance of the model.
Parameters
----------
predict_prob : list
prediction probability
target_prob : list
target probability
sample_tasks : bool
Whether to sample tasks. Default is TRUE.
sample_items : bool
Whether to sample items. Default is TRUE.
Attributes
----------
metric_sample : int
The number of metric sample.
item_sample : int
The number of item sample.
pred_prob : bool
Whether to calculate AUC-ROC curve.
Returns
-------
metric : float
The arithmetic mean of the auc/correlation, which is calculated by target probability and prediction probability.
"""
item_size, output_size = target_prob.shape
if self.metric_sample < output_size and sample_tasks:
metric_sample_idx = random.sample(range(output_size), self.metric_sample)
logging.info("sampled %d tasks for metric" % self.metric_sample)
target_prob = target_prob[:, metric_sample_idx]
predict_prob = predict_prob[:, metric_sample_idx]
if self.item_sample < item_size and sample_items:
item_sample_idx = random.sample(range(item_size), self.item_sample)
logging.info("sampled %d items for metric" % self.item_sample)
target_prob = target_prob[item_sample_idx, :]
predict_prob = predict_prob[item_sample_idx, :]
if self.pred_prob:
fpr, tpr, roc_auc = calculate_roc(target_prob, predict_prob)
roc_l = [roc_auc[k] for k in roc_auc.keys() if roc_auc[k] >=0 and k not in ["macro", "micro"]]
metric = np.mean(roc_l)
else:
correlation, pvalue = calculate_correlation(target_prob, predict_prob)
correlation_l = [correlation[k] for k in correlation.keys() if k not in ["macro", "micro"]] # dict keys ordered by default in py3.7+
metric = np.mean(correlation_l)
return metric
def add_graph(self, input_shape=(2, 4, 1000)):
"""
add graph.
Parameters
----------
input_shape : tuple
"""
self.writer.add_graph(self.model, torch.rand(input_shape).to(self.device))
def load_best_model(self, fpath="./Log/best_model.pth"):
"""
load the best model.
Parameters
----------
fpath : str
The file path of the best model.
"""
self.model.load(fpath)
return self.model
def get_current_model(self):
"""
get the current model.
"""
return self.model
def save_checkpoint(self, best=False):
"""
save checkpoint.
Parameters
----------
best : bool
Whether the current model is the best model. Default is False.
"""
os.makedirs("./Log", exist_ok=True)
self.model.save("./Log/chekc_model.pth")
torch.save(self.model, "./Log/chekc_model.p")
pickle.dump(self.logs, open("./Log/chekc_train_log.p", 'wb'))
if best:
self.model.save("./Log/best_model.pth")
torch.save(self.model, "./Log/best_model.p")
def show_trainer_log(self, final=False):
show_train_log(self.logs['train_loss_list'],
self.logs['train_metric_list'],
self.logs['val_loss_list'],
self.logs['val_metric_list'],
self.logs['test_loss_list'],
self.logs['test_metric_list'],
self.logs['lrs'], output_fname='current_logs.pdf')
show_train_log(loss_train=self.logs['train_loss_list'],
loss_val=self.logs['val_loss_list'],
loss_test=self.logs['test_loss_list'],
output_fname='current_logs_loss.pdf')
show_train_log(loss_val=self.logs['val_batch_loss_list'], output_fname='current_batch_loss_val.pdf')
show_train_log(loss_test=self.logs['test_batch_loss_list'], output_fname='current_batch_loss_test.pdf')
show_train_log(loss_train=self.logs['train_batch_loss_list'], output_fname='current_batch_loss_train.pdf')
show_train_log(loss_val=self.logs['val_loss_list'], output_fname='current_loss_val.pdf')
show_train_log(loss_test=self.logs['test_loss_list'], output_fname='current_loss_test.pdf')
show_train_log(loss_train=self.logs['train_loss_list'], output_fname='current_loss_train.pdf')
show_train_log(acc_val=self.logs['train_metric_list'], output_fname='current_acc_train.pdf')
show_train_log(acc_val=self.logs['val_metric_list'], output_fname='current_acc_val.pdf')
show_train_log(acc_test=self.logs['test_metric_list'], output_fname='current_acc_test.pdf')
show_train_log(lrs=self.logs['lrs'], output_fname='current_lrs.pdf')
if final:
show_train_log(loss_val=self.logs['val_loss_list'], output_fname='final_loss_val.pdf')
show_train_log(loss_train=self.logs['train_loss_list'], output_fname='final_loss_train.pdf')
show_train_log(acc_val=self.logs['train_metric_list'], output_fname='final_acc_train.pdf')
show_train_log(acc_val=self.logs['val_metric_list'], output_fname='final_acc_val.pdf')
show_train_log(acc_test=self.logs['test_metric_list'], output_fname='final_acc_test.pdf')
show_train_log(lrs=self.logs['lrs'], output_fname='final_lrs.pdf')
def show_train_log(loss_train=None, loss_val=None, acc_val=None,
loss_test=None, acc_test=None,
lrs=None,
fig_size=(12,8),
save=True,
output_dir='Figures',
output_fname="Training_loss_log.pdf",
style="seaborn-colorblind",
fig_title="Training Log",
dpi=500):
"""function show train log in NvTK.
Parameters
----------
loss_train : list
traing loss
loss_val : list
validation loss
kernel_size : int, optional
Size of the convolving kernel
"""
os.makedirs(output_dir, exist_ok=True)
import matplotlib
backend = matplotlib.get_backend()
if "inline" not in backend:
matplotlib.use("PDF")
import matplotlib.pyplot as plt
plt.style.use(style)
plt.figure()
if loss_train:
plt.plot(range(1, len(loss_train)+1), loss_train, 'b', label='Training Loss')
if loss_val:
plt.plot(range(1, len(loss_val)+1), loss_val, 'r', label='Validation Loss')
if loss_test:
plt.plot(range(1, len(loss_test)+1), loss_test, 'black', label='Test Loss')
rate = 1
if acc_val:
plt.plot(range(1, len(acc_val)+1), list(map(lambda x: x*rate, acc_val)), 'g', label=str(rate)+'X Validation Accuracy')
if acc_test:
plt.plot(range(1, len(acc_test)+1), list(map(lambda x: x*rate, acc_test)), 'purple', label=str(rate)+'X Test Accuracy')
if lrs:
rate = int(1/lrs[0])
plt.plot(range(1, len(lrs)+1), list(map(lambda x: x*rate, lrs)), 'y', label=str(rate)+'X Learning Rates')
plt.title(fig_title)
plt.legend()
if save:
plt.savefig(os.path.join(output_dir, output_fname),
format="pdf", dpi=dpi)
else:
plt.show()
plt.close()
``` |
{
"source": "jiaqiluo/grammarAnalyzer",
"score": 4
} |
#### File: jiaqiluo/grammarAnalyzer/grammarAnalyzer.py
```python
import os, json
def load_in_file():
filename = raw_input("Enter the filename(eg. test1.json): ")
if not os.path.isfile(filename):
print "Error: no such file in current dirctory."
return load_in_file()
f = open(filename)
content = json.load(f)
return content
def get_description(content):
if content:
return content['description']
else:
return False
def get_grammar(content):
if content:
return content['rule']
else:
return False
def get_test_string():
test_string = ''
while not test_string:
test_string = raw_input("\nEnter string: ")
return test_string
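# Illustrative grammar format (an assumption inferred from how parser() and
# find() index it): the JSON "rule" entry maps each non-terminal to a list of
# productions, each written as a list of symbols, e.g.
#   {"S": [["a", "S", "b"], ["c"]]}
# parser() then runs a stack-based leftmost derivation, picking the production
# whose first symbol matches the next input character.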
def parser(grammar, test_string):
    stack = []  # parse stack of pending grammar symbols
    stack.append('S')  # start from the grammar's start symbol 'S'
while len(test_string) != 0:
if len(stack) == 0:
return False
element = stack.pop()
if element in grammar:
index = find(grammar[element], test_string[0])
if index < 0:
return False
else:
for i in reversed(grammar[element][index]):
stack.append(i)
else:
if element == test_string[0]:
                # consume the first character of the test string
test_string = test_string[1:len(test_string)]
else:
return False
if len(stack) != 0:
return False
else:
return True
def find(rules, letter):
index = -1
for i in range(len(rules)):
if rules[i][0] == letter:
index = i
return index
def main():
sample = load_in_file()
print "The grammar is: \n\t\t" + get_description(sample)
again = True
while again:
test_string = get_test_string()
if parser(get_grammar(sample), test_string):
print "ACCEPT"
else:
print "REJECT"
response = raw_input("Again(y/n)? ")
while response not in ['Y', 'y', 'n', 'N']:
print "Enter 'y' or 'Y' or 'n' or 'N'"
response = raw_input("Again(y/n)? ")
if response in ['n', 'N']:
again = False
if __name__ == "__main__":
main()
``` |
{
"source": "jiaqiluo/rancher-monitoring-v1-to-v2",
"score": 2
} |
#### File: jiaqiluo/rancher-monitoring-v1-to-v2/migrate_rules.py
```python
import click
import requests
import json
import yaml
import base64
import sys
from collections import OrderedDict
ComparisonHasValue = "has-value"
ComparisonEqual = "equal"
ComparisonNotEqual = "not-equal"
ComparisonGreaterThan = "greater-than"
ComparisonLessThan = "less-than"
ComparisonGreaterOrEqual = "greater-or-equal"
ComparisonLessOrEqual = "less-or-equal"
comparisonMap = {
ComparisonHasValue: "",
ComparisonEqual: "==",
ComparisonNotEqual: "!=",
ComparisonGreaterThan: ">",
ComparisonLessThan: "<",
ComparisonGreaterOrEqual: ">=",
ComparisonLessOrEqual: "<=",
}
NoResourcesToMigrate = """
Warning: Cluster Alerting seems to be disabled.
Could not extract any Alertmanager Config or any PrometheusRule resources from this cluster.
If you believe resources should have been picked up for migration, please file a bug or feature request with Rancher at https://github.com/rancher/rancher/issues/new.
"""
WarnAlertmanagerConfigSecretDNE = """
Warning: Cluster Alerting seems to be disabled.
Could not extract any Alertmanager Config for this cluster.
Any metric-based Alerting Groups / Alerting Rules have been outputted as PrometheusRule resources.
However, you will need to manually configure Routes and Receivers to set up notifications based on those alerts.
See Rancher docs for more information on how to configure notifications on alerts in Monitoring V2."""
class quoted(str):
pass
def quoted_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
yaml.add_representer(quoted, quoted_presenter)
class literal(str):
pass
def literal_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
yaml.add_representer(literal, literal_presenter)
def ordered_dict_presenter(dumper, data):
return dumper.represent_dict(data.items())
@click.command()
@click.option('--rancher-url', required=True, help="URL to source Rancher")
@click.option('--rancher-api-token', required=True, help="API Token for source Rancher")
@click.option('--cluster-id', required=True, help="ID for source cluster")
@click.option('--insecure', help="If set, do not verify tls certificates", is_flag=True)
def migrate(rancher_url, rancher_api_token, cluster_id, insecure):
verify=not insecure
requests.packages.urllib3.disable_warnings()
yaml.add_representer(OrderedDict, ordered_dict_presenter)
headers = {
"authorization": "Bearer %s" % rancher_api_token
}
# Get System Project
projects_url = "%s/v3/projects" % (rancher_url)
projects_response = requests.get(projects_url, headers=headers, verify=verify)
projects = json.loads(projects_response.content)
for project in projects["data"]:
if project["clusterId"] != cluster_id:
continue
if "authz.management.cattle.io/system-project" in project["labels"]:
system_project_id = project["id"]
break
# Get Alertmanager Config
alerting_config_url = "%s/v3/project/%s/namespacedSecrets/cattle-prometheus:alertmanager-cluster-alerting" % (rancher_url, system_project_id)
alerting_config_response = requests.get(alerting_config_url, headers=headers, verify=verify)
alerting_enabled = (alerting_config_response.status_code != 404)
if alerting_enabled:
alerting_enabled=True
alerting_config = json.loads(alerting_config_response.content)
alertmanager_yaml = yaml.safe_load(
base64_decode(alerting_config["data"]["alertmanager.yaml"])
)
# Get Notifiers by ID
notifiers_by_id = {}
notifiers_url = "%s/v3/notifier" % (rancher_url)
notifiers_response = requests.get(notifiers_url, headers=headers, verify=verify)
notifiers = json.loads(notifiers_response.content)
for notifier in notifiers["data"]:
if notifier["clusterId"] != cluster_id:
continue
notifier_id = notifier["id"]
notifiers_by_id[notifier_id] = {
"name": notifier["name"],
"group_ids": []
}
# Gather PrometheusRules from AlertGroups / AlertRules
prometheus_rules = []
cluster_alert_groups_url = "%s/v3/clusterAlertGroups" % (rancher_url)
project_alert_groups_url = "%s/v3/projectAlertGroups" % (rancher_url)
for group_url in [cluster_alert_groups_url, project_alert_groups_url]:
alert_groups_response = requests.get(group_url, headers=headers, verify=verify)
alert_groups = json.loads(alert_groups_response.content)
for alert_group in alert_groups["data"]:
if "projectId" in alert_group:
if alert_group["projectId"].split(":")[0] != cluster_id:
continue
if alert_group["projectId"] == system_project_id:
continue
alert_rules_response = requests.get(alert_group["links"]["projectAlertRules"], headers=headers, verify=verify)
else:
if alert_group["clusterId"] != cluster_id:
continue
alert_rules_response = requests.get(alert_group["links"]["clusterAlertRules"], headers=headers, verify=verify)
alert_rules = json.loads(alert_rules_response.content)
rules = []
for alert_rule in alert_rules["data"]:
if not alert_rule["creatorId"] or not alert_rule["metricRule"]:
continue
metric_rule = alert_rule["metricRule"]
prometheus_expression = get_prometheus_expression(
metric_rule["expression"],
metric_rule["comparison"],
metric_rule["thresholdValue"]
)
message = get_message(
metric_rule["expression"],
metric_rule["comparison"],
metric_rule["thresholdValue"]
)
labels = {"severity": alert_rule["severity"]}
if alerting_enabled:
labels["group_id"] = alert_group["id"]
rule = OrderedDict(**{
"alert": literal(alert_rule["name"]),
"expr": literal(prometheus_expression),
"for": metric_rule["duration"],
"labels": labels,
"annotations": OrderedDict(
message=literal(message),
)
})
rules.append(rule)
if len(rules) == 0:
continue
group_id = alert_group["id"].split(":")[1]
prometheus_rule = {
"apiVersion": "monitoring.coreos.com/v1",
"kind": "PrometheusRule",
"metadata": {
"name": "rancher-alerting-v1-%s" % (group_id),
"namespace": "cattle-monitoring-system",
"labels": {
"source": "rancher-alerting-v1"
},
"annotations": {}
},
"spec": {
"groups": [
OrderedDict(**{
"name": alert_group["name"],
"interval": "%ss" % (alert_group["groupIntervalSeconds"]),
"rules": rules
})
]
}
}
if "description" in alert_group:
prometheus_rule["metadata"]["annotations"]["field.cattle.io/description"] = alert_group["description"]
prometheus_rules.append(prometheus_rule)
if alerting_enabled and "recipients" in alert_group:
for recipient in alert_group["recipients"]:
notifier_id = recipient["notifierId"]
notifiers_by_id[notifier_id]["group_ids"].append(alert_group["id"])
# Create resources
namespace = OrderedDict(**{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"name": "cattle-monitoring-system"
}
})
resources = [namespace]
if alerting_enabled:
alertmanager_yaml = update_alertmanager_config(alertmanager_yaml, notifiers_by_id)
alerting_config = OrderedDict(**{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "alertmanager-rancher-monitoring-alertmanager",
"namespace": "cattle-monitoring-system",
"labels": {
"source": "rancher-alerting-v1"
},
},
"data": {
"alertmanager.yaml": literal(base64_encode(yaml.dump(alertmanager_yaml))),
"notification.tmpl": literal(alerting_config["data"]["notification.tmpl"])
},
"type": "Opaque"
})
resources.append(alerting_config)
        # Print Alertmanager Config as a comment since it's encoded as base64 in the template
print("# Alertmanager Config\n#")
print("# %s" % yaml.dump(alertmanager_yaml).replace("\n", "\n# "))
resources.extend(prometheus_rules)
if len(resources) == 1:
print(NoResourcesToMigrate, file=sys.stderr)
return
if not alerting_enabled:
print(WarnAlertmanagerConfigSecretDNE, file=sys.stderr)
print(yaml.dump_all(resources))
def update_alertmanager_config(alertmanager_yaml, notifiers_by_id):
alertmanager_yaml["route"]["receiver"] = "null"
alertmanager_yaml["route"]["group_by"] = ['job']
alertmanager_yaml["templates"] = ["/etc/alertmanager/config/*.tmpl"]
    # Update to one Receiver per Notifier instead of one per alert group
receiver_by_group_id = {}
receivers = [{"name": "null"}]
for notifier_id in notifiers_by_id:
notifier = notifiers_by_id[notifier_id]
if len(notifier["group_ids"]) == 0:
continue
# Get receiver configuration
first_matching_notifier = notifier["group_ids"][0]
receiver = [r for r in alertmanager_yaml["receivers"] if r["name"] == first_matching_notifier][0]
receiver["name"] = notifier["name"]
receivers.append(receiver)
# Keep track of what group_ids need to be modified
for group_id in notifier["group_ids"]:
receiver_by_group_id[group_id] = receiver["name"]
alertmanager_yaml["receivers"] = receivers
    # Update receivers attached to routes accordingly
alertmanager_yaml["route"]["receiver"] = "null"
alertmanager_yaml["route"]["group_by"] = ['job']
routes = [{
"match": {"alertname": "Watchdog"},
"receiver": "null",
"continue": True
}]
for route in alertmanager_yaml["route"]["routes"]:
# Rule-specific routes are not supported
if "routes" in route:
del route["routes"]
group_id = route["receiver"]
if group_id in receiver_by_group_id:
route["receiver"] = receiver_by_group_id[group_id]
routes.append(route)
alertmanager_yaml["route"]["routes"] = routes
return alertmanager_yaml
def get_prometheus_expression(expression, comparison, threshold_value):
if comparison != ComparisonHasValue:
return "%s%s%s" % (expression, comparisonMap[comparison], threshold_value)
return expression
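# Illustrative outputs (hypothetical inputs):
#   get_prometheus_expression("node_load5", "greater-than", "2")  -> "node_load5>2"
#   get_prometheus_expression("up", "has-value", "")              -> "up"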
def get_message(expression, comparison, threshold_value):
    comparison = comparison.replace("-", " ")
    if "equal" in comparison:
        comparison += " to"
    return "Query %s is %s %s. Current value is {{ $value }}." % (expression, comparison, threshold_value)
def base64_decode(base64_msg):
base64_msg_bytes = base64_msg.encode('ascii')
msg_bytes = base64.b64decode(base64_msg_bytes)
return msg_bytes.decode('ascii')
def base64_encode(msg):
msg_bytes = msg.encode('ascii')
base64_msg_bytes = base64.b64encode(msg_bytes)
return base64_msg_bytes.decode('ascii')
if __name__ == '__main__':
migrate()
``` |
{
"source": "jiaqima/G3NN",
"score": 2
} |
#### File: jiaqima/G3NN/utils.py
```python
import os
import numpy as np
import torch
import scipy.sparse as sps
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
def load_data(dataset="cora",
num_labels_per_class=20,
missing_edge=False,
verbose=0):
# Load data.
path = os.path.join("data", dataset)
if verbose:
print("loading data from %s. %d labels per class." %
(path, num_labels_per_class))
assert dataset in ["cora", "pubmed", "citeseer"]
dataset = Planetoid(
root=path, name=dataset, transform=T.NormalizeFeatures())
data = dataset[0]
data.num_classes = dataset.num_classes
if missing_edge:
assert num_labels_per_class == 20
test_idx = data.test_mask.nonzero().squeeze().numpy()
edge_index = data.edge_index.numpy()
num_nodes = data.y.size(0)
adj = sps.csc_matrix((np.ones(edge_index.shape[1]), (edge_index[0], edge_index[1])), shape=(num_nodes, num_nodes))
adj_mask = np.ones(num_nodes)
adj_mask[test_idx] = 0
adj_mask = sps.diags(adj_mask, format="csr")
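        # (Illustration) with D = diag(m), where m is 0 on test nodes and 1
        # elsewhere, D.dot(A).dot(D) zeroes every row and column of A that
        # touches a test node, i.e. it drops all edges incident to test nodes.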
adj = adj_mask.dot(adj).dot(adj_mask.tocsc()).tocoo()
edge_index = np.concatenate([adj.row.reshape(1, -1), adj.col.reshape(1, -1)], axis=0)
data.edge_index = torch.LongTensor(edge_index)
# Original Planetoid setting.
if num_labels_per_class == 20:
return data
# Get one-hot labels.
temp = data.y.numpy()
labels = np.zeros((len(temp), temp.max() + 1))
for i in range(len(labels)):
labels[i, temp[i]] = 1
all_idx = list(range(len(labels)))
# Select a fixed number of training data per class.
idx_train = []
class_cnt = np.zeros(
labels.shape[1]) # number of nodes selected for each class
for i in all_idx:
if (class_cnt >= num_labels_per_class).all():
break
if ((class_cnt + labels[i]) > num_labels_per_class).any():
continue
class_cnt += labels[i]
idx_train.append(i)
if verbose:
print("number of training data: ", len(idx_train))
train_mask = np.zeros((len(labels), ), dtype=int)
val_mask = np.zeros((len(labels), ), dtype=int)
test_mask = np.zeros((len(labels), ), dtype=int)
for i in all_idx:
if i in idx_train:
train_mask[i] = 1
elif sum(val_mask) < 500: # select 500 validation data
val_mask[i] = 1
else:
test_mask[i] = 1
data.train_mask = torch.ByteTensor(train_mask)
data.val_mask = torch.ByteTensor(val_mask)
data.test_mask = torch.ByteTensor(test_mask)
return data
``` |
{
"source": "jiaqing23/private-table",
"score": 2
} |
#### File: jiaqing23/private-table/test_private_machine_learning.py
```python
import random
import numpy as np
import pandas as pd
import pytest
from privacy_budget import PrivacyBudget
from privacy_budget_tracker import MomentPrivacyBudgetTracker
from private_machine_learning import private_SGD
from utils import check_absolute_error
@pytest.fixture
def data():
np.random.seed(1)
x = np.random.rand(1000)*100
data = [(i, 5*i+8) for i in x]
return data
def test_private_SGD(data):
train_data, test_data = data[:800], data[800:]
param = np.random.rand(2) # y = param[0]*x+param[1]
def gradient_function(batch_data):
x, y = batch_data
y_pred = param[0]*x + param[1]
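        # Per-sample squared error L = (y - (param[0]*x + param[1]))**2, so
        #   dL/dparam[0] = -2*x*(y - y_pred) and dL/dparam[1] = -2*(y - y_pred).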
d0 = -2.0 * x * (y-y_pred)
d1 = -2.0 * (y-y_pred)
return [d0, d1]
def get_weights_function():
return np.copy(param)
def learning_rate_function(step):
if step < 10:
return 0.1
elif step < 50:
return 0.01
else:
return 0.005
def update_weights_function(new_weight):
param[:] = new_weight
def test_function():
n = len(test_data)
x = np.array([i[0] for i in test_data])
y = np.array([i[1] for i in test_data])
y_pred = param[0]*x + param[1]
loss = 1.0/n*np.sum((y_pred-y)**2)
check_absolute_error(loss, 0., 20.)
moment_accountant = MomentPrivacyBudgetTracker(PrivacyBudget(10, 0.001))
private_SGD(gradient_function=gradient_function,
get_weights_function=get_weights_function,
update_weights_function=update_weights_function,
learning_rate_function=learning_rate_function,
train_data=train_data,
group_size=100,
gradient_norm_bound=10,
number_of_steps=100,
sigma=1,
moment_privacy_budget_tracker=moment_accountant,
test_interval=100,
test_function=test_function
)
check_absolute_error(moment_accountant.consumed_privacy_budget.epsilon, 8.805554, 1e-6)
check_absolute_error(moment_accountant.consumed_privacy_budget.delta, 0.000625, 1e-6)
```
#### File: private-table/tests/test_privacy_budget.py
```python
from privacy_budget import PrivacyBudget, combine_privacy_losses
def test_combine_privacy_losses():
e1 = PrivacyBudget(1., 0.01)
e2 = PrivacyBudget(0.2, 0.004)
e3 = combine_privacy_losses([e1, e2])
expected_e3 = PrivacyBudget(1. + 0.2, 0.01 + 0.004)
assert e3 == expected_e3
def test_privacy_budget_class():
e1 = PrivacyBudget(1., 0.01)
e2 = PrivacyBudget(0.2, 0.004)
e3 = PrivacyBudget(1 + 0.2, 0.01 + 0.004)
assert e3 == e1 + e2
``` |
{
"source": "JiaqingGe1213/attention_keras",
"score": 3
} |
#### File: examples/nmt_bidirectional/model.py
```python
from tensorflow.python.keras.layers import Input, GRU, Dense, Concatenate, TimeDistributed, Bidirectional
from tensorflow.python.keras.models import Model
from layers.attention import AttentionLayer
def define_nmt(hidden_size, batch_size, en_timesteps, en_vsize, fr_timesteps, fr_vsize):
""" Defining a NMT model """
# Define an input sequence and process it.
if batch_size:
encoder_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inputs')
decoder_inputs = Input(batch_shape=(batch_size, fr_timesteps - 1, fr_vsize), name='decoder_inputs')
else:
encoder_inputs = Input(shape=(en_timesteps, en_vsize), name='encoder_inputs')
decoder_inputs = Input(shape=(fr_timesteps - 1, fr_vsize), name='decoder_inputs')
# Encoder GRU
encoder_gru = Bidirectional(GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru'), name='bidirectional_encoder')
encoder_out, encoder_fwd_state, encoder_back_state = encoder_gru(encoder_inputs)
# Set up the decoder GRU, using `encoder_states` as initial state.
decoder_gru = GRU(hidden_size*2, return_sequences=True, return_state=True, name='decoder_gru')
decoder_out, decoder_state = decoder_gru(
decoder_inputs, initial_state=Concatenate(axis=-1)([encoder_fwd_state, encoder_back_state])
)
# Attention layer
attn_layer = AttentionLayer(name='attention_layer')
attn_out, attn_states = attn_layer([encoder_out, decoder_out])
# Concat attention input and decoder GRU output
decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])
# Dense layer
dense = Dense(fr_vsize, activation='softmax', name='softmax_layer')
dense_time = TimeDistributed(dense, name='time_distributed_layer')
decoder_pred = dense_time(decoder_concat_input)
# Full model
full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)
full_model.compile(optimizer='adam', loss='categorical_crossentropy')
full_model.summary()
""" Inference model """
batch_size = 1
""" Encoder (Inference) model """
encoder_inf_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inf_inputs')
encoder_inf_out, encoder_inf_fwd_state, encoder_inf_back_state = encoder_gru(encoder_inf_inputs)
encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_fwd_state, encoder_inf_back_state])
""" Decoder (Inference) model """
decoder_inf_inputs = Input(batch_shape=(batch_size, 1, fr_vsize), name='decoder_word_inputs')
encoder_inf_states = Input(batch_shape=(batch_size, en_timesteps, 2*hidden_size), name='encoder_inf_states')
decoder_init_state = Input(batch_shape=(batch_size, 2*hidden_size), name='decoder_init')
decoder_inf_out, decoder_inf_state = decoder_gru(
decoder_inf_inputs, initial_state=decoder_init_state)
attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])
decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])
decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)
decoder_model = Model(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],
outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])
return full_model, encoder_model, decoder_model
if __name__ == '__main__':
""" Checking nmt model for toy examples """
define_nmt(64, None, 20, 30, 20, 20)
``` |
{
"source": "JIAQING-XIE/Computer-Communications-and-Networks",
"score": 2
} |
#### File: traffic_analysis/tests/test_pcap_aggr_5k.py
```python
import pytest
from scapy.utils import RawPcapReader
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP
from ipaddress import ip_address, ip_network
from pcap_aggr import Data, Node
from ipaddress import ip_network
testfile1 = '202011251400-78-5k.pcap'
testdata_aggr = {'0.0.0.0/2': 141232, '172.16.17.32/6': 368983, '0.0.0.0/0': 94144, '192.168.127.12/3': 1633302, '192.168.127.12/1': 137206, '172.16.31.10/32': 224411}
def test_pcap_aggr2():
data = Data(testfile1)
for k, v in testdata_aggr.items():
assert data.data[ip_network(k)] == v
``` |
{
"source": "JIAQING-XIE/Fea2Fea",
"score": 2
} |
#### File: Fea2Fea/src/f_f_TU.py
```python
import itertools
import os.path as osp
import pandas as pd
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch_geometric.datasets import TUDataset
import torch_geometric.transforms as T
from torch_geometric.nn import ARMAConv,AGNNConv,global_mean_pool
from torch_geometric.nn import GINConv,GATConv,GCNConv
from torch_geometric.nn import SAGEConv,SplineConv
import math
import matplotlib.pyplot as plt
import seaborn as sns
from torch_geometric.data import DataLoader, Dataset
from torch_scatter import scatter_mean
from graph_property import G_property,binning
from model.GNN import Net, debug_MLP
def reserve(task, dn, loader):
t = 0
for load in loader:
G = []
# construct graph
for p1 in range(np.array(load.edge_index).shape[1]):
G.append((int(load.edge_index[0][p1]),int(load.edge_index[1][p1])))
# calculate graph properties
constant = G_property(G, constant_bool=1)
degrees, graph = G_property(G, degree_bool=1, bin_bool=0)
clustering, graph = G_property(G, clustering_bool=1, bin_bool=0)
pagerank, graph = G_property(G, pagerank_bool=1, bin_bool=0)
avg_path_len_G, graph = G_property(G, avg_path_length_bool=1, bin_bool=0)
matrix = torch.cat((constant,degrees),1)
matrix = torch.cat((matrix,clustering),1)
matrix = torch.cat((matrix,pagerank),1)
matrix = torch.cat((matrix,avg_path_len_G),1)
matrix = matrix.numpy()
matrix = pd.DataFrame(matrix,columns = ['Constant_feature','Degree','Clustering_coefficient','Pagerank','Aver_path_len'])
name = r'/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/' + dn + '/' + dn + '_property' + str(t) + task +'.txt'
matrix.to_csv(name, sep = '\t', index=False)
t+=1
def train(i, j, dn, model, task, optimizer, train_loader, device, k = 6):
total_loss = 0
model.train()
total_num_nodes = 0
t= 0
for load in train_loader:
name = r'/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/' + dn + '/' + dn + '_property' + str(t) + task +'.txt'
property_file = pd.read_csv(name, sep = '\t')
propert_i = property_file.iloc[:,list(i)] if isinstance(i,tuple) else property_file.iloc[:,[i]]
array = np.array(propert_i)
load.x = torch.tensor(array).float()
propert_j = property_file.iloc[:,[j]]
array_2 = np.array(propert_j)
number = len(array_2)
load.y = binning(array_2, k = k, data_len = number)
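        # load.x: the input graph property (or tuple of properties) i;
        # load.y: the target property j discretised into k bins by binning().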
# --------- training loop ---------- #
load = load.to(device)
optimizer.zero_grad()
out = model(load)
loss = F.nll_loss(out,load.y)
loss.backward()
optimizer.step()
total_loss += loss.item() * len(load.y)
total_num_nodes+=len(load.y)
t+=1
#print(loss)
train_loss = total_loss / total_num_nodes
return train_loss
def valid(i, j, dn, model, task, valid_loader, device, k = 6):
correct = 0
model.eval()
total_num_nodes = 0
t = 0
for load in valid_loader:
name = r'/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/' + dn + '/' + dn + '_property' + str(t) + task +'.txt'
property_file = pd.read_csv(name, sep = '\t')
propert_i = property_file.iloc[:,list(i)] if isinstance(i,tuple) else property_file.iloc[:,[i]]
array = np.array(propert_i)
load.x = torch.tensor(array).float()
propert_j = property_file.iloc[:,[j]]
array_2 = np.array(propert_j)
number = len(array_2)
load.y = binning(array_2, k = k,data_len = number)
with torch.no_grad():
load = load.to(device)
pred = model(load).max(dim=1)[1]
correct += pred.eq(load.y).sum().item()
total_num_nodes+=len(load.y)
t+=1
valid_acc = correct / total_num_nodes
return valid_acc
def test(i, j, dn, model, task, test_loader, device, k = 6):
correct = 0
model.eval()
total_num_nodes = 0
t = 0
for load in test_loader:
name = r'/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/' + dn + '/' + dn + '_property' + str(t) + task +'.txt'
property_file = pd.read_csv(name, sep = '\t')
propert_i = property_file.iloc[:,list(i)] if isinstance(i,tuple) else property_file.iloc[:,[i]]
array = np.array(propert_i)
load.x = torch.tensor(array).float()
propert_j = property_file.iloc[:,[j]]
array_2 = np.array(propert_j)
number = len(array_2)
load.y = binning(array_2, k = k,data_len = number)
with torch.no_grad():
load = load.to(device)
pred = model(load).max(dim=1)[1]
correct += pred.eq(load.y).sum().item()
total_num_nodes+=len(load.y)
t+=1
test_acc = correct / total_num_nodes
return test_acc
if __name__ == '__main__':
dataset_name = [ 'ENZYMES','PROTEINS', 'NCI1']
#
GNN_model = ['GIN','SAGE','GAT', 'GCN', 'MLP']
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
for dn, embedding_method in list(itertools.product(dataset_name, GNN_model)):
Aver = np.zeros((5,5))
# make sure that your dataset is reserved in /tmp/dn/dn/...
dataset = TUDataset(root = '/home/jiaqing/桌面/Fea2Fea/data/' + dn, name = dn, use_node_attr = False)
# batch size is the parameter
# print(len(dataset))
train_len, valid_len= int(0.8 * len(dataset)), int(0.1 * len(dataset))
test_len = len(dataset) - train_len - valid_len
batchsize = 16 if dn != 'NCI1' else 32
train_loader = DataLoader(dataset[0:train_len], batch_size = batchsize , shuffle=False) #### batch size 32 for NCI1
valid_loader = DataLoader(dataset[train_len:(train_len+valid_len)], batch_size = batchsize , shuffle = False) #### batch size 32 for NCI1
test_loader = DataLoader(dataset[(train_len+valid_len):len(dataset)], batch_size = batchsize , shuffle = False) #### batch size 32 for NCI1
# for each batch, calculate the feature properties
# if you have reserved once, you do not need to reserve again since it takes a lot of time!!! so comment them
#reserve('train', dn, train_loader)
#reserve('valid', dn, valid_loader)
#reserve('test', dn, test_loader)
avg_num = 10
for avg in range(avg_num):
R = np.zeros((5,5)) # initialize our feature relationship matrix
R[0][0] = 1.000
            # i is the feature taken as input, j is the predicted feature
for i in range(5):
for j in range(1,5):
model = Net(embedding=embedding_method).to(device) if embedding_method != 'MLP' else debug_MLP().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr = 0.04)
#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.8)
# record epoch
best_epoch = 0
best_valid_acc = 0
best_test_acc = 0
op_iters = 0
for epoch in range(1, 200):
if dn == 'NCI1':
if j == 2 or i == 2:
R[i][j] = 0
R[j][i] = 0
break
# for train
t_loss = train(i, j, dn, model, 'train', optimizer, train_loader, device)
# for valid
v_acc = valid(i, j, dn, model, 'valid', optimizer, valid_loader, device)
# for test
t_acc = test(i, j, dn, model, 'test', optimizer, test_loader, device)
print('Epoch {:03d}, Train Loss: {:.4f}, Valid acc :{:.4f}, Test acc : {:.4f}'.format(
epoch, t_loss, v_acc, t_acc ))
if v_acc > best_valid_acc:
best_valid_acc = v_acc
best_test_acc = t_acc
best_epoch = epoch
if i == 0:
R[i][j] = round(t_acc,3)
R[j][i] = round(t_acc,3)
else:
R[i][j] = round(t_acc,3)
op_iters=0
op_iters+=1
if op_iters > 20:
break
if i == 4 and j == 4:
Aver+=R
if avg == 9:
k = Aver / 10
np.set_printoptions(precision=3)
k = pd.DataFrame(k)
filepath = '/home/jiaqing/桌面/Fea2Fea/Result/TUdataset'
fig_name = '/' +dn + '_' + embedding_method + '.txt'
fig_path = filepath + fig_name
k.to_csv(fig_path, header = None, index = None, sep = '\t')
#----------- save Heatmap Matrix-----------#
filepath = '/home/jiaqing/桌面/Fea2Fea/Result/TUdataset'
fig_name = '/' +dn + '_' + embedding_method + '_property' + '.eps'
fig_path = filepath + fig_name
xlabels = ['Constant','Degree','Clustering','PageRank','Aver_Path_Len']
ylabels = ['Constant','Degree','Clustering','PageRank','Aver_Path_Len']
cm = sns.heatmap(k,annot=True,cmap="Blues",cbar = False, square=True,
xticklabels = xlabels, yticklabels = ylabels)
cm.set_xticklabels(cm.get_xticklabels(), rotation=30)
cm.set_yticklabels(cm.get_xticklabels(), rotation=0)
label = embedding_method
cm.set_title(label)
heatmap = cm.get_figure()
heatmap.savefig(fig_path, dpi = 400,bbox_inches='tight')
plt.show()
break
#scheduler.step()
``` |
{
"source": "JIAQING-XIE/Google_NLP_DL",
"score": 3
} |
#### File: tor/model_lstm/lstm_crf.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from config import *
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from lstm import LSTM
class BiLSTM_CRF(nn.Module):
def __init__(self, tag_to_ix, embedding_dim, hidden_dim, word_embedding_matrix):
super(BiLSTM_CRF, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.tag_to_ix = tag_to_ix
self.tagset_size = len(tag_to_ix)
word_embedding_matrix = torch.from_numpy(word_embedding_matrix)
self.word_embeds = nn.Embedding.from_pretrained(word_embedding_matrix)
self.word_embeds.weight.requires_grad = False
self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_dim // 2, bidirectional=True, batch_first=True)
# Maps the output of the LSTM into tag space.
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
# Matrix of transition parameters. Entry i,j is the score of
# transitioning *to* i *from* j.
self.transitions = nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size))
# These two statements enforce the constraint that we never transfer
# to the start tag and we never transfer from the stop tag
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
self.hidden = self.init_hidden()
def init_hidden(self):
return (torch.randn(2, 1, self.hidden_dim // 2),
torch.randn(2, 1, self.hidden_dim // 2))
def _forward_alg(self, feats):
# Do the forward algorithm to compute the partition function
init_alphas = torch.full((1, self.tagset_size), -10000.)
# START_TAG has all of the score.
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
# Wrap in a variable so that we will get automatic backprop
forward_var = init_alphas
# Iterate through the sentence
for feat in feats:
alphas_t = [] # The forward tensors at this timestep
for next_tag in range(self.tagset_size):
# broadcast the emission score: it is the same regardless of
# the previous tag
emit_score = feat[next_tag].view(
1, -1).expand(1, self.tagset_size)
# the ith entry of trans_score is the score of transitioning to
# next_tag from i
trans_score = self.transitions[next_tag].view(1, -1)
# The ith entry of next_tag_var is the value for the
# edge (i -> next_tag) before we do log-sum-exp
next_tag_var = forward_var + trans_score + emit_score
# The forward variable for this tag is log-sum-exp of all the
# scores.
alphas_t.append(log_sum_exp(next_tag_var).view(1))
forward_var = torch.cat(alphas_t).view(1, -1)
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
alpha = log_sum_exp(terminal_var)
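        # alpha is the log partition function log Z(x): the log-sum-exp of the
        # scores of all possible tag sequences.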
return alpha
def _get_lstm_features(self, sentence):
self.hidden = self.init_hidden()
print(self.word_embeds(sentence).size())
embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
print(embeds.size())
lstm_out, self.hidden = self.lstm(embeds, self.hidden)
lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
lstm_feats = self.hidden2tag(lstm_out)
return lstm_feats
def _score_sentence(self, feats, tags):
# Gives the score of a provided tag sequence
score = torch.zeros(1)
tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])
for i, feat in enumerate(feats):
score = score + \
self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
return score
def _viterbi_decode(self, feats):
backpointers = []
# Initialize the viterbi variables in log space
init_vvars = torch.full((1, self.tagset_size), -10000.)
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
# forward_var at step i holds the viterbi variables for step i-1
forward_var = init_vvars
for feat in feats:
bptrs_t = [] # holds the backpointers for this step
viterbivars_t = [] # holds the viterbi variables for this step
for next_tag in range(self.tagset_size):
# next_tag_var[i] holds the viterbi variable for tag i at the
# previous step, plus the score of transitioning
# from tag i to next_tag.
# We don't include the emission scores here because the max
# does not depend on them (we add them in below)
next_tag_var = forward_var + self.transitions[next_tag]
best_tag_id = argmax(next_tag_var)
bptrs_t.append(best_tag_id)
viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
# Now add in the emission scores, and assign forward_var to the set
# of viterbi variables we just computed
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
backpointers.append(bptrs_t)
# Transition to STOP_TAG
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
        # Pop off the start tag (we don't want to return that to the caller)
start = best_path.pop()
assert start == self.tag_to_ix[START_TAG] # Sanity check
best_path.reverse()
return path_score, best_path
def neg_log_likelihood(self, sentence, tags):
feats = self._get_lstm_features(sentence)
forward_score = self._forward_alg(feats)
gold_score = self._score_sentence(feats, tags)
return forward_score - gold_score
    def forward(self, sentence):  # don't confuse this with _forward_alg above.
# Get the emission scores from the BiLSTM
lstm_feats = self._get_lstm_features(sentence)
# Find the best path, given the features.
score, tag_seq = self._viterbi_decode(lstm_feats)
return score, tag_seq
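    # Illustrative usage (assumption: `sentence` and `tags` are 1-D LongTensors
    # of word / tag indices):
    #   loss = model.neg_log_likelihood(sentence, tags)   # training objective
    #   score, tag_seq = model(sentence)                  # Viterbi decoding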
def adjust_learning_rate(self,lr, optimizer, epoch,):
lr = lr / (1 + epoch * decay_rate)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
``` |
{
"source": "jiaqinz/django-guacamole",
"score": 3
} |
#### File: guacamole-bak/io/GuacamoleWriter.py
```python
class GuacamoleWriter(object):
def write(self, chunk):
raise Exception("Not Implemented")
def writeInstruction(self, instruction):
raise Exception("Not Implemented")
```
#### File: guacamole-bak/net/AbstractGuacamoleTunnel.py
```python
import threading
from GuacamoleTunnel import GuacamoleTunnel
from guacamole.io.ReaderGuacamoleReader import ReaderGuacamoleReader
from guacamole.io.WriterGuacamoleWriter import WriterGuacamoleWriter
class AbstractGuacamoleTunnel(GuacamoleTunnel):
def __init__(self):
self.reader_lock = threading.Lock()
self.writer_lock = threading.Lock()
# Need to know if there's another thread waiting
self.read_waiters = 0
self.read_waiters_lock = threading.Lock()
def acquireReader(self):
with self.read_waiters_lock:
self.read_waiters += 1
self.reader_lock.acquire()
with self.read_waiters_lock:
self.read_waiters -= 1
return self.getSocket().getReader()
def hasQueuedReaderThreads(self):
with self.read_waiters_lock:
return self.read_waiters > 0
def releaseReader(self):
self.reader_lock.release()
def acquireWriter(self):
self.writer_lock.acquire()
return self.getSocket().getWriter()
def releaseWriter(self):
self.writer_lock.release()
def isOpen(self):
return self.getSocket().isOpen()
def close(self):
self.getSocket().close()
```
#### File: guacamole-bak/net/GuacamoleSocket.py
```python
class GuacamoleSocket(object):
def getReader(self):
raise Exception("Not Implemented")
def getWriter(self):
raise Exception("Not Implemented")
def isOpen(self):
raise Exception("Not Implemented")
def close(self):
raise Exception("Not Implemented")
```
#### File: guacamole-bak/net/InetGuacamoleSocket.py
```python
import logging
import socket
from GuacamoleSocket import GuacamoleSocket
from guacamole.exceptions import GuacamoleUpstreamTimeoutException, GuacamoleServerException
from guacamole.io.ReaderGuacamoleReader import ReaderGuacamoleReader
from guacamole.io.WriterGuacamoleWriter import WriterGuacamoleWriter
class InetGuacamoleSocket(GuacamoleSocket):
SOCKET_TIMEOUT = 15000
def __init__(self, host, port):
try:
logging.debug('Connecting to guacd at %s:%d', host, port)
self.socket = socket.create_connection((host, port), timeout=InetGuacamoleSocket.SOCKET_TIMEOUT)
self.reader = ReaderGuacamoleReader(self.socket)
self.writer = WriterGuacamoleWriter(self.socket)
except socket.timeout as e:
raise GuacamoleUpstreamTimeoutException(e)
except socket.error as e:
raise GuacamoleServerException(e)
self._open = True
def close(self):
try:
logging.debug('Closing connection to guacd.')
self._open = False
self.socket.close()
except socket.error as e:
raise GuacamoleServerException(e)
def getReader(self):
return self.reader
def getWriter(self):
return self.writer
def isOpen(self):
return self._open
```
#### File: guacamole-bak/protocol/ConfiguredGuacamoleSocket.py
```python
from guacamole.exceptions import GuacamoleServerException
from guacamole.net.GuacamoleSocket import GuacamoleSocket
from GuacamoleClientInformation import GuacamoleClientInformation
from GuacamoleInstruction import GuacamoleInstruction
class ConfiguredGuacamoleSocket(GuacamoleSocket):
def __init__(self, socket, config, info=None):
if not info:
info = GuacamoleClientInformation()
self._socket = socket
self._config = config
self._info = info
self._id = None
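        # Guacamole handshake, as implemented below: send "select", read "args",
        # reply with "size", "audio", "video", "image" and "connect", then wait
        # for "ready", whose first argument becomes the connection ID.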
reader = self._socket.getReader()
writer = self._socket.getWriter()
select_arg = config.connectionID
if not select_arg:
select_arg = config.protocol
writer.writeInstruction(GuacamoleInstruction('select', select_arg))
args = self.expect(reader, "args")
arg_names = args.instructions
arg_values = []
for name in arg_names:
arg_values.append(config.getParameter(str(name)) or '')
writer.writeInstruction(GuacamoleInstruction('size',
str(info.optimalScreenWidth),
str(info.optimalScreenHeight),
str(info.optimalScreenResolution)))
writer.writeInstruction(GuacamoleInstruction('audio', info.audioMimetypes))
writer.writeInstruction(GuacamoleInstruction('video', info.videoMimetypes))
writer.writeInstruction(GuacamoleInstruction('image', info.imageMimetypes))
writer.writeInstruction(GuacamoleInstruction('connect', arg_values))
ready = self.expect(reader, "ready")
ready_args = ready.instructions
if len(ready_args) == 0:
raise GuacamoleServerException("No connection ID received")
self._id = ready_args[0]
def expect(self, reader, opcode):
instruction = reader.readInstruction()
if not instruction:
raise GuacamoleServerException('End of stream while waiting for ' + opcode)
if instruction.opcode != opcode:
raise GuacamoleServerException('Received "{}" instruction while expecting "{}"'.format(instruction.opcode, opcode))
return instruction
@property
def config(self):
return self._config
@property
def connectionID(self):
return self._id
def getReader(self):
return self._socket.getReader()
def getWriter(self):
return self._socket.getWriter()
def close(self):
self._socket.close()
def isOpen(self):
return self._socket.isOpen()
```
#### File: guacamole-bak/protocol/GuacamoleClientInformation.py
```python
class GuacamoleClientInformation(object):
def __init__(self):
self._optimalScreenWidth = 1024
self._optimalScreenHeight = 768
self._optimalScreenResolution = 96
self._audioMimetypes = []
self._videoMimetypes = []
self._imageMimetypes = []
@property
def optimalScreenWidth(self):
return self._optimalScreenWidth
@optimalScreenWidth.setter
def optimalScreenWidth(self, value):
self._optimalScreenWidth = value
@property
def optimalScreenHeight(self):
return self._optimalScreenHeight
@optimalScreenHeight.setter
def optimalScreenHeight(self, value):
self._optimalScreenHeight = value
@property
def optimalScreenResolution(self):
return self._optimalScreenResolution
@optimalScreenResolution.setter
def optimalScreenResolution(self, value):
self._optimalScreenResolution = value
@property
def audioMimetypes(self):
return self._audioMimetypes
@audioMimetypes.setter
def audioMimetypes(self, value):
self._audioMimetypes = value
@property
def videoMimetypes(self):
return self._videoMimetypes
@videoMimetypes.setter
def videoMimetypes(self, value):
self._videoMimetypes = value
@property
def imageMimetypes(self):
return self._imageMimetypes
@imageMimetypes.setter
def imageMimetypes(self, value):
self._imageMimetypes = value
```
#### File: guacamole-bak/protocol/GuacamoleConfiguration.py
```python
class GuacamoleConfiguration(object):
def __init__(self, protocol=None):
self.connectionID = None
self.protocol = protocol
self.parameters = {}
@property
def connectionID(self):
return self._connectionID
@connectionID.setter
def connectionID(self, connectionID):
self._connectionID = connectionID
@property
def protocol(self):
return self._protocol
@protocol.setter
def protocol(self, protocol):
self._protocol = protocol
@property
def parameters(self):
return self._parameters
@parameters.setter
def parameters(self, parameters):
self._parameters = parameters
def getParameter(self, paramName):
return self._parameters.get(paramName, None)
def delParameter(self, paramName):
del self._parameters[paramName]
def setParameter(self, paramName, paramValue):
self._parameters[paramName] = paramValue
```
#### File: django-guacamole/guacamole/guacamolethreading.py
```python
import threading
try:
import simplejson as json
except ImportError:
import json
def get_redis_instance():
from django_guacamole.asgi import channel_layer
return channel_layer._connection_list[0]
import ast
import logging
logger = logging.getLogger(__name__)
import time
class GuacamoleThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self,message,client):
super(GuacamoleThread, self).__init__()
self._stop_event = threading.Event()
self.message = message
self.queue = self.redis_queue()
self.client = client
self.read_lock = threading.RLock()
self.write_lock = threading.RLock()
self.pending_read_request = threading.Event()
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
def redis_queue(self):
redis_instance = get_redis_instance()
redis_sub = redis_instance.pubsub()
redis_sub.subscribe(self.message.reply_channel.name)
return redis_sub
def run(self):
#while (not self._stop_event.is_set()):
#text = self.queue.get_message()
#if text:
#if isinstance(data,(list,tuple)):
#if data[0] == 'close':
#self.stop()
from django_guacamole.asgi import channel_layer
with self.read_lock:
#self.pending_read_request.clear()
while True:
instruction = self.client.receive()
if instruction:
channel_layer.send(self.message.reply_channel.name,{"text":instruction})
else:
break
#if self.pending_read_request.is_set():
#logger.info('Letting another request take over.')
#break
# End-of-instruction marker
channel_layer.send(self.message.reply_channel.name,{"text":'0.;'})
class GuacamoleThreadWrite(GuacamoleThread):
def run(self):
while True:
text = self.queue.get_message()
try:
data = ast.literal_eval(text['data'])
except Exception,e:
if isinstance(text,dict) and text.has_key('data'):
data = text['data']
elif isinstance(text,(unicode,basestring)):
data = text
else:
data = text
if data:
if isinstance(data,(list,tuple)):
if data[0] == 'close':
self.stop()
if isinstance(data,(long,int)) and data == 1:
pass
else:
#print 'write',data
with self.write_lock:
self.client.send(str(data))
else:
time.sleep(0.001)
``` |
{
"source": "jiaqiwang969/Kratos-test",
"score": 2
} |
#### File: test_exemples/dam2d_benchmarking.gid/dam2d.py
```python
domain_size = 2
##################################################################
##################################################################
## ATTENTION: here the order is important
#including kratos path
kratos_libs_path = '../../../../libs' ##kratos_root/libs
kratos_applications_path = '../../../../applications' ##kratos_root/applications
kratos_benchmarking_path = '../../../../benchmarking' ##kratos_root/benchmarking
import sys
sys.path.append(kratos_libs_path)
sys.path.append(kratos_applications_path)
sys.path.append(kratos_benchmarking_path)
#importing Kratos main library
from Kratos import *
kernel = Kernel() #defining kernel
#importing applications
import applications_interface
applications_interface.Import_IncompressibleFluidApplication = True
applications_interface.Import_PFEMApplication = True
applications_interface.ImportApplications(kernel, kratos_applications_path)
import benchmarking
## from now on the order is not anymore crucial
##################################################################
##################################################################
def FindNode(node_list,x,y,z):
for node in node_list:
if ((node.X - x) ** 2 + (node.Y - y) ** 2 + (node.Z - z) ** 2 < 0.0000001):
return node
def BenchmarkCheck(time, node1, node2):
benchmarking.Output(time, "Time")
benchmarking.Output(node1.GetSolutionStepValue(PRESSURE), "Node 1 pressure", 1.0)
benchmarking.Output(node2.GetSolutionStepValue(PRESSURE), "Node 2 pressure", 1.0)
#defining a model part
model_part = ModelPart("FluidPart");
print "aaa"
#importing the solver files and adding the variables
import pfem_solver_ale
pfem_solver_ale.AddVariables(model_part)
#reading a model
gid_mode_flag = GiDPostMode.GiD_PostBinary
use_multifile = MultiFileFlag.MultipleFiles
deformed_print_flag = WriteDeformedMeshFlag.WriteDeformed
write_conditions = WriteConditionsFlag.WriteConditions
gid_io = GidIO("dam2d",gid_mode_flag,use_multifile,deformed_print_flag,write_conditions)
gid_io.ReadModelPart(model_part)
print model_part
#the buffer size should be set up here after the mesh is read for the first time
model_part.SetBufferSize(3)
#adding dofs
pfem_solver_ale.AddDofs(model_part)
#setting the limits of the bounding box
box_corner1 = Vector(3); box_corner1[0]=-0.1; box_corner1[1]=-0.1; box_corner1[2]=-0.1;
box_corner2 = Vector(3); box_corner2[0]=1.1; box_corner2[1]=1.1; box_corner2[2]=-0.0;
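#(assumption based on typical PFEM usage: nodes that leave this box are removed from the fluid domain during remeshing)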
#creating a fluid solver object
name = str("dam2d")
solver = pfem_solver_ale.PFEMSolver(model_part,name,box_corner1,box_corner2,domain_size)
##solver.laplacian_form = 2
solver.echo_level = 0
solver.prediction_order = 1
solver.predictor_corrector = False
solver.smooth = False
#solver.vel_toll = 1e-12
#solver.max_vel_its = 20;
##solver.echo_level = 2
solver.pressure_linear_solver = SkylineLUFactorizationSolver()
solver.velocity_linear_solver = SkylineLUFactorizationSolver()
gravity = Vector(3)
gravity[0] = 0.00; gravity[1] = -9.81; gravity[2] = 0.0;
for node in model_part.Nodes:
node.SetSolutionStepValue(VISCOSITY,0,0.000001)
node.SetSolutionStepValue(DENSITY,0,1000.00000)
node.SetSolutionStepValue(BODY_FORCE,0,gravity)
Dt = 0.01
nsteps = 10
output_Dt = 0.05
min_dt = 0.005
max_dt = 0.02
safety_factor = 0.5;
##########################
node_1 = FindNode(model_part.Nodes, 0.5, 0.0, 0.0)
node_2 = FindNode(model_part.Nodes, 0.24, 0.0, 0.0)
##########################
#initializing the solver
solver.Initialize(Dt,output_Dt)
time = Dt
for step in range(0,nsteps):
print "solution step =" , step
new_Dt = solver.EstimateDeltaTime(min_dt,max_dt)
# print "time = ", time, " new_Dt= ",new_Dt," step = ", step
#new_Dt = Dt
time = time + new_Dt*safety_factor
#time = Dt*step
model_part.CloneTimeStep(time)
print time
#print model_part.ProcessInfo()[TIME]
#solving the fluid problem
print "qui"
if(step > 3):
solver.Solve(time,gid_io)
## if(step > 4):
## solver.box_corner2[1] = 0.1
print "li"
BenchmarkCheck(time, node_1, node_2)
print "solution finished"
```
#### File: structural_application/custom_problemtype/write_cond_bas.py
```python
import string;
import basicfunctions;
################################################################################################
def WriteScalarBC(name,condtype,projectname,domaintype):
section=''
setadd='Set'
if (string.count(condtype,'v')>=1):
section = section + CondSetCond(name,'volume',setadd,domaintype)
setadd='Add'
if (string.count(condtype,'s')>=1):
section = section + CondSetCond(name,'surface',setadd,domaintype)
setadd='Add'
if (string.count(condtype,'l')>=1):
section = section + CondSetCond(name,'line',setadd,domaintype)
setadd='Add'
if (string.count(condtype,'p')>=1):
section = section + CondSetCond(name,'point',setadd,domaintype)
setadd='Add'
if setadd == 'Add':
section = section + CondScalarSection(name)
if domaintype=='Fluid':
basicfunctions.addtofile('012_'+projectname+'.fluid.cond.bas',section)
if domaintype=='Structure':
basicfunctions.addtofile('022_'+projectname+'.structure.cond.bas',section)
def WriteVectorBC(name,condtype,projectname,domaintype):
section=''
setadd='Set'
if (string.count(condtype,'v')>=1):
section = section + CondSetCond(name,'volume',setadd,domaintype)
setadd='Add'
if (string.count(condtype,'s')>=1):
section = section + CondSetCond(name,'surface',setadd,domaintype)
setadd='Add'
if (string.count(condtype,'l')>=1):
section = section + CondSetCond(name,'line',setadd,domaintype)
setadd='Add'
if (string.count(condtype,'p')>=1):
section = section + CondSetCond(name,'point',setadd,domaintype)
setadd='Add'
if setadd == 'Add':
section = section + CondVectorSection(name)
if domaintype=='Fluid':
basicfunctions.addtofile('012_'+projectname+'.fluid.cond.bas',section)
if domaintype=='Structure':
basicfunctions.addtofile('022_'+projectname+'.structure.cond.bas',section)
def WriteConditions(name,condtype,projectname,domaintype):
section=''
if (string.count(condtype,'p')>=1):
section = section + ConditionSection(name,'point')
if (string.count(condtype,'l')>=1):
section = section + ConditionSection(name,'line')
if (string.count(condtype,'s')>=1):
section = section + ConditionSection(name,'surface')
if (string.count(condtype,'v')>=1):
section = section + ConditionSection(name,'volume')
if domaintype=='Fluid':
basicfunctions.addtofilebeginning('012_'+projectname+'.fluid.cond.bas',section)
if domaintype=='Structure':
basicfunctions.addtofilebeginning('022_'+projectname+'.structure.cond.bas',section)
#####################################################################################################
def CondSetCond(name,condtype,setadd,domaintype):
return '*'+setadd+' cond '+condtype+'_'+name+'_('+domaintype+')'+' *nodes\n'
def CondScalarSection(name):
section ='*loop nodes *OnlyInCond\n*format "%i%f"\n'
section = section + 'NODES[*NodesNum].Fix('+name+');\n'
section = section + '*end nodes\n'
return section
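# Illustrative result of CondScalarSection('WATER_PRESSURE'), assembled from the
# strings above:
#   *loop nodes *OnlyInCond
#   *format "%i%f"
#   NODES[*NodesNum].Fix(WATER_PRESSURE);
#   *end nodes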
def CondVectorSection(name):
section ='*loop nodes *OnlyInCond\n'
section = section + '*if(strcmp(cond('+name+'_X),"1")==0)\n*format "%i%f"\n'
section = section + 'NODES[*NodesNum].Fix('+name+'_X);\n'
section = section + '*endif\n*if(strcmp(cond('+name+'_Y),"1")==0)\n*format "%i%f"\n'
section = section + 'NODES[*NodesNum].Fix('+name+'_Y);\n'
section = section + '*endif\n*if(strcmp(cond('+name+'_Z),"1")==0)\n*format "%i%f"\n'
section = section + 'NODES[*NodesNum].Fix('+name+'_Z);\n'
section = section + '*endif\n*end nodes\n'
return section
def ConditionSection(name,condtype):
section='*Set cond '+condtype+'_'+name+' *elems\n'
section = section + '*loop elems *onlyInCond\n*Set var i=0\n*set var j= ElemsNnode\n*format "%i%i%i%i%i%i%i%i"\n'
section = section + 'CONDITIONS[*ElemsNum] = '+name+'([*\\\n'
section = section + '*for(i=1;i<j;i=i+1)*\\\n*ElemsConec(*i),*\\\n*end*\\\n*ElemsConec(*ElemsNnode)],*ElemsMat);\n*end elems\n'
return section
```
#### File: structural_application/python_scripts/structural_solver_multiphase.py
```python
from Kratos import *
from KratosStructuralApplication import *
def AddVariables(model_part):
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_NULL);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_EINS);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_DT);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_NULL_DT);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_EINS_DT);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_ACCELERATION);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_NULL_ACCELERATION);
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_EINS_ACCELERATION);
model_part.AddNodalSolutionStepVariable(REACTION_AIR_PRESSURE);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_NULL);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_EINS);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_DT);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_NULL_DT);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_EINS_DT);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_ACCELERATION);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_NULL_ACCELERATION);
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_EINS_ACCELERATION);
model_part.AddNodalSolutionStepVariable(REACTION_WATER_PRESSURE);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
model_part.AddNodalSolutionStepVariable(VELOCITY);
model_part.AddNodalSolutionStepVariable(ACCELERATION);
model_part.AddNodalSolutionStepVariable(REACTION);
model_part.AddNodalSolutionStepVariable(NEGATIVE_FACE_PRESSURE);
model_part.AddNodalSolutionStepVariable(POSITIVE_FACE_PRESSURE);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT_OLD);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT_NULL);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT_EINS);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT_DT);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT_NULL_DT);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT_EINS_DT);
model_part.AddNodalSolutionStepVariable(ACCELERATION_NULL);
model_part.AddNodalSolutionStepVariable(ACCELERATION_EINS);
model_part.AddNodalSolutionStepVariable(ELASTIC_LEFT_CAUCHY_GREEN_OLD);
model_part.AddNodalSolutionStepVariable(INSITU_STRESS);
model_part.AddNodalSolutionStepVariable(FACE_LOAD);
print "variables for the dynamic structural solution added correctly"
def AddDofs(model_part):
for node in model_part.Nodes:
#adding dofs
node.AddDof(WATER_PRESSURE);
node.AddDof(AIR_PRESSURE);
node.AddDof(DISPLACEMENT_X);
node.AddDof(DISPLACEMENT_Y);
node.AddDof(DISPLACEMENT_Z);
print "dofs for the dynamic structural solution added correctly"
class MultiphaseSolver:
#######################################################################
def __init__(self,model_part,domain_size):
self.model_part = model_part
self.echo_level = 0
self.damp_factor = 1.0
self.toll = 1e-5
self.absolute_tol = 1e-8
#definition of the solvers
self.structure_linear_solver = SkylineLUFactorizationSolver()
#definition of the convergence criteria
#self.conv_criteria = MultiPhaseFlowCriteria(1e-6, 1e-6)
#######################################################################
def Initialize(self):
self.time_scheme = ResidualBasedNewmarkScheme(self.damp_factor)
#definition of the convergence criteria
self.conv_criteria = MultiPhaseFlowCriteria(self.toll,self.absolute_tol)
#definition of BuilderAndSolver
#builder_and_solver = ResidualBasedEliminationBuilderAndSolver(self.structure_linear_solver)
#creating the solution strategy
self.solver = ResidualBasedNewtonRaphsonStrategy(self.model_part,self.time_scheme,self.structure_linear_solver,self.conv_criteria,12,False,True,False)
print "self.echo_level = " , self.echo_level
(self.solver).SetEchoLevel(self.echo_level)
print "finished initialization of the multiphase strategy"
#######################################################################
def Solve(self):
(self.solver).Solve()
```
#### File: test_examples/balken.gid/balken_include.py
```python
kratos_libs_path = '/home/hurga/kratos_local/kratos/libs/' ##kratos_root/libs
#kratos_libs_path = '/home/hurga/kratosR1/libs/' ##kratos_root/libs
kratos_applications_path = '/home/hurga/kratos_local/kratos/applications/' ##kratos_root/applications
#kratos_applications_path = '/home/hurga/kratosR1/applications/' ##kratos_root/applications
ekate_auxiliary_path = '/home/hurga/kratos_local/kratos/applications/ekate_auxiliary_application/python_scripts'
#ekate_auxiliary_path = '/home/hurga/kratosR1/applications/ekate_auxiliary_application/python_scripts'
##################################################################
##################################################################
import sys
sys.path.append(kratos_libs_path)
sys.path.append(kratos_applications_path)
sys.path.append(ekate_auxiliary_path)
#importing Kratos main library
from Kratos import *
kernel = Kernel() #defining kernel
#importing applications
import applications_interface
applications_interface.Import_StructuralApplication = True
applications_interface.Import_EkateAuxiliaryApplication = True
applications_interface.Import_ExternalConstitutiveLawsApplication = True
applications_interface.ImportApplications(kernel, kratos_applications_path)
from KratosStructuralApplication import *
from KratosExternalSolversApplication import *
from KratosEkateAuxiliaryApplication import *
from KratosExternalConstitutiveLawsApplication import *
##################################################################
##################################################################
class Model:
def __init__( self, problem_name, path ):
#setting the domain size for the problem to be solved
self.domain_size = 3
##################################################################
## DEFINE MODELPART ##############################################
##################################################################
self.model_part = ModelPart("ekate_simulation")
self.path = path
self.problem_name = problem_name
##################################################################
## DEFINE SOLVER #################################################
##################################################################
# reading simulation parameters
number_of_time_steps = 1
self.analysis_parameters = []
# content of analysis_parameters:
# perform_contact_analysis_flag
# penalty value for normal contact
# maximum number of uzawa iterations
# friction coefficient
# penalty value for frictional contact
# contact_double_check_flag
# contact_ramp_penalties_flag
# maximum penalty value for normal contact
# ramp criterion for normal contact
# ramp factor for normal contact
# maximum penalty value for frictional contact
# ramp criterion for frictional contact
# ramp factor for frictional contact
perform_contact_analysis_flag = True
# performing contact analysis: reading contact parameters
penalty = 1e+10
maxuzawa = 25
friction = 0
frictionpenalty = 1e+05
contact_double_check_flag = False
contact_ramp_penalties_flag = False
maxpenalty = penalty
rampcriterion = 0.0
rampfactor = 0.0
fricmaxpenalty = penalty
fricrampcriterion = 0.0
fricrampfactor = 0.0
self.analysis_parameters.append(perform_contact_analysis_flag)
self.analysis_parameters.append(penalty)
self.analysis_parameters.append(maxuzawa)
self.analysis_parameters.append(friction)
self.analysis_parameters.append(frictionpenalty)
self.analysis_parameters.append(contact_double_check_flag)
self.analysis_parameters.append(contact_ramp_penalties_flag)
self.analysis_parameters.append(maxpenalty)
self.analysis_parameters.append(rampcriterion)
self.analysis_parameters.append(rampfactor)
self.analysis_parameters.append(fricmaxpenalty)
self.analysis_parameters.append(fricrampcriterion)
self.analysis_parameters.append(fricrampfactor)
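# index layout of analysis_parameters after the appends above (for reference):
# [0] perform_contact_analysis_flag, [1] penalty, [2] maxuzawa, [3] friction,
# [4] frictionpenalty, [5] contact_double_check_flag, [6] contact_ramp_penalties_flag,
# [7] maxpenalty, [8] rampcriterion, [9] rampfactor, [10] fricmaxpenalty,
# [11] fricrampcriterion, [12] fricrampfactor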
abs_tol = 1e-06
rel_tol = 0.0001
## generating solver
import ekate_solver
self.solver = ekate_solver.EkateSolver( self.model_part, self.domain_size, number_of_time_steps, self.analysis_parameters, abs_tol, rel_tol )
ekate_solver.AddVariables( self.model_part )
##################################################################
## READ MODELPART ################################################
##################################################################
#reading a model
write_deformed_flag = WriteDeformedMeshFlag.WriteUndeformed
write_elements = WriteConditionsFlag.WriteElementsOnly
post_mode = GiDPostMode.GiD_PostBinary
multi_file_flag = MultiFileFlag.SingleFile
self.gid_io = StructuralGidIO( self.path+self.problem_name, post_mode, multi_file_flag, write_deformed_flag, write_elements )
self.gid_io.ReadModelPart(self.model_part)
self.meshWritten = False
## READ DEACTIVATION FILE ########################################
self.deac_file = open(self.path+self.problem_name+".deac",'r' )
self.activation_flags = [0]
for line in self.deac_file:
val_set = line.split(' ')
elem_num = int(val_set[0])
act_level = int(val_set[1])
self.activation_flags.append(act_level)
print "input data read OK"
#print "+++++++++++++++++++++++++++++++++++++++"
#for node in self.model_part.Nodes:
# print node
#print "+++++++++++++++++++++++++++++++++++++++"
#the buffer size should be set up here after the mesh is read for the first time
self.model_part.SetBufferSize(2)
##################################################################
## ADD DOFS ######################################################
##################################################################
ekate_solver.AddDofs( self.model_part )
##################################################################
## INITIALISE SOLVER FOR PARTICULAR SOLUTION #####################
##################################################################
#defining linear solver
plinear_solver = SkylineLUFactorizationSolver()
self.solver.structure_linear_solver = plinear_solver
self.solver.Initialize()
(self.solver.solver).SetEchoLevel(2);
##################################################################
## INITIALISE RESTART UTILITY ####################################
##################################################################
#restart_utility= RestartUtility( self.problem_name )
def SetUpActivationLevels( self, model_part, activation_list ):
for element in self.model_part.Elements:
element.SetValue(ACTIVATION_LEVEL, activation_list[element.Id])
# def write_restart_file( self, time ):
# print("------------> restart file written for time step: "+str(time))
# self.restart_utility.ChangeFileName(problem_name+str(time))
# self.restart_utility.StoreNodalVariables(model_part)
# self.restart_utility.StoreInSituStress(model_part)
# self.restart_utility.StoreConstitutiveLawVariables(model_part)
#
# def restart_time_step( self, time, Dt ):
# print("############ time step solution has to be restarted ############")
# time = time-Dt
# model_part.CloneTimeStep(time)
# for step in range(1,11):
# time = time+ Dt/10.0
# model_part.CloneTimeStep(time)
# #####################################################################################################
# model_part.ProcessInfo.SetValue( QUASI_STATIC_ANALYSIS, True )
# model_part.ProcessInfo.SetValue( FIRST_TIME_STEP, False )
# #####################################################################################################
# solver.Solve()
# print("~~~~~~~~~~~~~~ RESTARTED STEP ( DT= "+str(Dt/10.0)+" / Step= "+str(step)+" ) ~~~~~~~~~~~~~~")
# print("############ restart finished ############")
#
# def write_to_file( self, time ):
# for i in range(0, len(self.layer_nodes_sets['top'])):
# settlements.write(str(time)+"/"+str(model_part.Nodes[layer_nodes_sets['top'][i]].GetZ())+"/"+str(model_part.Nodes[layer_nodes_sets['top'][i]].GetSolutionStepValue(DISPLACEMENT_Z))+"\n")
# for i in range(0, len(layer_nodes_sets['side'])):
# pressure_air.write(str(time)+"/"+str(model_part.Nodes[layer_nodes_sets['side'][i]].GetZ())+"/"+str(model_part.Nodes[layer_nodes_sets['side'][i]].GetSolutionStepValue(AIR_PRESSURE))+"\n")
# pressure_water.write(str(time)+"/"+str(model_part.Nodes[layer_nodes_sets['side'][i]].GetZ())+"/"+str(model_part.Nodes[layer_nodes_sets['side'][i]].GetSolutionStepValue(WATER_PRESSURE))+"\n")
#
def ApplyInsituWaterPressure( self, free_node_list_water, free_node_list_air, z_zero):
gravity_z = -50;
water_density= 1000.0;
for i in range(1, len(self.model_part.Nodes)+1):
if self.model_part.Nodes[i].HasDofFor(WATER_PRESSURE):
if (self.model_part.Nodes[i].IsFixed(WATER_PRESSURE)==0):
water_pressure= 0.0
self.model_part.Nodes[i].SetSolutionStepValue(WATER_PRESSURE, water_pressure)
self.model_part.Nodes[i].SetSolutionStepValue(WATER_PRESSURE_EINS, water_pressure)
self.model_part.Nodes[i].SetSolutionStepValue(WATER_PRESSURE_NULL, water_pressure)
self.model_part.Nodes[i].Fix(WATER_PRESSURE)
free_node_list_water.append(i)
if self.model_part.Nodes[i].HasDofFor(AIR_PRESSURE):
if (self.model_part.Nodes[i].IsFixed(AIR_PRESSURE)==0):
self.model_part.Nodes[i].SetSolutionStepValue(AIR_PRESSURE, 0.0)
self.model_part.Nodes[i].SetSolutionStepValue(AIR_PRESSURE_EINS, 0.0)
self.model_part.Nodes[i].SetSolutionStepValue(AIR_PRESSURE_NULL, 0.0)
self.model_part.Nodes[i].Fix(AIR_PRESSURE)
free_node_list_air.append(i)
def FreePressureNodes(self,free_node_list_water, free_node_list_air):
for item in free_node_list_water:
self.model_part.Nodes[item].Free(WATER_PRESSURE)
for item in free_node_list_air:
self.model_part.Nodes[item].Free(AIR_PRESSURE)
def WriteMaterialParameters( self, time, indices ):
self.gid_io.OpenResultFile( self.path+self.problem_name, GiDPostMode.GiD_PostBinary)
for index in indices:
self.gid_io.SuperPrintOnGaussPoints(MATERIAL_PARAMETERS, self.model_part, time, index)
self.gid_io.CloseResultFile()
def WriteOutput( self, time ):
if( self.meshWritten == False ):
self.gid_io.InitializeMesh( 0.0 )
mesh = self.model_part.GetMesh()
self.gid_io.WriteMesh( mesh )
self.meshWritten = True
self.gid_io.FinalizeMesh()
self.gid_io.InitializeResults( 0.0, self.model_part.GetMesh() )
print("write nodal displacements")
self.gid_io.WriteNodalResults(DISPLACEMENT, self.model_part.Nodes, time, 0)
self.gid_io.FinalizeResults()
def InitializeModel( self ):
##################################################################
## INITIALISE CONSTITUTIVE LAWS ##################################
##################################################################
#set material parameters
params1 = Vector(7)
params1[0] = 2.1e+11 #Young's Modulus
params1[1] = 0.3 #Poisson Ratio
params1[2] = 0.0 #Internal Friction Angle
params1[3] = 0.0 #Cohesion
params1[4] = 0.0 #Compressive Strength
params1[5] = 0.0 #Tensile Strength
params1[6] = 0.0 #RMR Variance
self.model_part.Properties[1].SetValue(MATERIAL_PARAMETERS, params1 )
params2 = Vector(7)
params2[0] = 3e+10 #Young's Modulus
params2[1] = 0.3 #Poisson Ratio
params2[2] = 0.0 #Internal Friction Angle
params2[3] = 0.0 #Cohesion
params2[4] = 0.0 #Compressive Strength
params2[5] = 0.0 #Tensile Strength
params2[6] = 0.0 #RMR Variance
self.model_part.Properties[2].SetValue(MATERIAL_PARAMETERS, params2 )
self.model_part.Properties[1].SetValue(CONSTITUTIVE_LAW, Isotropic3D() )
print "Linear elastic model selected"
self.model_part.Properties[2].SetValue(CONSTITUTIVE_LAW, Isotropic3D() )
print "Linear elastic model selected"
##################################################################
## STORE LAYER SETS ##############################################
##################################################################
## ELEMENTS on layers ############################################
self.layer_sets = {}
layer_elements_list = [
1 ,
2 ,
3 ,
4 ,
5 ,
6 ,
7 ,
8 ,
9 ,
10 ,
11 ,
12 ,
13 ,
14 ,
15 ,
16 ,
17 ,
18 ,
19 ,
20 ,
21 ,
22 ,
23 ,
24 ,
25 ,
]
self.layer_sets['Layer0'] = layer_elements_list
## ELEMENTS on inner boundaries ##################################
self.inner_boundary_elements = [
]
## NODES on layers ###############################################
self.layer_nodes_sets = {}
layer_nodes_list = [
1 ,
2 ,
3 ,
4 ,
5 ,
6 ,
7 ,
8 ,
9 ,
10 ,
11 ,
12 ,
13 ,
14 ,
15 ,
16 ,
17 ,
18 ,
19 ,
20 ,
21 ,
22 ,
23 ,
24 ,
25 ,
26 ,
27 ,
28 ,
29 ,
30 ,
31 ,
32 ,
33 ,
34 ,
35 ,
36 ,
37 ,
38 ,
39 ,
40 ,
41 ,
42 ,
43 ,
44 ,
45 ,
46 ,
47 ,
48 ,
49 ,
50 ,
51 ,
52 ,
53 ,
54 ,
55 ,
56 ,
57 ,
58 ,
59 ,
60 ,
61 ,
62 ,
63 ,
64 ,
65 ,
66 ,
67 ,
68 ,
69 ,
70 ,
71 ,
72 ,
73 ,
74 ,
75 ,
76 ,
77 ,
78 ,
79 ,
80 ,
81 ,
82 ,
83 ,
84 ,
85 ,
86 ,
87 ,
88 ,
89 ,
90 ,
91 ,
92 ,
93 ,
94 ,
95 ,
96 ,
97 ,
98 ,
99 ,
100 ,
101 ,
102 ,
103 ,
104 ,
105 ,
106 ,
107 ,
108 ,
109 ,
110 ,
111 ,
112 ,
113 ,
114 ,
115 ,
116 ,
117 ,
118 ,
119 ,
120 ,
121 ,
122 ,
123 ,
124 ,
125 ,
126 ,
127 ,
128 ,
129 ,
130 ,
131 ,
132 ,
133 ,
134 ,
135 ,
136 ,
137 ,
138 ,
139 ,
140 ,
141 ,
142 ,
143 ,
144 ,
145 ,
146 ,
147 ,
148 ,
149 ,
150 ,
151 ,
152 ,
153 ,
154 ,
155 ,
156 ,
157 ,
158 ,
159 ,
160 ,
161 ,
162 ,
163 ,
164 ,
165 ,
166 ,
167 ,
168 ,
169 ,
170 ,
171 ,
172 ,
173 ,
174 ,
175 ,
176 ,
177 ,
178 ,
179 ,
180 ,
181 ,
182 ,
183 ,
184 ,
185 ,
186 ,
187 ,
188 ,
189 ,
190 ,
191 ,
192 ,
193 ,
194 ,
195 ,
196 ,
197 ,
198 ,
199 ,
200 ,
201 ,
202 ,
203 ,
204 ,
205 ,
206 ,
207 ,
208 ,
209 ,
210 ,
211 ,
212 ,
213 ,
214 ,
215 ,
216 ,
217 ,
218 ,
219 ,
220 ,
221 ,
222 ,
223 ,
224 ,
225 ,
226 ,
227 ,
228 ,
229 ,
230 ,
231 ,
232 ,
233 ,
234 ,
235 ,
236 ,
237 ,
238 ,
239 ,
240 ,
241 ,
242 ,
243 ,
244 ,
245 ,
246 ,
247 ,
248 ,
249 ,
250 ,
251 ,
252 ,
253 ,
254 ,
255 ,
256 ,
257 ,
258 ,
259 ,
260 ,
261 ,
262 ,
263 ,
264 ,
265 ,
266 ,
267 ,
268 ,
269 ,
270 ,
271 ,
272 ,
273 ,
274 ,
275 ,
276 ,
277 ,
278 ,
279 ,
280 ,
281 ,
282 ,
283 ,
284 ,
285 ,
286 ,
287 ,
288 ,
289 ,
290 ,
291 ,
292 ,
293 ,
294 ,
295 ,
296 ,
297 ,
298 ,
299 ,
300 ,
301 ,
302 ,
303 ,
304 ,
305 ,
306 ,
307 ,
308 ,
309 ,
310 ,
311 ,
312 ,
313 ,
314 ,
315 ,
316 ,
317 ,
318 ,
319 ,
320 ,
321 ,
322 ,
323 ,
324 ,
325 ,
326 ,
327 ,
328 ,
329 ,
330 ,
331 ,
332 ,
333 ,
334 ,
335 ,
336 ,
337 ,
338 ,
339 ,
340 ,
341 ,
342 ,
343 ,
344 ,
345 ,
346 ,
347 ,
348 ,
349 ,
350 ,
351 ,
352 ,
353 ,
354 ,
355 ,
356 ,
357 ,
358 ,
359 ,
360 ,
361 ,
362 ,
363 ,
364 ,
365 ,
366 ,
367 ,
368 ,
369 ,
370 ,
371 ,
372 ,
373 ,
374 ,
375 ,
376 ,
377 ,
378 ,
379 ,
380 ,
381 ,
382 ,
383 ,
384 ,
385 ,
386 ,
387 ,
388 ,
389 ,
390 ,
391 ,
392 ,
393 ,
394 ,
395 ,
396 ,
397 ,
398 ,
399 ,
400 ,
401 ,
402 ,
403 ,
404 ,
405 ,
406 ,
407 ,
408 ,
409 ,
410 ,
411 ,
412 ,
413 ,
414 ,
415 ,
416 ,
417 ,
418 ,
419 ,
420 ,
421 ,
422 ,
423 ,
424 ,
425 ,
426 ,
427 ,
428 ,
429 ,
430 ,
431 ,
432 ,
433 ,
434 ,
435 ,
436 ,
437 ,
438 ,
439 ,
440 ,
441 ,
442 ,
443 ,
444 ,
445 ,
446 ,
447 ,
448 ,
449 ,
450 ,
451 ,
452 ,
453 ,
454 ,
455 ,
456 ,
457 ,
458 ,
459 ,
460 ,
461 ,
462 ,
463 ,
464 ,
465 ,
466 ,
467 ,
468 ,
]
self.layer_nodes_sets['Layer0'] = layer_nodes_list
## CONTACT MASTER NODES ##########################################
self.contact_master_nodes = [
6 ,
8 ,
15 ,
22 ,
25 ,
33 ,
37 ,
41 ,
49 ,
55 ,
56 ,
67 ,
73 ,
74 ,
79 ,
91 ,
92 ,
97 ,
109 ,
110 ,
115 ,
127 ,
128 ,
133 ,
145 ,
146 ,
151 ,
163 ,
164 ,
169 ,
181 ,
182 ,
187 ,
199 ,
200 ,
205 ,
217 ,
218 ,
223 ,
231 ,
233 ,
241 ,
244 ,
245 ,
250 ,
262 ,
263 ,
268 ,
280 ,
281 ,
286 ,
298 ,
299 ,
304 ,
316 ,
317 ,
322 ,
334 ,
335 ,
340 ,
352 ,
353 ,
358 ,
370 ,
371 ,
376 ,
388 ,
389 ,
394 ,
406 ,
407 ,
412 ,
424 ,
425 ,
430 ,
442 ,
443 ,
448 ,
456 ,
458 ,
466 ,
]
## CONTACT SLAVE NODES ###########################################
self.contact_slave_nodes = [
3 ,
7 ,
12 ,
20 ,
21 ,
32 ,
40 ,
45 ,
52 ,
62 ,
66 ,
72 ,
83 ,
87 ,
90 ,
102 ,
105 ,
108 ,
120 ,
123 ,
126 ,
138 ,
141 ,
144 ,
156 ,
159 ,
162 ,
174 ,
177 ,
180 ,
192 ,
195 ,
198 ,
210 ,
213 ,
216 ,
228 ,
232 ,
239 ,
253 ,
257 ,
261 ,
273 ,
276 ,
279 ,
291 ,
294 ,
297 ,
309 ,
312 ,
315 ,
327 ,
330 ,
333 ,
345 ,
348 ,
351 ,
363 ,
366 ,
369 ,
381 ,
384 ,
387 ,
399 ,
402 ,
405 ,
417 ,
420 ,
423 ,
435 ,
438 ,
441 ,
453 ,
457 ,
464 ,
]
## INNER BOUNDARY NODES ##########################################
self.inner_boundary_nodes = [
]
##################################################################
print "layer sets stored"
##################################################################
## STORE NODES ON GROUND SURFACE #################################
##################################################################
self.top_surface_nodes = []
print "nodes on ground surface stored"
##################################################################
## ACTIVATION ####################################################
##################################################################
self.deac = DeactivationUtility()
self.SetUpActivationLevels( self.model_part, self.activation_flags )
self.deac.Initialize( self.model_part )
print "activation utility initialized"
##################################################################
## MESH TYING ####################################################
##################################################################
self.mesh_tying_utility= MeshTyingUtility()
self.mesh_tying_utility.InitializeMeshTyingUtility(self.model_part)
print "mesh-tying utility successfully initialized"
print "model successfully initialized"
self.SetCalculateInSituStress( False )
def FinalizeModel( self ):
self.gid_io.CloseResultFile()
def SetCalculateInSituStress( self, calculation_flag ):
self.insitu_stress_flag = calculation_flag
def Solve( self, time, from_deac, to_deac, from_reac, to_reac ):
self.deac.Reactivate( self.model_part, from_reac, to_reac )
self.deac.Deactivate( self.model_part, from_deac, to_deac )
self.model_part.CloneTimeStep(time)
self.model_part.ProcessInfo.SetValue( CALCULATE_INSITU_STRESS, self.insitu_stress_flag )
self.solver.Solve()
##################################################################
```
#### File: trilinos_application/python_scripts/trilinos_structural_solver_static.py
```python
from Kratos import *
from KratosStructuralApplication import *
from KratosTrilinosApplication import *
def AddVariables(model_part):
model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
## model_part.AddNodalSolutionStepVariable(DISPLACEMENT_OLD);
## model_part.AddNodalSolutionStepVariable(DISPLACEMENT_NULL);
## model_part.AddNodalSolutionStepVariable(DISPLACEMENT_EINS);
## model_part.AddNodalSolutionStepVariable(DISPLACEMENT_DT);
## model_part.AddNodalSolutionStepVariable(DISPLACEMENT_NULL_DT);
## model_part.AddNodalSolutionStepVariable(DISPLACEMENT_EINS_DT);
## model_part.AddNodalSolutionStepVariable(ACCELERATION);
## model_part.AddNodalSolutionStepVariable(ACCELERATION_NULL);
## model_part.AddNodalSolutionStepVariable(ACCELERATION_EINS);
## model_part.AddNodalSolutionStepVariable(VELOCITY);
## model_part.AddNodalSolutionStepVariable(ACCELERATION);
model_part.AddNodalSolutionStepVariable(REACTION);
model_part.AddNodalSolutionStepVariable(NEGATIVE_FACE_PRESSURE);
model_part.AddNodalSolutionStepVariable(POSITIVE_FACE_PRESSURE);
model_part.AddNodalSolutionStepVariable(INSITU_STRESS);
model_part.AddNodalSolutionStepVariable(FACE_LOAD);
print "variables for the dynamic structural solution added correctly"
def AddDofs(model_part):
for node in model_part.Nodes:
#adding dofs
node.AddDof(DISPLACEMENT_X,REACTION_X);
node.AddDof(DISPLACEMENT_Y,REACTION_Y);
node.AddDof(DISPLACEMENT_Z,REACTION_Z);
print "dofs for the dynamic structural solution added correctly"
class StaticStructuralSolver:
#######################################################################
def __init__(self,model_part,domain_size):
self.model_part = model_part
self.time_scheme = TrilinosResidualBasedIncrementalUpdateStaticScheme()
self.Comm = CreateCommunicator()
#definition of the solvers
self.structure_linear_solver = TrilinosLinearSolver()
#definition of the convergence criteria
self.conv_criteria = TrilinosDisplacementCriteria(1e-6,1e-9,self.Comm)
self.CalculateReactionFlag = False
self.ReformDofSetAtEachStep = False
self.MoveMeshFlag = True
self.calculate_norm_dx_flag = False
self.max_iterations = 10
if(domain_size == 2):
self.guess_row_size = 15
else:
self.guess_row_size = 45
self.guess_row_size = 18
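# note: this assignment overrides the 2D/3D guess chosen just above, so guess_row_size
# effectively ends up as 18 in this script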
#######################################################################
def Initialize(self):
## p_builder = TrilinosBuilderAndSolver(self.Comm,self.guess_row_size,self.structure_linear_solver)
##
## #creating the solution strategy
## self.solver = ResidualBasedNewtonRaphsonStrategy(self.model_part, self.time_scheme,self.structure_linear_solver, self.conv_criteria, p_builder,self.max_iterations, self.CalculateReactionFlag,self.ReformDofSetAtEachStep, self.MoveMeshFlag)
import trilinos_strategy_python
self.solver = trilinos_strategy_python.SolvingStrategyPython(self.model_part,self.time_scheme,self.structure_linear_solver,self.conv_criteria,self.CalculateReactionFlag,self.ReformDofSetAtEachStep,self.MoveMeshFlag,self.Comm,self.guess_row_size)
#######################################################################
def Solve(self):
(self.solver).Solve()
#######################################################################
def SetEchoLevel(self,level):
(self.solver).SetEchoLevel(level)
```
#### File: ULFapplication/python_scripts/ulf_fsi.py
```python
from Kratos import *
from KratosULFApplication import *
from KratosStructuralApplication import *
from KratosMeshingApplication import *
#import time
def AddVariables(model_part):
model_part.AddNodalSolutionStepVariable(PRESSURE);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
model_part.AddNodalSolutionStepVariable(VELOCITY);
model_part.AddNodalSolutionStepVariable(ACCELERATION);
model_part.AddNodalSolutionStepVariable(POSITIVE_FACE_PRESSURE);
model_part.AddNodalSolutionStepVariable(DENSITY);
model_part.AddNodalSolutionStepVariable(VISCOSITY);
model_part.AddNodalSolutionStepVariable(NODAL_AREA);
model_part.AddNodalSolutionStepVariable(BODY_FORCE);
model_part.AddNodalSolutionStepVariable(FORCE);
model_part.AddNodalSolutionStepVariable(IS_FLUID);
model_part.AddNodalSolutionStepVariable(IS_INTERFACE);
model_part.AddNodalSolutionStepVariable(IS_STRUCTURE);
model_part.AddNodalSolutionStepVariable(IS_BOUNDARY);
model_part.AddNodalSolutionStepVariable(IS_FREE_SURFACE);
model_part.AddNodalSolutionStepVariable(IS_LAGRANGIAN_INLET);
model_part.AddNodalSolutionStepVariable(BULK_MODULUS);
model_part.AddNodalSolutionStepVariable(NODAL_H);
model_part.AddNodalSolutionStepVariable(NORMAL);
def AddDofs(model_part):
for node in model_part.Nodes:
#adding dofs
node.AddDof(DISPLACEMENT_X);
node.AddDof(DISPLACEMENT_Y);
node.AddDof(DISPLACEMENT_Z);
node.AddDof(IS_STRUCTURE);
class ULF_FSISolver:
def __init__(self, fluid_model_part, structure_model_part, combined_model_part, box_corner1,box_corner2, domain_size, gid_io):
self.domain_size=domain_size;
self.echo_level = 0
self.gid_io=gid_io
self.tttt=0
#saving the different model parts
self.combined_model_part = combined_model_part; #contains both structure and fluid
self.fluid_model_part = fluid_model_part; #contains only fluid elements
self.structure_model_part = structure_model_part; #contains only structural elements
#time integration scheme
damp_factor = -0.3
self.time_scheme = ResidualBasedPredictorCorrectorBossakScheme(damp_factor)
#definition of the solvers
# self.model_linear_solver = SkylineLUFactorizationSolver()
pDiagPrecond = DiagonalPreconditioner()
self.model_linear_solver = BICGSTABSolver(1e-8, 5000,pDiagPrecond)
#definition of the convergence criteria
self.conv_criteria = DisplacementCriteria(1e-6,1e-9)
self.pressure_calculate_process = PressureCalculateProcess(fluid_model_part,domain_size);
self.ulf_apply_bc_process = UlfApplyBCProcess(fluid_model_part);
self.ulf_time_step_dec_process = UlfTimeStepDecProcess(fluid_model_part);
self.mark_fluid_process = MarkFluidProcess(fluid_model_part);
self.mark_close_nodes_process = MarkCloseNodesProcess(fluid_model_part);
self.mark_outer_nodes_process = MarkOuterNodesProcess(fluid_model_part);
self.node_erase_process = NodeEraseProcess(fluid_model_part);
#tools to save and merge the structural contributions
self.save_structure_model_part_process = SaveStructureModelPartProcess();
self.save_structure_conditions_process = SaveStructureConditionsProcess();
self.merge_model_parts_process = MergeModelPartsProcess();
###temporary ... i need it to calculate the nodal area
self.UlfUtils = UlfUtils()
#self.save_structural_elements
self.alpha_shape = 1.5;
self.h_multiplier = 0.3
##saving the limits of the box (all the nodes external to this will be erased)
self.box_corner1 = box_corner1
self.box_corner2 = box_corner2
if(domain_size == 2):
self.Mesher = TriGenPFEMModeler()
self.combined_neigh_finder = FindNodalNeighboursProcess(combined_model_part,9,18)
self.fluid_neigh_finder = FindNodalNeighboursProcess(fluid_model_part,9,18)
elif (domain_size == 3):
#self.Mesher = TetGenModeler()
#improved mesher
self.Mesher = TetGenPfemModeler()
self.combined_neigh_finder = FindNodalNeighboursProcess(combined_model_part,20,30)
self.fluid_neigh_finder = FindNodalNeighboursProcess(fluid_model_part,20,30)
print "after reading all the model contains:"
print self.fluid_model_part
#detect initial size distribution - note that initially the fluid model part contains
#all the elements of both structure and fluid ... this is only true after reading the input
(self.fluid_neigh_finder).Execute();
Hfinder = FindNodalHProcess(fluid_model_part);
Hfinder.Execute();
#######################################################################
#delta time estimation based on the non-negativity of the jacobian
def EstimateDeltaTime(self,max_dt,domain_size):
#return (self.UlfUtils).EstimateDeltaTime(min_dt,max_dt,self.combined_model_part)
return (self.ulf_time_step_dec_process).EstimateDeltaTime(max_dt,domain_size)
#######################################################################
def Initialize(self):
#creating the solution strategy
CalculateReactionFlag = False
ReformDofSetAtEachStep = True
MoveMeshFlag = True
import ulf_strategy_python
self.solver = ulf_strategy_python.ULFStrategyPython(self.combined_model_part,self.time_scheme,self.model_linear_solver,self.conv_criteria,CalculateReactionFlag,ReformDofSetAtEachStep,MoveMeshFlag,self.domain_size)
print "self.echo_level = " , self.echo_level
(self.solver).SetEchoLevel(self.echo_level)
print "finished initialization of the fluid strategy"
#saving the structural elements
(self.mark_fluid_process).Execute(); #we need this before saving the structural elements
print "Saving STRUCTURE"
(self.save_structure_model_part_process).SaveStructure(self.fluid_model_part, self.structure_model_part, self.domain_size);
(self.save_structure_conditions_process).SaveStructureConditions(self.fluid_model_part, self.structure_model_part, self.domain_size);
#marking the fluid
(self.fluid_neigh_finder).Execute();
(self.ulf_apply_bc_process).Execute();
(self.mark_fluid_process).Execute();
#remeshing before the first solution
self.Remesh();
######################################################################
def CheckForInvertedElements(self):
#volume = (self.UlfUtils).CalculateVolume(self.combined_model_part,self.domain_size)
volume = (self.UlfUtils).CalculateVolume(self.fluid_model_part,self.domain_size)
inverted_elements = False
if(volume < 0.0):
volume = - volume
inverted_elements = True
return [inverted_elements,volume]
#######################################################################
def Solve(self):
print "solving the fluid problem"
inverted_elements = (self.solver).Solve(self.domain_size,self.UlfUtils)
print "succesful solution of the fluid "
reduction_factor = 0.5
max_reduction_steps = 5
time_reduction_step = 0
while(inverted_elements == True and time_reduction_step <= max_reduction_steps):
print " *************************************************** "
print "inverted element found ... reducing the time step"
(self.UlfUtils).ReduceTimeStep(self.combined_model_part,reduction_factor);
(self.UlfUtils).ReduceTimeStep(self.fluid_model_part,reduction_factor);
(self.UlfUtils).ReduceTimeStep(self.structure_model_part,reduction_factor);
print "reduction_step = ", time_reduction_step
time_reduction_step = time_reduction_step + 1
#copying vars from the old step
## for node in (self.combined_model_part).Nodes:
## pold = node.GetSolutionStepValue(PRESSURE,1);
## dispold = node.GetSolutionStepValue(DISPLACEMENT,1);
## velold = node.GetSolutionStepValue(VELOCITY,1);
## accold = node.GetSolutionStepValue(ACCELERATION,1);
##
## node.SetSolutionStepValue(PRESSURE,0,pold);
## node.SetSolutionStepValue(DISPLACEMENT,0,dispold);
## node.SetSolutionStepValue(VELOCITY,0,velold);
## node.SetSolutionStepValue(ACCELERATION,0,accold);
self.solver.MoveMesh()
print "time step reduction completed"
print " *************************************************** "
(self.solver).Solve(self.domain_size,self.UlfUtils)
[inverted_elements,vol] = self.CheckForInvertedElements()
if(inverted_elements == True):
print "***********************************************************************"
print "***********************************************************************"
print "CRITICAL: ... element is still inverted after reducing the time step"
print "***********************************************************************"
print "***********************************************************************"
factor = 2.0**5 #this is the original time step
(self.UlfUtils).ReduceTimeStep(self.combined_model_part,factor);
(self.UlfUtils).ReduceTimeStep(self.fluid_model_part,factor);
(self.UlfUtils).ReduceTimeStep(self.structure_model_part,factor);
## for node in (self.combined_model_part).Nodes:
## pold = node.GetSolutionStepValue(PRESSURE,1);
## dispold = node.GetSolutionStepValue(DISPLACEMENT,1);
## velold = node.GetSolutionStepValue(VELOCITY,1);
## accold = node.GetSolutionStepValue(ACCELERATION,1);
##
## node.SetSolutionStepValue(PRESSURE,0,pold);
## node.SetSolutionStepValue(DISPLACEMENT,0,dispold);
## node.SetSolutionStepValue(VELOCITY,0,velold);
## node.SetSolutionStepValue(ACCELERATION,0,accold);
self.solver.MoveMesh()
print "advancing in time without doing anything..."
(self.solver).PredictionStep(self.domain_size,self.UlfUtils)
#print "pressure contribution process" - to be executed using exclusively fluid elements
#and neighbouring relationships
(self.fluid_neigh_finder).Execute();
(self.UlfUtils).CalculateNodalArea(self.fluid_model_part,self.domain_size);
(self.pressure_calculate_process).Execute();
#print "remeshing"
self.Remesh();
######################################################################
def Remesh(self):
#(self.UlfUtils).MarkNodesCloseToFS(self.fluid_model_part, 2)
##erase all conditions and elements prior to remeshing
((self.combined_model_part).Elements).clear();
((self.combined_model_part).Conditions).clear();
((self.combined_model_part).Nodes).clear();
((self.fluid_model_part).Elements).clear();
((self.fluid_model_part).Conditions).clear();
#and erase bad nodes
#(self.mark_close_nodes_process).MarkCloseNodes(self.h_multiplier);
#(self.mark_outer_nodes_process).MarkOuterNodes(self.box_corner1, self.box_corner2);
#(self.node_erase_process).Execute();
##remesh CHECK for 3D or 2D
if (self.domain_size == 2):
(self.Mesher).ReGenerateUpdatedLagrangian(self.fluid_model_part, self.node_erase_process, self.alpha_shape)
#(self.Mesher).ReGenerateUpdatedLagrangian(self.fluid_model_part,self.alpha_shape)
elif (self.domain_size == 3):
#(self.Mesher).ReGenerateUpdatedLagrangian3D(self.fluid_model_part,self.alpha_shape)
#improved quality mesher
(self.Mesher).ReGenerateMeshPfemUlf3D(self.fluid_model_part,self.alpha_shape)
##calculating fluid neighbours before applying boundary conditions
(self.fluid_neigh_finder).Execute();
## (self.UlfUtils).CalculateNodalArea(self.fluid_model_part,self.domain_size);
#print "marking fluid" and applying fluid boundary conditions
(self.ulf_apply_bc_process).Execute();
(self.mark_fluid_process).Execute();
#merging the structural elements back (they are saved in the Initialize)
(self.merge_model_parts_process).MergeParts(self.fluid_model_part, self.structure_model_part, self.combined_model_part);
#calculating the neighbours for the overall model
(self.combined_neigh_finder).Execute();
#(self.UlfUtils).CalculateNodalArea(self.fluid_model_part,self.domain_size);
## for elem in self.combined_model_part.Elements:
## print elem
print "end of remesh fucntion"
######################################################################
def FindNeighbours(self):
(self.neigh_finder).Execute();
```
#### File: test_exemples/firstNISTpaolo.gid/firstNISTpaolo.py
```python
domain_size = 2
#total simulation time
total_time = 2.0
#the max time step - it may be decreased in case it is necessary to avoid element inversion
max_delta_time = 0.001
#output time (every xxx seconds)
output_dt = 0.005
#safety factor for the delta time estimation
safety_factor = 0.5
#PATH of where the kratos library is installed
kratos_libs_path = '../../../../libs/'
#PATH of where your application is installed
kratos_applications_path = '../../../../applications/'
project_name = 'firstNISTpaolo'
##################################################################
##################################################################
## ATTENTION: here the order is important
import sys
sys.path.append(kratos_libs_path)
sys.path.append(kratos_applications_path)
#importing Kratos main library
from Kratos import *
kernel = Kernel() #defining kernel
#importing applications
import applications_interface
applications_interface.Import_PFEMApplication = True
applications_interface.Import_ULFApplication = True
applications_interface.Import_StructuralApplication = True
applications_interface.Import_ConvectionDiffusionApplication = True
###applications_interface.Import_ExternalSolversApplication = True
applications_interface.ImportApplications(kernel, kratos_applications_path)
## from now on the order is not anymore crucial
##################################################################
##################################################################
print kernel
print "aaa"
from KratosULFApplication import *
print "bbb"
##from KratosPFEMApplication import *
from KratosConvectionDiffusionApplication import *
print "ccc"
from KratosExternalSolversApplication import *
def PrintLevel(time,outfile,level):
out = str(time) + " " + str(level) + "\n"
outfile.write(out)
outfile.flush()
#Calculate mass loss rate -- assume sample is 10 cm wide
def PrintVolume(time,outfile,volume0):
checkresults = solver.CheckForInvertedElements()
volume = checkresults.pop()
volume_percent = (volume/volume0)*100.
density = 900. # (kg/m^3) Should actually use value set in NISTParameters.py
thickfrac = 0.1 # fraction of 1 meter for thickness perpendicular to 2D plane
mass = volume * thickfrac * density * 1000. # convert to grams
outstring = str(time) + " "
outstring += str(volume) + " "
outstring += str(volume_percent) + " "
outstring += str(mass) + "\n"
outfile.write( outstring )
outfile.flush()
#Calculate mass above and below y=0 line -- assume sample is 10 cm wide
def PrintVolumeCatchpan(time,outfile,volume0):
checkresults = solver.CheckForInvertedElements()
volume = checkresults.pop()
volume_percent = (volume/volume0)*100.
objectvol = 0.
catchpanvol = 0.
for node in fluid_model_part.Nodes:
if (node.Y > 0.0):
objectvol=objectvol + node.GetSolutionStepValue(NODAL_AREA)
else:
catchpanvol=catchpanvol + node.GetSolutionStepValue(NODAL_AREA)
density = 900. # (kg/m^3) Should actually use value set in NISTParameters.py
thickfrac = 0.1 # fraction of 1 meter for thickness perpendicular to 2D plane
mass = volume * thickfrac * density * 1000. # convert to grams
objectmass = objectvol * thickfrac * density * 1000. # convert to grams
catchpanmass = catchpanvol * thickfrac * density * 1000. # convert to grams
outstring = str(time) + " "
outstring += str(objectvol) + " "
outstring += str(objectmass) + " "
outstring += str(catchpanvol) + " "
outstring += str(catchpanmass) + " "
outstring += str(volume) + " "
outstring += str(mass) + " "
outstring += str(volume_percent) + "\n"
outfile.write( outstring )
outfile.flush()
#this is for the debugging
##x = raw_input("stopped to allow debug: set breakpoints and press enter to continue");
#defining a model part
fluid_model_part = ModelPart("FluidPart");
temperature_model_part = ModelPart("TemperaturePart");
structure_model_part = ModelPart("StructurePart");
combined_model_part = ModelPart("CombinedPart");
#adding of Variables to Model Part should be here when the "very fix container will be ready"
#importing the solver files
import ulf_fsi
ulf_fsi.AddVariables(fluid_model_part)
import nonlinear_convection_diffusion_solver
nonlinear_convection_diffusion_solver.AddVariables(fluid_model_part) #the nodes are the same
#reading a model
gid_io = GidIO(project_name,GiDPostMode.GiD_PostBinary)
gid_io.ReadModelPart(fluid_model_part)
#gid_io.ReadMesh(model_part.GetMesh())
print fluid_model_part
for node in fluid_model_part.Nodes:
node.SetSolutionStepValue(BULK_MODULUS,0,-10000.0);
fluid_model_part.GetMesh().Properties[1][BULK_MODULUS] = -10000.0
#generating temperature model part
from KratosIncompressibleFluidApplication import *
##from KratosPFEMApplication import *
from KratosULFApplication import *
#import KratosPFEMApplication
NISTTools = NistUtils()
NISTTools.GenerateModelPart(fluid_model_part,temperature_model_part,domain_size);
#the buffer size should be set up here after the mesh is read for the first time
fluid_model_part.SetBufferSize(2)
ulf_fsi.AddDofs(fluid_model_part)
nonlinear_convection_diffusion_solver.AddDofs(temperature_model_part) #the nodes are the same
import NistParameters
NistParameters.InitialConditions(fluid_model_part)
#setting the limits of the bounding box
box_corner1 = Vector(3); box_corner1[0]=-0.1030001; box_corner1[1]=-0.0330001; box_corner1[2]=-0.1;
box_corner2 = Vector(3); box_corner2[0]=0.1030001; box_corner2[1]=0.100001; box_corner2[2]=0.1;
#creating a fluid solver object
name = project_name
solver = ulf_fsi.ULF_FSISolver(fluid_model_part, structure_model_part, combined_model_part, box_corner1, box_corner2, domain_size)
solver.alpha_shape = 1.5;
solver.echo_level = 1;
solver.h_multiplier = 0.3
solver.model_linear_solver = SkylineLUFactorizationSolver() # not available on Windows
#solver.model_linear_solver = SuperLUSolver() # not available on Windows
#initializing the solver
solver.Initialize()
scalarout = open("volume_history.out", 'w')
scalarout.write("time volume(m^2) volumepercent(%) mass(g) \n")
checkresults = solver.CheckForInvertedElements()
volume0 = checkresults.pop()
catchpanout = open("volume_history_catchpan.out", 'w')
catchpanout.write("time(s) objectvol(m^2) objectmass(g) catchpanvol(m2) catchpanmass(g) totalvol(m2) totalmass(g) totalvolpercent(%) \n")
checkresults = solver.CheckForInvertedElements()
volume0 = checkresults.pop()
#convection diffusion solver
temperature_solver = nonlinear_convection_diffusion_solver.ConvectionDiffusionSolver(temperature_model_part,domain_size)
#temperature_solver = convection_diffusion_solver.ConvectionDiffusionSolver(temperature_model_part,domain_size)
temperature_solver.time_order = 1
temperature_solver.ReformDofAtEachIteration = True
temperature_solver.echo_level = 0
temperature_solver.Initialize()
Dt = 0.001
nsteps = 20000
#output_Dt = 0.05
output_Dt = 0.5
min_dt = 0.0002
#max_dt = 0.05
max_dt = 0.05
safety_factor = 0.1
next_output_time = output_Dt
time = Dt
#OutputStep(time,gid_io,fluid_model_part,domain_size)
NistParameters.InitialConditions(fluid_model_part)
NISTTools.ApplyInitialTemperature(fluid_model_part,500.0);
NistParameters.CalculateViscosity(fluid_model_part.Nodes)
face_heat_util = FaceHeatUtilities()
for node in fluid_model_part.Nodes:
node.Fix(DISPLACEMENT_Z);
node.SetSolutionStepValue(DISPLACEMENT_Z,0,0.0);
time = 0.00
step = 0
PrintVolume (time, scalarout, volume0)
PrintVolumeCatchpan (time, catchpanout, volume0)
while(time < 1500.0):
step = step + 1
print "min_dt", min_dt
print "max_dt", max_dt
new_Dt = solver.EstimateDeltaTime(max_dt,domain_size)
print "forever dt", new_Dt
if(step < 10):
new_Dt = 0.01*new_Dt
if(time < 150):
new_Dt =4.0
time = time + new_Dt*safety_factor
fluid_model_part.CloneTimeStep(time)
structure_model_part.CloneTimeStep(time)
combined_model_part.CloneTimeStep(time)
temperature_model_part.CloneTimeStep(time)
print time
#solving the fluid problem
if(step > 3):
## NistParameters.ApplyBoundaryconditions(fluid_model_part.Nodes)
NISTTools.GenerateModelPart(fluid_model_part,temperature_model_part,domain_size);
face_heat_util.ApplyFaceHeat(fluid_model_part.Conditions,30000.0);
temperature_solver.Solve()
print "after solving for the temperature"
NistParameters.CalculateViscosity(fluid_model_part.Nodes) #applying the viscosity
## #clearing the model part
(temperature_model_part.Nodes).clear()
(temperature_model_part.Elements).clear()
(temperature_model_part.Conditions).clear()
## for node in fluid_model_part.Nodes:
## if(node.GetSolutionStepValue(VISCOSITY) > 1.0):
## node.Fix(DISPLACEMENT_X)
## node.Fix(DISPLACEMENT_Y)
## node.Fix(DISPLACEMENT_Z)
## else:
## if(node.GetSolutionStepValue(IS_STRUCTURE) != 1):
## node.Free(DISPLACEMENT_X)
## node.Free(DISPLACEMENT_Y)
## node.Free(DISPLACEMENT_Z)
#finalizing fluid calculation step
solver.Solve()
print "after completing the solution"
PrintVolume (time, scalarout, volume0)
PrintVolumeCatchpan (time, catchpanout, volume0)
if(time > next_output_time and time > 150.0):
file_name = "firstNISTtest_lagrangian"
file_name = file_name + str(time)
gid_io.ChangeOutputName(file_name,GiDPostMode.GiD_PostBinary);
gid_io.WriteMesh2D((combined_model_part).GetMesh(),domain_size,GiDPostMode.GiD_PostBinary);
gid_io.WriteNodalResults(DISPLACEMENT, combined_model_part.Nodes, time, 0);
gid_io.WriteNodalResults(NODAL_H, combined_model_part.Nodes, time, 0);
gid_io.WriteNodalResults(IS_FLUID, combined_model_part.Nodes, time, 0);
gid_io.WriteNodalResults(IS_BOUNDARY, combined_model_part.Nodes, time, 0);
gid_io.WriteNodalResults(IS_FREE_SURFACE, combined_model_part.Nodes, time, 0);
gid_io.WriteNodalResults(IS_STRUCTURE, combined_model_part.Nodes, time, 0);
gid_io.WriteNodalResults(VELOCITY, combined_model_part.Nodes, time, 0);
gid_io.WriteNodalResults(PRESSURE, (combined_model_part).Nodes, time, 0);
gid_io.WriteNodalResults(BULK_MODULUS, (combined_model_part).Nodes, time, 0);
gid_io.WriteNodalResults(NODAL_AREA, (combined_model_part).Nodes, time, 0);
gid_io.WriteNodalResults(VISCOSITY, (combined_model_part).Nodes, time, 0);
gid_io.WriteNodalResults(TEMPERATURE, (combined_model_part).Nodes, time, 0);
gid_io.Flush()
#gid_io.CloseResultFile();
next_output_time = next_output_time + output_Dt;
print "completed step ",step
##
##
##
##
##Dt = 0.005
##nsteps = 10000
###output_Dt = 0.05
##output_Dt = output_dt
##min_dt = 0.00001
##max_dt = max_delta_time
##safety_factor = 0.5 #you should put a safety factor ;-)!!!
##
##next_output_time = output_Dt
##
###initializing the solver
##solver.Initialize()
##
##time = 0.0
##step = 0
##
##
##while (time < total_time):
## step = step+1
##
##
##
##
## print time
## if(step <= 3):
## new_Dt = 0.00000001;
## time = time + new_Dt*safety_factor
##
## #solving the fluid problem
## if(step > 3):
## new_Dt = solver.EstimateDeltaTime(max_dt,domain_size)
## time = time + new_Dt*safety_factor
##
## fluid_model_part.CloneTimeStep(time)
## structure_model_part.CloneTimeStep(time)
## combined_model_part.CloneTimeStep(time)
##
## print "before the solution"
##
## solver.Solve()
##
## print "after completing the solution"
##
## if(time > next_output_time):
##
## file_name = project_name
## file_name = file_name + str(time)
##
## gid_io.ChangeOutputName(file_name,GiDPostMode.GiD_PostBinary);
## gid_io.WriteMesh((combined_model_part).GetMesh(),domain_size,GiDPostMode.GiD_PostBinary);
##
## gid_io.WriteNodalResults(DISPLACEMENT, combined_model_part.Nodes, time, 0);
## gid_io.WriteNodalResults(NODAL_H, combined_model_part.Nodes, time, 0);
## gid_io.WriteNodalResults(IS_FLUID, combined_model_part.Nodes, time, 0);
## gid_io.WriteNodalResults(IS_BOUNDARY, combined_model_part.Nodes, time, 0);
## gid_io.WriteNodalResults(IS_FREE_SURFACE, combined_model_part.Nodes, time, 0);
## gid_io.WriteNodalResults(IS_STRUCTURE, combined_model_part.Nodes, time, 0);
## gid_io.WriteNodalResults(VELOCITY, combined_model_part.Nodes, time, 0);
## gid_io.WriteNodalResults(PRESSURE, (combined_model_part).Nodes, time, 0);
## gid_io.WriteNodalResults(BODY_FORCE, (combined_model_part).Nodes, time, 0);
##
## gid_io.Flush()
## #gid_io.CloseResultFile();
##
## next_output_time = next_output_time + output_Dt;
##
##
##
##
##
##
``` |
{
"source": "JiaqiWangplus77/pytorch_tiramisu",
"score": 2
} |
#### File: pytorch_tiramisu/datasets/camvid.py
```python
import os
import torch
import torch.utils.data as data
import numpy as np
from PIL import Image
from torchvision.datasets.folder import is_image_file, default_loader
classes = ['0_intact_road','5_crack']
#classes = ['0_intact_road', '1_applied_patch', '2_pothole',
# '3_inlaid_patch', '4_open_joint', '5_crack']
#classes = ['Sky', 'Building', 'Column-Pole', 'Road',
#           'Sidewalk', 'Tree', 'Sign-Symbol', 'Fence', 'Car', 'Pedestrian',
# 'Bicyclist', 'Void']
# https://github.com/yandex/segnet-torch/blob/master/datasets/camvid-gen.lua
#class_weight = torch.FloatTensor([
# 0.58872014284134, 0.51052379608154, 2.6966278553009,
# 0.45021694898605, 1.1785038709641, 0.77028578519821, 2.4782588481903,
# 2.5273461341858, 1.0122526884079, 3.2375309467316, 4.1312313079834, 0])
# to test the principle of class_weight
class_weight = torch.FloatTensor([6.25130619e-02, 8.05004041e-01, 5.62802143e+00, 1.33807906e+00,
1.31966127e+00, 8.27907404e-01])
# original weight for crack: 1.27907404e-01
# original weight for crack 2.25130619e-03
#mean = [0.3980712041733329]
#std = [0.15851423320841515]
#CFD
mean = [0.49623959446225074]
std = [0.060382144781743356]
#mean = [0.41189489566336, 0.4251328133025, 0.4326707089857]
#std = [0.27413549931506, 0.28506257482912, 0.28284674400252]
class_color = [
(128, 128, 128), # gray 0_intact_road
# (128, 0, 0), # brown 1_applied_patch
# (192, 192, 128), # 2_pothole
# (128, 64, 128), # 3_inlaid_patch
# (0, 0, 192), # 4_open_joint blue+
(128, 128, 0), # 5_crack
(192, 128, 128),
(64, 64, 128),
(64, 0, 128),
(64, 64, 0),
(0, 128, 192),
(0, 0, 0),
]
def _make_dataset(dir):
images = []
for root, _, fnames in sorted(os.walk(dir)):
# generate the file names in directory tree
# root = 'SegNet-Tutorial/CamVid/train'
# fnames: all the filenames in this folder
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
# 'SegNet-Tutorial/CamVid/train/0006R0_f02430.png'
item = path
images.append(item)
return images
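# e.g. _make_dataset('SegNet-Tutorial/CamVid/train') (illustrative path) returns a flat list
# of full paths to every image file found anywhere under that directory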
class LabelToLongTensor(object):
def __call__(self, pic):
if isinstance(pic, np.ndarray):
# handle numpy array
label = torch.from_numpy(pic).long()
else:
label = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
label = label.view(pic.size[1], pic.size[0], 1)
label = label.transpose(0, 1).transpose(0, 2).squeeze().contiguous().long()
return label
class LabelTensorToPILImage(object):
def __call__(self, label):
label = label.unsqueeze(0)
colored_label = torch.zeros(3, label.size(1), label.size(2)).byte()
for i, color in enumerate(class_color):
mask = label.eq(i)
for j in range(3):
colored_label[j].masked_fill_(mask, color[j])
npimg = colored_label.numpy()
npimg = np.transpose(npimg, (1, 2, 0))
mode = None
if npimg.shape[2] == 1:
npimg = npimg[:, :, 0]
mode = "L"
return Image.fromarray(npimg, mode=mode)
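# LabelTensorToPILImage()(label) expects a (H, W) LongTensor of class indices and returns an
# RGB PIL image in which every pixel of class i is painted with class_color[i]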
#class RandomCrop(object):
# def __call__(self, img, target,size):
#
# return Image.fromarray(npimg, mode=mode)
class CamVid(data.Dataset):
def __init__(self, root, split='train', joint_transform=None,
transform=None, target_transform=LabelToLongTensor(),
download=False,
loader=default_loader):
self.root = root
assert split in ('train', 'val', 'test')  # split must be one of these
self.split = split
self.transform = transform
self.target_transform = target_transform
self.joint_transform = joint_transform
self.loader = loader
# self.class_weight = class_weight
self.classes = classes # name of each class
# self.mean = mean
# self.std = std
if download:
self.download()
self.imgs = _make_dataset(os.path.join(self.root, self.split))
# returns an empty list if there are no image files in the folder
def __getitem__(self, index):
path = self.imgs[index]
# if self.split == 'test':
# print('filename:\n',path, '\n')
#img = self.loader(path)
img = Image.open(path).convert('L') # for single channel
target_name = path.replace(self.split+'/', self.split + 'annot'+'/').replace('jpg','png')
target = Image.open(target_name)
if self.joint_transform is not None:
img, target = self.joint_transform([img, target])
if self.transform is not None:
img = self.transform(img)
target = self.target_transform(target)
return img, target,path
def __len__(self):
return len(self.imgs)
def download(self):
# TODO: please download the dataset from
# https://github.com/alexgkendall/SegNet-Tutorial/tree/master/CamVid
raise NotImplementedError
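# Usage sketch (illustrative only; the root path and transforms are assumptions, not taken
# from the original repository):
#   import torchvision.transforms as transforms
#   train_dset = CamVid('SegNet-Tutorial/CamVid/', split='train',
#                       transform=transforms.Compose([transforms.ToTensor(),
#                                                     transforms.Normalize(mean, std)]))
#   img, target, path = train_dset[0]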
class CamVid2(data.Dataset):
def __init__(self, root, path='', joint_transform=None,
transform=None, target_transform=LabelToLongTensor(),
download=False,
loader=default_loader):
self.root = root
self.path = path
# assert split in ('trainannot', 'valannot', 'testannot')  # split must be one of these
# self.split = split
self.transform = transform
self.target_transform = target_transform
self.joint_transform = joint_transform
self.loader = loader
# self.class_weight = class_weight
self.classes = classes # name of each class
# self.mean = mean
# self.std = std
if download:
self.download()
with open(path, "r") as file:
self.imgs = file.read().splitlines()
# returns an empty list if there are no image files in the folder
def __getitem__(self, index):
path = self.imgs[index]
img = Image.open(path)
target_path = path.replace("images", "labels").replace(".jpg", ".png")
target = Image.open(target_path)
#target = Image.open(target_name)
if self.joint_transform is not None:
img, target = self.joint_transform([img, target])
if self.transform is not None:
img = self.transform(img)
target = self.target_transform(target)
return img, target,path
def __len__(self):
return len(self.imgs)
def download(self):
# TODO: please download the dataset from
# https://github.com/alexgkendall/SegNet-Tutorial/tree/master/CamVid
raise NotImplementedError
```
#### File: utils/previous/color_test_delete_later.py
```python
import numpy as np
import utils.training_crack as train_utils
from sklearn.metrics import confusion_matrix
import cv2
import matplotlib.pyplot as plt
from PIL import Image
def calculate_confusion_matrix(targets,preds,cls_num,t=5):
'''
calculate the confusion matrix for the two-class (background / crack) case,
applying a tolerance margin of t pixels when matching predicted crack pixels
to the ground truth; also works on batched inputs
input:
targets: ground-truth label array of shape (B, H, W)
preds: predicted label array of shape (B, H, W)
t: tolerance margin in pixels
'''
pre = preds
gt = targets
# targets = targets.data.cpu().numpy().flatten('C')
# preds = preds.data.cpu().numpy().flatten('C')
c_matrix = confusion_matrix(gt.flatten('C'), pre.flatten('C'),labels=np.arange(cls_num))
b,w,h = gt.shape
r = []
for k in range(b):
num = 0
for i in range(w):
for j in range(h):
if pre[k,i,j] == 1 :
c = gt[k,max(0,i-t):min(w,i+t+1),max(0,j-t):min(h,j+t+1)]
if c[c==1].sum() > 1:
num += 1
r.append(num)
c_matrix[0,1] = c_matrix[0,1] - (sum(r) - c_matrix[1,1])
c_matrix[1,1] = sum(r)
return c_matrix
targets = (np.load('output/ts.npy')[0,:,:])
preds = (np.load('output/pred.npy')[0,:,:])/5
t = 5 # tolerance margin
def generation_TP_FP_FN(targets,preds,t = 5):
TP = np.zeros_like(targets)
FP = np.zeros_like(targets)
FN = np.zeros_like(targets)
w,h = preds.shape
for i in range(w):
for j in range(h):
if preds[i,j] == 1 :
c = targets[max(0,i-t):min(w,i+t+1),max(0,j-t):min(h,j+t+1)]
if c[c==1].sum() > 1:
TP[i,j] = 1
else:
FP[i,j] = 1
else:
if targets[i,j] == 1:
FN[i,j] = 1
return TP, FP, FN
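# TP marks predicted crack pixels that have more than one ground-truth crack pixel inside the
# (2*t+1) x (2*t+1) window around them (the check above is '> 1'), FP marks the remaining
# predicted crack pixels, and FN marks ground-truth crack pixels that were predicted as background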
#cm = calculate_confusion_matrix(targets[None,:,:],preds[None,:,:],2,t=5)
#print(cm)
TP, FP, FN = generation_TP_FP_FN(targets,preds,t = 5)
print(TP.sum(),FP.sum(),FN.sum())
w, h = FN.shape
img = np.ones([w,h,3])*255
def draw_color(img,TP,color):
ind = TP==1
img1 = img[:,:,0]
img2 = img[:,:,1]
img3 = img[:,:,2]
img1[ind] = color[0]
img2[ind] = color[1]
img3[ind] = color[2]
return np.stack((img1,img2,img3),axis=-1)
img = draw_color(img,TP,[255,0,0])
img = draw_color(img,FP,[0,255,0])
img = draw_color(img,FN,[0,0,255])
path = 'SegNet-Tutorial/CamVid/CFD/aug/aug2_180/images/112.jpg'
image_original = np.array(Image.open(path)).astype(np.float32)
w_img = 0.8
w_label = 0.2
img = img.astype(np.float32)
image_merged = cv2.addWeighted(image_original,w_img,img,w_label,0,dtype = cv2.CV_32F)
plt.figure()
plt.imshow(img)
```
#### File: pytorch_tiramisu/utils/training.py
```python
import os
import sys
import math
import string
import random
import shutil
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
import numpy as np
from . import imgs as img_utils
RESULTS_PATH = '.results/'
WEIGHTS_PATH = '.weights/'
class FocalLoss2d(nn.Module):
def __init__(self, gamma=2, size_average=True):
super(FocalLoss2d, self).__init__()
self.gamma = gamma
self.size_average = size_average
def forward(self, logit, target, class_weight=None, type='softmax'):
target = target.view(-1, 1).long()
if type=='sigmoid':
if class_weight is None:
class_weight = [1]*2 #[0.5, 0.5]
prob = F.sigmoid(logit)
prob = prob.view(-1, 1)
prob = torch.cat((1-prob, prob), 1)
select = torch.FloatTensor(len(prob), 2).zero_().cuda()
select.scatter_(1, target, 1.)
elif type=='softmax':
B,C,H,W = logit.size()
if class_weight is None:
class_weight =[1]*C #[1/C]*C
logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, C)
prob = F.softmax(logit,1)
select = torch.FloatTensor(len(prob), C).zero_().cuda()
select.scatter_(1, target, 1.)
class_weight = torch.FloatTensor(class_weight).cuda().view(-1,1)
class_weight = torch.gather(class_weight, 0, target)
prob = (prob*select).sum(1).view(-1,1)
prob = torch.clamp(prob,1e-8,1-1e-8)
batch_loss = - class_weight *(torch.pow((1-prob), self.gamma))*prob.log()
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss
return loss
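# Usage sketch (illustrative only; assumes a CUDA device because the loss allocates CUDA
# tensors internally):
#   criterion = FocalLoss2d(gamma=2)
#   logits = torch.randn(4, 2, 224, 224, requires_grad=True).cuda()   # B, C, H, W scores
#   labels = torch.randint(0, 2, (4, 224, 224)).cuda()                # B, H, W class indices
#   loss = criterion(logits, labels)
#   loss.backward()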
def save_weights(model, epoch, loss, err,weights_fpath= '.weights/'):
#weights_fname = 'weights-%d-%.3f-%.3f.pth' % (epoch, loss, err)
weights_fname = 'weights-%d.pth' % (epoch)
weights_fpath = os.path.join(weights_fpath, weights_fname)
torch.save({
'startEpoch': epoch,
'loss':loss,
'error': err,
'state_dict': model.state_dict()
}, weights_fpath)
shutil.copyfile(weights_fpath, WEIGHTS_PATH+'latest.th')
def load_weights(model, fpath):
print("loading weights '{}'".format(fpath))
weights = torch.load(fpath)
startEpoch = weights['startEpoch']
model.load_state_dict(weights['state_dict'])
print("loaded weights (lastEpoch {}, loss {}, error {})"
.format(startEpoch-1, weights['loss'], weights['error']))
return startEpoch
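# Usage sketch (illustrative only):
#   save_weights(model, epoch, trn_loss, trn_err, weights_fpath=WEIGHTS_PATH)
#   start_epoch = load_weights(model, WEIGHTS_PATH + 'latest.th')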
def get_predictions(output_batch):
bs,c,h,w = output_batch.size()
tensor = output_batch.data
values, indices = tensor.cpu().max(1)
    # for every pixel, get the largest probability and its index
indices = indices.view(bs,h,w)
return indices
def precision(confusion_matrix):
'''
precision = TP/(TP+FP) (axis=0)
'''
with np.errstate(divide='ignore',invalid='ignore'):
return np.diag(confusion_matrix)/confusion_matrix.sum(axis=0)
def recall(confusion_matrix):
'''
    recall = TP/(TP+FN) (axis=1)
'''
with np.errstate(divide='ignore',invalid='ignore'):
return np.diag(confusion_matrix)/confusion_matrix.sum(axis=1)
def F1(precision,recall):
'''
f1 = 2 * precision * recall / (precision + recall)
'''
with np.errstate(divide='ignore',invalid='ignore'):
f1 = 2 * np.multiply(precision,recall) / (recall + precision)
return f1
def calculate_confusion_matrix(targets,preds,cls_num,t=5):
'''
    calculate the confusion matrix with a tolerance margin (two-class segmentation only)
    should also work on batched input
input:
targets: tensor ground truth
preds: tensor predicted value
t: tolerance margin
'''
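    # The raw pixel-wise confusion matrix is computed first; the loop below then re-counts
    # predicted positives that fall within the t-pixel tolerance of a ground-truth positive,
    # moving them from the FP cell [0,1] into the TP cell [1,1].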
pre = preds.cpu().numpy()
gt = targets.cpu().numpy()
# targets = targets.data.cpu().numpy().flatten('C')
# preds = preds.data.cpu().numpy().flatten('C')
c_matrix = confusion_matrix(gt.flatten('C'), pre.flatten('C'),labels=np.arange(cls_num))
b,w,h = gt.shape
r = 0
for k in range(b):
r = 0
for i in range(w):
for j in range(h):
if pre[k,i,j] == 1 :
c = gt[k,max(0,i-t):min(w,i+t+1),max(0,j-t):min(h,j+t+1)]
if c[c==1].sum() > 1:
r += 1
c_matrix[0,1] = c_matrix[0,1] - (r - c_matrix[1,1])
c_matrix[1,1] = r
return c_matrix
def calculate_confusion_matrix0(targets,preds,cls_num=6):
'''
    calculate the confusion matrix for each target and its corresponding prediction
    should also work on batched input
input:
targets: tensor
preds: tensor
'''
targets = targets.data.cpu().numpy().flatten('C')
preds = preds.data.cpu().numpy().flatten('C')
c_matrix = confusion_matrix(targets, preds,labels=np.arange(cls_num))
return c_matrix
def pixel_accuracy(confusion_matrix):
'''
calculate pixel accuracy based on confusion matrix
'''
return np.diag(confusion_matrix).sum()/confusion_matrix.sum()
def weighted_pixel_accuracy_class(confusion_matrix):
'''
calculate class pixel accuracy based on confusion matrix
'''
# set the value to 1 to avoid dividing zero problem,
# based on the characteristic of confusion value
freq = confusion_matrix.sum(axis=1)/confusion_matrix.sum()
num_each_class = confusion_matrix.sum(axis=1)
# num_each_class[num_each_class==0] = 1
with np.errstate(divide='ignore',invalid='ignore'):
cls_PA = np.diag(confusion_matrix)/num_each_class
w_cls_PA = np.multiply(freq[cls_PA>=0],cls_PA[cls_PA>=0]).sum()
return w_cls_PA
def IoU(confusion_matrix):
'''
calculate the intersection over Union for each class
'''
intersection = np.diag(confusion_matrix)
union_part = confusion_matrix.sum(axis=0) + confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)
# # set the value to 1 to avoid dividing zero problem,
# # based on the characteristic of confusion value
# union_part[union_part==0] = 1
with np.errstate(divide='ignore',invalid='ignore'):
IoU = intersection / union_part
return IoU
def MIoU(IoU):
'''
calculate the mean intersection over Union for each class
'''
return np.nanmean(IoU)
def weighted_MIoU(confusion_matrix):
'''
calculate the weighted mean intersection over Union for each class
'''
freq = confusion_matrix.sum(axis=1)/confusion_matrix.sum()
intersection = np.diag(confusion_matrix)
union_part = confusion_matrix.sum(axis=0) + confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)
# # set the value to 1 to avoid dividing zero problem,
# # based on the characteristic of confusion value
# union_part[union_part==0] = 1
with np.errstate(divide='ignore',invalid='ignore'):
IoU = intersection / union_part
w_MIoU = np.multiply(freq[IoU>=0],IoU[IoU>=0]).sum()
return w_MIoU
def evaluate_whole_dataset(confusion_matrix):
PA = pixel_accuracy(confusion_matrix)
class_PA = weighted_pixel_accuracy_class(confusion_matrix)
iou = IoU(confusion_matrix)
mIoU = MIoU(iou)
w_MIoU = weighted_MIoU(confusion_matrix)
pre = precision(confusion_matrix)
rca = recall(confusion_matrix)
f1 = F1(pre,rca)
return [PA, class_PA, mIoU, w_MIoU,pre,rca,f1]
def evaluate(targets, preds, cls_num=2):
    confusion_matrix = calculate_confusion_matrix(targets, preds, cls_num)
PA = pixel_accuracy(confusion_matrix)
class_PA = weighted_pixel_accuracy_class(confusion_matrix)
iou = IoU(confusion_matrix)
mIoU = MIoU(iou)
w_MIoU = weighted_MIoU(confusion_matrix)
results = [PA, class_PA, mIoU, w_MIoU]
return results
def error(preds, targets):
assert preds.size() == targets.size()
bs,h,w = preds.size()
n_pixels = bs*h*w
# for i in range(bs):
incorrect = preds.ne(targets).cpu().sum()
# torch.ne(input, other, out=None)
#computes input != other element-wise
err = incorrect.item()/n_pixels
# original err = incorrect/n_pixels,
# by adding .item convert incorrect from tensor to scalar
# (only one element in incorrect)
return round(err,5)
def error_singel_in_batch(preds, targets):
assert preds.size() == targets.size()
bs,h,w = preds.size()
    # per-sample error: divide by the pixels of a single image, not the whole batch
    n_pixels = h*w
    error = []
    for i in range(bs):
        incorrect = preds[i].ne(targets[i]).cpu().sum()
        err = incorrect.item() / n_pixels
        error.append(round(err,5))
return error
def train(model, trn_loader, optimizer, criterion, epoch):
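    """Run one training epoch (forward pass, loss, backward pass, optimizer step) and
    return the mean loss and mean pixel error over the loader."""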
model.train()
trn_loss = 0
trn_error = 0
for idx, data in enumerate(trn_loader):
if idx % 100 == 0:
print(idx)
inputs = Variable(data[0].cuda())
targets = Variable(data[1].cuda())
optimizer.zero_grad()
output = model(inputs)
loss = criterion(output, targets)
loss.backward()
optimizer.step()
trn_loss += loss.data
# original:trn_loss += loss.data[0], but there is a bug
pred = get_predictions(output)
trn_error += error(pred, targets.data.cpu())
trn_loss /= len(trn_loader)
trn_error /= len(trn_loader)
return trn_loss, trn_error
def test(model, test_loader, criterion, epoch=1):
model.eval()
test_loss = 0
test_error = 0
for idx, data in enumerate(test_loader):
# for data, target in test_loader:
if idx % 20 == 0:
print(idx)
inputs = Variable(data[0].cuda())
targets = Variable(data[1].cuda())
# data = Variable(data.cuda(), volatile=True)
# target = Variable(target.cuda())
with torch.no_grad():
output = model(inputs)
test_loss += criterion(output, targets).item() #fix the bug here
pred = get_predictions(output)
test_error += error(pred, targets.data.cpu())
test_loss /= len(test_loader)
test_error /= len(test_loader)
return test_loss, test_error
def test1(model, test_loader, criterion, epoch=1, cls_num=2):
model.eval()
test_loss = 0
test_error = 0
c_matrix = np.zeros([cls_num, cls_num])
for idx, data in enumerate(test_loader):
# for data, target in test_loader:
if idx % 20 == 0:
print(idx)
inputs = Variable(data[0].cuda())
targets = Variable(data[1].cuda())
# data = Variable(data.cuda(), volatile=True)
# target = Variable(target.cuda())
with torch.no_grad():
output = model(inputs)
test_loss += criterion(output, targets).item() #fix the bug here
pred = get_predictions(output)
c_matrix += calculate_confusion_matrix0(targets,pred,cls_num)
test_error += error(pred, targets.data.cpu())
result = evaluate_whole_dataset(c_matrix)
test_loss /= len(test_loader)
test_error /= len(test_loader)
return test_loss, result
def adjust_learning_rate(lr, decay, optimizer, cur_epoch, n_epochs):
"""Sets the learning rate to the initially
configured `lr` decayed by `decay` every `n_epochs`"""
new_lr = lr * (decay ** (cur_epoch // n_epochs))
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight)
m.bias.data.zero_()
def predict(model, input_loader, n_batches=1):
    input_loader.batch_size = 1
    predictions = []
    model.eval()
    for input, target in input_loader:
        with torch.no_grad():
            data = input.cuda()
            output = model(data)
        pred = get_predictions(output)
        predictions.append([input, target, pred])
    return predictions
def view_sample_predictions(model, loader, n):
inputs, targets = next(iter(loader))
    data = inputs.cuda()
with torch.no_grad():
output = model(data)
pred = get_predictions(output)
batch_size = inputs.size(0)
for i in range(min(n, batch_size)):
img_utils.view_image(inputs[i])
img_utils.view_annotated(targets[i])
img_utils.view_annotated(pred[i])
return pred[i]
def view_sample_predictions_new(model, inputs, targets, n):
#inputs, targets = next(iter(loader))
    data = inputs.cuda()
#label = Variable(targets.cuda())
with torch.no_grad():
output = model(data)
pred = get_predictions(output)
batch_size = inputs.size(0)
err = error(pred, targets.data.cpu())
for i in range(min(n, batch_size)):
#img_utils.view_image(inputs[i])
img_utils.view_annotated(targets[i])
img_utils.view_annotated(pred[i])
return pred[i]
``` |
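As a quick orientation, the metric helpers above are meant to be chained: accumulate one confusion matrix over the data, then derive the per-class scores from it. A minimal sketch, assuming the functions above are importable and using an illustrative 2x2 matrix rather than real counts:
```python
import numpy as np

# illustrative 2x2 confusion matrix (rows = ground truth, columns = prediction)
cm = np.array([[9500., 120.],
               [80., 300.]])

prec = precision(cm)   # per-class TP / (TP + FP)
rec = recall(cm)       # per-class TP / (TP + FN)
print(F1(prec, rec), pixel_accuracy(cm), MIoU(IoU(cm)), weighted_MIoU(cm))
```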
{
"source": "jiaqi-w/machine_learning",
"score": 2
} |
#### File: deep_learning/convolutional_neural_network/cnn_merge_nlp_model.py
```python
import os, re

import numpy as np
import pandas as pd

import config
from keras.callbacks import TensorBoard
from keras.constraints import max_norm
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Input
from keras.layers import InputLayer
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.merge import concatenate
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.models import Sequential
from keras.models import load_model
from keras.optimizers import Adam
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import compute_class_weight

from ml_algo.deep_learning.deep_nlp_abstract_class import Deep_NLP_Abstract_Class
from ml_algo.evaluation.model_evaluator import Model_Evaluator
from ml_algo.preprocessing.feature_processing import Feature_Processing
from ml_algo.preprocessing.word_embedding import Word_Embedding
from utils.file_logger import File_Logger_Helper
__author__ = "Jiaqi"
__version__ = "1"
__date__ = "Nov 1 2018"
class CNN_Merge_NLP_Model(Deep_NLP_Abstract_Class):
def __init__(self,
classifier_name="cnn",
num_words=10000,
max_text_len=1600,
embedding_vector_dimension=100,
embedding_fname=os.path.join(config.WORD_EMBEDDING_DIR, 'glove.6B.100d.txt'),
data_name="data",
feature_name="f1.f2",
target_name="t",
num_class=1,
kernel_initializer='glorot_uniform',
num_custome_features=0,
num_filter=2,
keneral_size_list=(2,3,4),
pool_size=1,
drop_perc=0.5,
l2_constraint=3,
model_learning_rate=1e-3,
model_weight_decate_rate=0.7,
model_weight_imbalance_class=False,
batch_size=100,
epochs=10,
replace_exists=False,
logger=None):
self.num_custome_features = num_custome_features
self.num_filter = num_filter
self.keneral_size_list = keneral_size_list
self.pool_size = pool_size
self.drop_perc = drop_perc
self.l2_constraint = l2_constraint
        # super() has to follow the parameter initialization since get_custom_name() is invoked with the required values.
super().__init__(
classifier_name=classifier_name,
num_words=num_words,
max_text_len=max_text_len,
embedding_vector_dimension=embedding_vector_dimension,
embedding_fname=embedding_fname,
data_name=data_name,
feature_name=feature_name,
target_name=target_name,
num_class=num_class,
kernel_initializer=kernel_initializer,
batch_size=batch_size,
epochs=epochs,
model_learning_rate=model_learning_rate,
model_weight_decate_rate=model_weight_decate_rate,
model_weight_imbalance_class=model_weight_imbalance_class,
replace_exists=replace_exists,
logger=logger
)
def get_custom_name(self):
# return custom name for the define model.
model_name = None
if self.num_filter is not None:
if model_name is None:
model_name = "{}numfilter".format(self.num_filter)
else:
model_name = "{}_{}numfilter".format(model_name, self.num_filter)
kernal_size_name = re.sub(r"\s+", "", str(self.keneral_size_list))
if kernal_size_name is not None:
if model_name is None:
model_name = "{}kernal".format(kernal_size_name)
else:
model_name = "{}_{}kernal".format(model_name, kernal_size_name)
if self.pool_size is not None:
if model_name is None:
model_name = "{}pool".format(self.pool_size)
else:
model_name = "{}_{}pool".format(model_name, self.pool_size)
drop = round(self.drop_perc, 2)
if drop is not None:
if model_name is None:
model_name = "{}drop".format(drop)
else:
model_name = "{}_{}drop".format(model_name, drop)
        if self.l2_constraint is not None:
if model_name is None:
model_name = "{}norm".format(self.l2_constraint)
else:
model_name = "{}_{}norm".format(model_name, self.l2_constraint)
if self.num_custome_features is not None and self.num_custome_features != 0:
if model_name is None:
model_name = "{}custfeature".format(self.num_custome_features)
else:
model_name = "{}_{}custfeature".format(model_name, self.num_custome_features)
return model_name
def define_model(self):
pass
def train(self, X_text:pd.Series, y_train:pd.Series, X_features:pd.Series=None):
'''
Reference: "A Sensitivity Analysis of (and Practitioners’ Guide to) Convolutional Neural Networks for Sentence Classification"
https://keras.io/getting-started/functional-api-guide/
:return:
'''
# Initial the embedding layer. Don't replace the embedding since it could be shared between different models.
if self.embedding_helper is not None:
self.embedding_layer = self.embedding_helper.init_embedding_layer(X_text)
# Pad the sequence to the same length
X_text = self.embedding_helper.encode_X(X_text)
if X_features is not None and X_features.shape[1] > 0:
# Merge the features
# X_features = X_features.values
self.logger.info("X_text shape {}".format(X_text.shape))
self.logger.info("X_features shape {}".format(X_features.shape))
self.logger.info("X_features type {}".format(type(X_features)))
# TODO: deal with missing value.
X_features = X_features.fillna(0)
# X_text = np.reshape(X_text, (X_text.shape[0], X_text.shape[1], 1))
# X_features = np.reshape(X_features, (X_features.shape[0], X_features.shape[1], 1))
# X_train = [X_text, X_features]
X_train = {"text_input" : X_text, "feature_input": X_features}
# self.logger.info("X_train shape {}".format(X_train.shape))
self.logger.info("Concatenate features X_train {}".format(X_train))
else:
X_train = X_text
print("y_train",y_train)
print("y_train.shape",y_train.shape)
# if len(y_train.shape) > 1 and y_train.shape[1] == 1 or isinstance(y_train[0], str):
if len(y_train.shape) <= 1 or y_train.shape[1] == 1:
y_train = self.feature_preprocessing.encode_y(y_train)
if self.model == None:
input_layers = []
input_text = Input(shape=(self.max_text_len,), name="text_input")
input_layers.append(input_text)
embedding = self.embedding_layer(input_text)
univariate_vectors = []
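            # One Conv1D branch per kernel size: each branch applies convolution, dropout,
            # max pooling and flattening, and the branches are concatenated below into a
            # single feature vector (the multi-channel CNN-for-text design cited above).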
for filter_size in self.keneral_size_list:
# channel i
print("filter_size", filter_size)
conv1d = Conv1D(filters=self.num_filter, kernel_size=filter_size, activation='relu')(embedding)
# dropout to avoid overfitting
drop = Dropout(self.drop_perc)(conv1d)
pool1d = MaxPooling1D(pool_size=self.pool_size)(drop)
flat = Flatten()(pool1d)
print("flat.shape: {}".format(flat._keras_shape))
univariate_vectors.append(flat)
print("input_layers[0].shape:", input_layers[0].shape)
# # TODO if num_custome_features == 0, don't add it, same for fix.
if self.num_custome_features is not None and self.num_custome_features > 0:
input_features = Input(shape=(self.num_custome_features,), name="feature_input")
print("input_features.shape:", input_features._keras_shape)
input_layers.append(input_features)
univariate_vectors.append(input_features)
# dense_feature = Dense(self.num_custome_features, activation='linear', name="linear")(input_features)
# univariate_vectors.append(dense_feature)
merged = concatenate(univariate_vectors, name="merge_vector")
# print("merged.shape:", merged._keras_shape)
# Please note that this input layers must be consistant with the model.fit()
# self.model.add(Model(inputs=input_layers, outputs=merged, name="input_encoder"))
# regularization
# dense_regularize = Dense(10, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(merged)
# TODO: tune this parameter in the future.
num_dense_units = self.num_filter * len(self.keneral_size_list)
if self.l2_constraint == 0:
regular_layer = Dense(num_dense_units, activation='relu', name="regularization")(merged)
else:
regular_layer = Dense(num_dense_units, activation='relu',
kernel_constraint=max_norm(self.l2_constraint), name="regularization")(merged)
# adam = Adam(lr=self.model_learning_rate, decay=self.model_weight_decate_rate)
if self.num_class == 1:
# for the imbalanced data. kernel_initializer='uniform',
                # samples are drawn from a uniform distribution within [-limit, limit], with limit = sqrt(3 * scale / n)
# self.model.add(Dense(self.num_class, activation='softmax', kernel_initializer='uniform'))
# "sigmoid", ""logistic function
# And add a logistic regression on top.
output = Dense(1, activation='sigmoid', kernel_initializer=self.kernel_initializer)(regular_layer)
self.model = Model(inputs=input_layers, outputs=output, name="output_layer")
# self.model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
self.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# self.model.compile(loss='binary_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
else:
output = Dense(self.num_class, activation='softmax', kernel_initializer=self.kernel_initializer)(regular_layer)
self.model = Model(inputs=input_layers, outputs=output, name="output_layer")
# self.model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
self.logger.info("summarize:\n{}".format(self.model.summary()))
# Log to tensorboard
tensorBoardCallback = TensorBoard(log_dir=config.LOG_DIR, write_graph=True)
# self.logger.info("X_train={}".format(X_text))
self.logger.info("X_train={}".format(X_train))
self.logger.info("y_train={}".format(y_train))
# batch_size https://keras.io/getting-started/sequential-model-guide/
if self.model_weight_imbalance_class:
class_weight = compute_class_weight('balanced', np.unique(y_train), y_train)
else:
class_weight = None
self.model.fit(X_train, y_train, class_weight=class_weight,
batch_size=self.batch_size, epochs=self.epochs,
callbacks=[tensorBoardCallback])
self.store_model()
else:
self.logger.info("Trained model {}".format(self.model_name))
def evaluate_model(self, X_text_test:pd.Series, y_test:pd.Series,
X_feature_test:pd.Series=None,
output_evaluate_dir=config.EVALUATE_DATA_DIR):
if self.model == None:
self.logger.error("Please train the model first. There is no model for {}".format(self.model_name))
return
self.logger.info("Evalute model {}".format(self.model_name))
# self.logger.info("X_test={}".format(X_test))
if self.embedding_helper is not None:
X_encode = self.embedding_helper.encode_X(X_text_test)
else:
X_encode = X_text_test
if X_feature_test is not None and X_feature_test.shape[1] > 0:
# Merge the features
# X_encode = [X_encode, X_feature_test]
X_encode = {"text_input": X_encode, "feature_input": X_feature_test}
# loss, acc = self.model.evaluate(X_encode, y_test, verbose=0)
# print('Test Loss: %f' % (loss))
# print('Test Accuracy: %f' % (acc * 100))
# y_pred = self.model.predict_classes(X_encode)
y_pred = np.asarray(self.model.predict(X_encode))
self.logger.info("y_pred {}".format(y_pred))
if self.num_class == 1:
pred = []
for p in y_pred:
if p > 0.5:
pred.append(1)
else:
pred.append(0)
y_pred = pred
else:
# y_pred = y_pred.argmax(axis=-1)
# y_pred = np.argmax(y_pred, axis=1)
y_convert = np.zeros_like(y_pred)
y_convert[np.arange(len(y_pred)), y_pred.argmax(axis=1)] = 1
print("convert y {}".format(y_convert))
y_pred = y_convert
self.logger.info("y_pred {}".format(y_pred))
# self.logger.info("y_pred.shape {}".format(y_pred.shape))
if len(y_test.shape) <= 1 or y_test.shape[1] == 1:
y_test = self.feature_preprocessing.encode_y(y_test)
self.logger.info("y_test {}".format(y_test))
# self.logger.info("y_test.shape {}".format(y_test.shape))
# model_evaluator = Model_Evaluator(y_gold=list(y_test.flatten().tolist()), y_pred=list(y_pred.flatten().tolist()), X_gold=X_text_test)
if self.num_class == 1:
model_evaluator = Model_Evaluator(y_gold=list(y_test.flatten().tolist()), y_pred=y_pred, X_gold=X_text_test)
else:
model_evaluator = Model_Evaluator(y_gold=y_test, y_pred=y_pred, X_gold=X_text_test, is_multi_class=True)
fieldnames = model_evaluator.get_evaluation_fieldnames()
evaluate_fname, predict_fname, cm_fname = None, None, None
if output_evaluate_dir is not None:
evaluate_fname = os.path.join(output_evaluate_dir, "{}_evaluate.csv".format(self.model_name))
predict_fname = os.path.join(output_evaluate_dir, "{}_predict.csv".format(self.model_name))
cm_fname = os.path.join(output_evaluate_dir, "{}_cm.csv".format(self.model_name))
evaluate_dict = model_evaluator.get_evaluation_dict(evaluation_fname=evaluate_fname,
predict_fname=predict_fname,
cm_fname=cm_fname,
show_cm=False)
# # if self.feature_preprocessing.label_encoder is None:
# # self.feature_preprocessing.label_encoder = LabelEncoder()
# self.feature_preprocessing.label_encoder = LabelEncoder()
#
# self.logger.info("label inverse_transform")
# # TODO: fix me pleassssssse
# self.feature_preprocessing.label_encoder.fit(["yes", "no"])
# y_pred = pd.DataFrame(self.feature_preprocessing.label_encoder.inverse_transform(y_pred))
#
# self.logger.info("y_pred {}".format(y_pred))
return fieldnames, evaluate_dict, y_pred
def predict_y(self, X_text_pred:pd.Series, X_feature_pred:pd.Series):
if self.embedding_helper is not None:
X_encode = self.embedding_helper.encode_X(X_text_pred)
else:
X_encode = X_text_pred
if X_feature_pred is not None and X_feature_pred.shape[1] > 0:
# Merge the features
X_encode = {"text_input": X_encode, "feature_input": X_feature_pred}
y_pred = self.model.predict(X_encode)
print("self.num_class", self.num_class)
if self.num_class == 1:
pred = []
for p in y_pred:
if p > 0.5:
pred.append(1)
else:
pred.append(0)
y_pred = pred
else:
# y_pred = y_pred.argmax(axis=-1)
# y_pred = np.argmax(y_pred, axis=1)
y_convert = np.zeros_like(y_pred)
y_convert[np.arange(len(y_pred)), y_pred.argmax(axis=1)] = 1
print("convert y {}".format(y_convert))
y_pred = y_convert
self.logger.info("y_pred {}".format(y_pred))
# print("self.feature_preprocessing.name", self.feature_preprocessing.data_lable_name)
print("label_encoder", self.feature_preprocessing.label_encoder)
if self.num_class == 1:
# if self.feature_preprocessing.label_encoder is None:
# self.feature_preprocessing.label_encoder = LabelEncoder()
self.feature_preprocessing.label_encoder = LabelEncoder()
self.logger.info("label inverse_transform")
# TODO: fix me pleassssssse
self.feature_preprocessing.label_encoder.fit(["yes", "no"])
y_pred = pd.DataFrame(self.feature_preprocessing.label_encoder.inverse_transform(y_pred))
self.logger.info("y_pred {}".format(y_pred))
return y_pred
```
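A hypothetical call pattern for the class above; the module path, the CSV file and its column names are assumptions for illustration, not part of the original code:
```python
import pandas as pd
from ml_algo.deep_learning.convolutional_neural_network.cnn_merge_nlp_model import CNN_Merge_NLP_Model

df = pd.read_csv("train.csv")  # assumed columns: 'text', 'f1', 'f2', 'label'
model = CNN_Merge_NLP_Model(num_class=1, num_custome_features=2, epochs=5)
model.train(X_text=df["text"], y_train=df["label"], X_features=df[["f1", "f2"]])
fieldnames, metrics, y_pred = model.evaluate_model(df["text"], df["label"], X_feature_test=df[["f1", "f2"]])
```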
#### File: deep_learning/convolutional_neural_network/cnn_nlp_model.py
```python
import config
import os, re
from keras.constraints import max_norm
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Input
from keras.models import Model
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.merge import concatenate
from keras.models import Sequential
from ml_algo.deep_learning.deep_nlp_abstract_class import Deep_NLP_Abstract_Class
__author__ = "Jiaqi"
__version__ = "1"
__date__ = "Nov 1 2018"
class CNN_NLP_Model(Deep_NLP_Abstract_Class):
def __init__(self,
classifier_name="cnn",
num_words=10000,
max_text_len=1600,
embedding_vector_dimension=100,
embedding_fname=os.path.join(config.WORD_EMBEDDING_DIR, 'glove.6B.100d.txt'),
data_name="data",
feature_name="f1.f2",
target_name="t",
num_class=1,
kernel_initializer='glorot_uniform',
num_filter=2,
keneral_size_list=(2,3,4),
pool_size=1,
drop_perc=0.5,
l2_constraint=3,
model_learning_rate=1e-3,
model_weight_decate_rate=0.7,
model_weight_imbalance_class=False,
batch_size=100,
epochs=10,
replace_exists=False,
logger=None):
self.num_filter = num_filter
self.keneral_size_list = keneral_size_list
self.pool_size = pool_size
self.drop_perc = drop_perc
self.l2_constraint = l2_constraint
        # super() has to follow the parameter initialization since get_custom_name() is invoked with the required values.
super().__init__(
classifier_name=classifier_name,
num_words=num_words,
max_text_len=max_text_len,
embedding_vector_dimension=embedding_vector_dimension,
embedding_fname=embedding_fname,
data_name=data_name,
feature_name=feature_name,
target_name=target_name,
num_class=num_class,
kernel_initializer=kernel_initializer,
batch_size=batch_size,
epochs=epochs,
model_learning_rate=model_learning_rate,
model_weight_decate_rate=model_weight_decate_rate,
model_weight_imbalance_class=model_weight_imbalance_class,
replace_exists=replace_exists,
logger=logger
)
def get_custom_name(self):
# return custom name for the define model.
model_name = None
if self.num_filter is not None:
if model_name is None:
model_name = "{}numfilter".format(self.num_filter)
else:
model_name = "{}_{}numfilter".format(model_name, self.num_filter)
kernal_size_name = re.sub(r"\s+", "", str(self.keneral_size_list))
if kernal_size_name is not None:
if model_name is None:
model_name = "{}kernal".format(kernal_size_name)
else:
model_name = "{}_{}kernal".format(model_name, kernal_size_name)
if self.pool_size is not None:
if model_name is None:
model_name = "{}pool".format(self.pool_size)
else:
model_name = "{}_{}pool".format(model_name, self.pool_size)
drop = round(self.drop_perc, 2)
if drop is not None:
if model_name is None:
model_name = "{}drop".format(drop)
else:
model_name = "{}_{}drop".format(model_name, drop)
        if self.l2_constraint is not None:
if model_name is None:
model_name = "{}norm".format(self.l2_constraint)
else:
model_name = "{}_{}norm".format(model_name, self.l2_constraint)
return model_name
def define_model(self):
'''
Reference: "A Sensitivity Analysis of (and Practitioners’ Guide to) Convolutional Neural Networks for Sentence Classification"
:return:
'''
self.model = Sequential()
input = Input(shape=(self.max_text_len,))
embedding = self.embedding_layer(input)
univariate_vectors = []
for filter_size in self.keneral_size_list:
# channel i
conv1d = Conv1D(filters=self.num_filter, kernel_size=filter_size, activation='relu')(embedding)
# dropout to avoid overfitting
drop = Dropout(self.drop_perc)(conv1d)
pool1d = MaxPooling1D(pool_size=self.pool_size)(drop)
flat = Flatten()(pool1d)
univariate_vectors.append(flat)
merged = concatenate(univariate_vectors)
self.model.add(Model(inputs=[input], outputs=merged))
# regularization
# dense_regularize = Dense(10, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(merged)
# TODO: tune this parameter in the future.
num_dense_units = self.num_filter * len(self.keneral_size_list)
if self.l2_constraint == 0:
# dense_regularize = Dense(num_dense_units, activation='relu')(merged)
self.model.add(Dense(num_dense_units, activation='relu', name="dense_regularize"))
else:
# dense_regularize = Dense(num_dense_units, activation='relu', kernel_constraint=max_norm(self.l2_constraint))(merged)
self.model.add(Dense(num_dense_units, activation='relu', kernel_constraint=max_norm(self.l2_constraint)))
```
#### File: deep_learning/ensemble_neural_network/cnn_rnn_nlp_model.py
```python
import config
import os, re
from keras.callbacks import TensorBoard
from keras import regularizers
from keras.constraints import max_norm
from keras.layers import Convolution1D
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Embedding
from keras.layers import Flatten
from keras.layers import Input
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.merge import concatenate
from keras.models import Sequential
from keras.models import Model
from keras.models import load_model
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
import pandas as pd
from ml_algo.preprocessing.feature_processing import Feature_Processing
from ml_algo.preprocessing.word_embedding import Word_Embedding
from ml_algo.evaluation.model_evaluator import Model_Evaluator as cm
from keras.layers import LSTM
# from keras.layers import Merge  # Merge was removed in Keras 2; it is only referenced in the commented-out code below
from ml_algo.evaluation.model_evaluator import Model_Evaluator
from ml_algo.deep_learning.deep_nlp_abstract_class import Deep_NLP_Abstract_Class
__author__ = "Jiaqi"
__version__ = "1"
__date__ = "Nov 31 2018"
class CNN_RNN_NLP_Model(Deep_NLP_Abstract_Class):
# add more cnn layers
def __init__(self,
classifier_name="cnn_rnn",
num_words=10000,
max_text_len=1600,
embedding_vector_dimension=100,
embedding_fname=os.path.join(config.WORD_EMBEDDING_DIR, 'glove.6B.100d.txt'),
data_name="data",
feature_name="f1.f2",
target_name="t",
num_class=1,
kernel_initializer='glorot_uniform',
num_filter=64,
keneral_size=5,
pool_size=1,
drop_perc=0.5,
l2_constraint=3,
batch_size=100,
epochs=10,
model_learning_rate=1e-3,
model_weight_decate_rate=0.7,
model_weight_imbalance_class=False,
replace_exists=False,
logger=None):
self.num_filter = num_filter
self.keneral_size = keneral_size
self.pool_size = pool_size
self.drop_perc = drop_perc
# self.weight_decay = 1e-4
self.l2_constraint = l2_constraint
        # super() has to follow the parameter initialization since get_custom_name() is invoked with the required values.
super().__init__(
classifier_name=classifier_name,
num_words=num_words,
max_text_len=max_text_len,
embedding_vector_dimension=embedding_vector_dimension,
embedding_fname=embedding_fname,
data_name=data_name,
feature_name=feature_name,
target_name=target_name,
num_class=num_class,
kernel_initializer=kernel_initializer,
batch_size=batch_size,
epochs=epochs,
model_learning_rate=model_learning_rate,
model_weight_decate_rate=model_weight_decate_rate,
model_weight_imbalance_class=model_weight_imbalance_class,
replace_exists=replace_exists,
logger=logger
)
def get_custom_name(self):
# return custom name for the define model.
model_name = None
if self.num_filter is not None:
if model_name is None:
model_name = "{}numfilter".format(self.num_filter)
else:
model_name = "{}_{}numfilter".format(model_name, self.num_filter)
if self.keneral_size is not None:
if model_name is None:
model_name = "{}kernal".format(self.keneral_size)
else:
model_name = "{}_{}kernal".format(model_name, self.keneral_size)
if self.pool_size is not None:
if model_name is None:
model_name = "{}pool".format(self.pool_size)
else:
model_name = "{}_{}pool".format(model_name, self.pool_size)
drop = round(self.drop_perc, 2)
if drop is not None:
if model_name is None:
model_name = "{}drop".format(drop)
else:
model_name = "{}_{}drop".format(model_name, drop)
        if self.l2_constraint is not None:
if model_name is None:
model_name = "{}norm".format(self.l2_constraint)
else:
model_name = "{}_{}norm".format(model_name, self.l2_constraint)
return model_name
# Reference: "A Sensitivity Analysis of (and Practitioners’ Guide to) Convolutional Neural Networks for Sentence Classification"
def define_model(self):
self.logger.info("Training model {}".format(self.model_name))
# inputs = []
# input = Input(shape=(self.max_text_len,))
# embedding = self.embedding_layer(input)
# univariate_vectors = []
# for filter_size in self.keneral_size_list:
# # channel i
# conv1d = Conv1D(filters=self.num_filter, kernel_size=filter_size, activation='relu')(embedding)
# # dropout to avoid overfitting
# drop = Dropout(self.drop_perc)(conv1d)
# pool1d = MaxPooling1D(pool_size=self.pool_size)(drop)
# # flat = Flatten()(pool1d)
#
# inputs.append(input)
# # univariate_vectors.append(flat)
# univariate_vectors.append(pool1d)
#
# # merged = concatenate(univariate_vectors)
# merged = Merge(univariate_vectors, mode='concat')
# merged_flat = Flatten()(merged)
#
# # https://github.com/keras-team/keras/issues/5032
# # merged = Merge(univariate_vectors, mode='concat', concat_axis=2)
# # # Output from merge is (batch_size, sequence_length, 3*dim, 4, 5)
# # # We want to get this down to (batch_size, sequence_length, 120*4*5)
#
# # regularization
# # dense_regularize_features = Dense(10, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(merged)
# num_dense_units = self.num_filter * len(self.keneral_size_list)
# if self.l2_constraint == 0:
# dense_regularize_features = Dense(num_dense_units, activation='relu')(merged_flat)
# else:
# dense_regularize_features = Dense(num_dense_units, activation='relu', kernel_constraint=max_norm(self.l2_constraint))(merged_flat)
# flatten = Flatten()(dense_regularize_features)
# self.model = Sequential()
# cnn_model = Model(inputs=[input], outputs=merged_flat)
# self.model.add(cnn_model)
# self.model.add(Conv1D(filters=2, kernel_size=2, activation='relu'))
# self.model.add(Dense(1, activation='sigmoid'))
# self.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# self.logger.info("summarize:\n{}".format(self.model.summary()))
self.model = Sequential()
self.model.add(self.embedding_layer)
self.model.add(Conv1D(self.num_filter, self.keneral_size, activation='relu'))
self.model.add(MaxPooling1D(pool_size=self.pool_size))
self.model.add(LSTM(self.embedding_helper.embedding_vector_dimension))
self.model.add(Dropout(self.drop_perc))
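        # Note: no Dense output layer or compile() call here; as with the sibling CNN model,
        # the classification head is presumably added by the Deep_NLP_Abstract_Class base.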
```
#### File: deprecated/recursive_neural_network/rnn_nlp_model.py
```python
import config
import os, re
from utils.file_logger import File_Logger_Helper
import numpy as np
from keras.callbacks import TensorBoard
from keras import regularizers
from keras.constraints import max_norm
from keras.layers import Convolution1D
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Embedding
from keras.layers import Flatten
from keras.layers import Input
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.merge import concatenate
from keras.models import Sequential
from keras.models import Model
from keras.models import load_model
from sklearn.utils import class_weight
from sklearn.utils import compute_class_weight
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
import pandas as pd
from ml_algo.preprocessing.feature_processing import Feature_Processing
from ml_algo.preprocessing.word_embedding import Word_Embedding
from ml_algo.evaluation.model_evaluator import Model_Evaluator as cm
from keras.layers import LSTM
from keras.optimizers import Adam
from ml_algo.evaluation.model_evaluator import Model_Evaluator
__author__ = "Jiaqi"
__version__ = "1"
__date__ = "Nov 31 2018"
class RNN_NLP_Model():
def __init__(self,
classifier_name="cnn",
num_words=10000,
max_text_len=1600,
embedding_vector_dimension=100,
glove_fname=os.path.join(config.WORD_EMBEDDING_DIR, 'glove.6B.100d.txt'),
data_name="data",
feature_name="f1.f2",
target_name="t",
num_class=1,
kernel_initializer='glorot_uniform',
num_lstm_layer=5,
drop_perc=0.1,
learning_rate=1e-3,
weight_decate_rate=0.7,
l2_constraint=0,
batch_size=100,
epochs=10,
logger=None):
self.logger = logger or File_Logger_Helper.get_logger(logger_fname="CNN.log")
self.feature_preprocessing = Feature_Processing()
self.classifier_name = classifier_name
self.num_class = num_class
self.kernel_initializer = kernel_initializer
self.num_words = num_words
self.num_steps = max_text_len
self.num_lstm_layer = num_lstm_layer
self.drop_perc = drop_perc
self.learning_rate = learning_rate
self.weight_decate_rate = weight_decate_rate
self.weight_decay = 1e-4
self.l2_constraint = l2_constraint
self.batch_size = batch_size
self.epochs = epochs
self.model = None
# Initial the embedding layer.
if glove_fname is not None:
self.embedding_helper = Word_Embedding(embedding_fname=glove_fname)
if embedding_vector_dimension != self.embedding_helper.embedding_vector_dimension:
self.logger.error(
"Error, the embedding vector dimension should be {} instead of {}. Fix embedding_vector_dimension to {}".format(
self.embedding_helper.embedding_vector_dimension,
embedding_vector_dimension,
self.embedding_helper.embedding_vector_dimension,
))
self.embedding_vector_dimension = self.embedding_helper.embedding_vector_dimension
self.embedding_name = "{}_{}_{}_{}".format(re.sub(r"\.txt", "_", os.path.basename(glove_fname)), data_name, feature_name, target_name)
else:
# If the embedding is not specified, we would use the plain token vector.
self.embedding_helper = Word_Embedding()
self.embedding_vector_dimension = embedding_vector_dimension
self.embedding_name = "{}_{}_{}_{}".format("token_vector", data_name, feature_name, target_name)
preprocess_name =self.embedding_helper.generate_model_name(embedding_name=self.embedding_name,
num_words=num_words,
embedding_vector_dimension=embedding_vector_dimension,
max_text_len=max_text_len)
self.load_model_if_exists(classifier_name=classifier_name,
preprocess_name=preprocess_name)
def reset_max_text_len(self, max_text_len=1600):
self.num_steps = max_text_len
preprocess_name =self.embedding_helper.generate_model_name(embedding_name=self.embedding_name,
num_words=self.num_words,
embedding_vector_dimension=self.embedding_vector_dimension,
max_text_len=max_text_len)
self.load_model_if_exists(classifier_name=self.classifier_name,
preprocess_name=preprocess_name)
def generate_model_name(self,
general_name,
preprocess_name=None,
):
model_name = "{}_{}class_{}layer_{}drop_{}lr_{}dr_{}norm_{}ki_{}batch_{}epoch".format(general_name,
self.num_class,
self.num_lstm_layer,
round(self.drop_perc, 2),
self.learning_rate,
self.weight_decate_rate,
self.l2_constraint,
self.kernel_initializer,
self.batch_size,
self.epochs)
if preprocess_name is not None:
model_name = "{}_{}".format(model_name, preprocess_name)
self.model_name = model_name
self.logger.info("model_name={}".format(model_name))
return model_name
def load_model_if_exists(self,
classifier_name="general",
preprocess_name="general",
dump_model_dir=config.PREROCESS_PICKLES_DIR):
# Load the file is not already done so. If there is no pickle created, train one for it.
self.logger.info("Load Model")
self.dump_model_dir = dump_model_dir
if not os.path.exists(dump_model_dir):
os.makedirs(dump_model_dir)
self.model_name = self.generate_model_name(classifier_name, preprocess_name=preprocess_name)
self.dump_model_fname = os.path.join(dump_model_dir, "{}.h5".format(self.model_name))
if os.path.exists(self.dump_model_fname):
self.model = load_model(self.dump_model_fname)
def store_model(self, replace_exists=False):
if not os.path.exists(self.dump_model_fname) or replace_exists is True:
self.model.save(self.dump_model_fname)
def train(self, X_train:pd.Series, y_train:pd.Series, replace_exists=False):
"""
Reference
Dialogue Act Classification in Domain-Independent Conversations Using a Deep Recurrent Neural Network
"""
# Initial the embedding layer. Don't replace the embedding.
self.embedding_layer = self.embedding_helper.init_embedding_layer(X_train.values)
# Pad the sequence to the same length
X_train = self.embedding_helper.encode_X(X_train)
# if isinstance(y_train[0], str):
y_train = self.feature_preprocessing.encode_y(y_train)
if self.model == None or replace_exists:
self.logger.info("Training model {}".format(self.model_name))
self.model = Sequential()
self.model.add(self.embedding_layer)
self.model.add(LSTM(self.embedding_vector_dimension))
# self.model.add(Dropout(self.drop_perc))
# for i in range(1, self.num_lstm_layer):
# self.model.add(LSTM(self.embedding_vector_dimension, return_sequences=True, dropout=self.drop_perc))
# # 256
# self.model.add(Dense(256, activation='relu', name='FC1'))
# self.model.add(Dropout(self.drop_perc))
# adam = Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
adam = Adam(lr=self.learning_rate, decay=self.weight_decate_rate)
if self.num_class == 1:
# for the imbalanced data. kernel_initializer='uniform',
# samples are drawn from a uniform distribution within [-limit, limit], with limit = sqrt(3 * scale / n)
# self.model.add(Dense(self.num_class, activation='softmax', kernel_initializer='uniform'))
# "sigmoid", ""logistic function
self.model.add(Dense(1, activation='sigmoid', kernel_initializer=self.kernel_initializer))
self.model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
# self.model.compile(loss='binary_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
else:
self.model.add(Dense(self.num_class, activation='softmax', kernel_initializer=self.kernel_initializer))
self.model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
self.logger.info("summarize:\n{}".format(self.model.summary()))
# Log to tensorboard
tensorBoardCallback = TensorBoard(log_dir=config.LOG_DIR, write_graph=True)
self.logger.info("X_train={}".format(X_train))
self.logger.info("y_train={}".format(y_train))
# batch_size https://keras.io/getting-started/sequential-model-guide/
class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)
self.logger.info("check balance class_weights for imbalance data {}".format(class_weights))
unique, counts = np.unique(y_train, return_counts=True)
# class_weights = dict(zip(unique, counts))
class_weights = compute_class_weight('balanced', np.unique(y_train), y_train)
self.logger.info("add class_weights for imbalance data {}".format(class_weights))
self.model.fit(X_train, y_train, class_weight=class_weights,
batch_size=self.batch_size, epochs=self.epochs, callbacks=[tensorBoardCallback])
self.store_model(replace_exists=replace_exists)
else:
self.logger.info("Trained model {}".format(self.model_name))
def vanilla_rnn_model(self):
# disavantage vanishing gradient problem for long sentence.
pass
def evaluate_model(self, X_test:pd.Series, y_test:pd.Series, output_evaluate_dir=config.EVALUATE_DATA_DIR):
if self.model == None:
self.logger.error("Please train the model first. There is no model for {}".format(self.model_name))
self.logger.info("Evalute model {}".format(self.model_name))
# self.logger.info("X_test={}".format(X_test))
X_encode = self.embedding_helper.encode_X(X_test)
# accuracy
# scores = self.model.evaluate(X_test, y_test, verbose=0)
# self.logger.info("Accuracy: %.2f%%" % (scores[-1] * 100))
y_pred = self.model.predict_classes(X_encode)
# y_pred = self.model.predict(X_test)
# y_pred = y_pred.argmax(axis=-1)
self.logger.info("y_pred {}".format(y_pred))
y_test = self.feature_preprocessing.encode_y(y_test)
self.logger.info("y_test {}".format(y_test))
model_evaluator = Model_Evaluator(y_gold=list(y_test.tolist()), y_pred=y_pred.flatten().tolist(), X_gold=X_test)
fieldnames = model_evaluator.get_evaluation_fieldnames()
evaluate_fname, predict_fname, cm_fname = None, None, None
if output_evaluate_dir is not None:
evaluate_fname = os.path.join(output_evaluate_dir, "{}_evaluate.csv".format(self.model_name))
predict_fname = os.path.join(output_evaluate_dir, "{}_predict.csv".format(self.model_name))
cm_fname = os.path.join(output_evaluate_dir, "{}_cm.csv".format(self.model_name))
evaluate_dict = model_evaluator.get_evaluation_dict(evaluation_fname=evaluate_fname,
predict_fname=predict_fname,
cm_fname=cm_fname,
show_cm=False)
return fieldnames, evaluate_dict, y_pred
```
#### File: ml_algo/evaluation/model_evaluator.py
```python
import config
import os
import numpy as np
import pandas as pd
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import itertools
import csv
from sklearn import metrics
from utils.file_logger import File_Logger_Helper
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import auc
__author__ = "Jiaqi"
__version__ = "1"
__date__ = "Nov 9 2018"
class Model_Evaluator():
def __init__(self, y_gold:list, y_pred:list, X_gold:pd.Series=None, is_multi_class=False, logger=None):
# Please note that the list of gold and predict should have the original label when they pass in.
self.X_gold = X_gold
self.y_gold = y_gold
self.y_pred = y_pred
self.is_multi_class = is_multi_class
if is_multi_class is True:
self.class_names = list(range(self.y_gold.shape[1]))
else:
self.class_names = list(set(self.y_gold + self.y_pred))
self.logger = logger or File_Logger_Helper.get_logger(logger_fname="evaluate.log")
def get_evaluation_fieldnames(self, with_cm=True):
fieldnames = []
for class_name in self.class_names:
# evaluation metric header
fieldnames += ["{}_prec".format(class_name), "{}_recall".format(class_name), "{}_f1".format(class_name), "{}_support".format(class_name)]
fieldnames += ['accuracy', 'roc_auc',
'macro_prec', 'macro_recall', 'macro_f1',
'micro_prec', 'micro_recall', 'micro_f1',
"weighted_prec", "weighted_recall", "weighted_f1"]
if with_cm is True:
for class_name in self.class_names:
for predict_class_name in self.class_names:
fieldnames.append("TP_{}_P_{}".format(class_name, predict_class_name))
return fieldnames
def get_evaluation_dict(self, evaluation_fname=None, predict_fname=None, cm_fname=None, show_cm=False):
evaluation_dict = {}
evaluation_dict.update(self.get_evaluation_metric(evaluation_fname=evaluation_fname, predict_fname=predict_fname))
if self.is_multi_class is not True:
evaluation_dict.update(self.get_confusion_matrix(cm_fname=cm_fname, show_plot=show_cm))
return evaluation_dict
def get_evaluation_metric(self, evaluation_fname=os.path.join(config.EVALUATE_DATA_DIR, "evaluate.csv"),
predict_fname=os.path.join(config.EVALUATE_DATA_DIR, "prediction.csv")):
# TODO: save the evaluation results in the future.
metric_dict = {}
print("self.y_gold", self.y_gold)
print("self.y_pred", self.y_pred)
# Compute Area Under the Curve (AUC) using the trapezoidal rule
# fpr, tpr, thresholds = metrics.roc_curve(self.y_gold, self.y_pred, pos_label=2)
# print("auc", metrics.auc(fpr, tpr))
# default average='macro'
# roc_auc = roc_auc_score(self.y_gold, self.y_pred)
# metric_dict["roc_auc"] = round(roc_auc, 4)
# self.logger.info("roc_auc={}".format(round(roc_auc, 4)))
accuracy = accuracy_score(self.y_gold, self.y_pred)
metric_dict["accuracy"] = round(accuracy, 4)
self.logger.info("accuracy={}".format(round(accuracy, 4)))
precision, recall, F1, support = precision_recall_fscore_support(self.y_gold, self.y_pred, average='macro')
metric_dict["macro_prec"] = round(precision, 4)
metric_dict["macro_recall"] = round(recall, 4)
metric_dict["macro_f1"] = round(F1, 4)
self.logger.info(
"macro precision={}, recall={}, f1={}, support={}".format(round(precision, 4), round(recall, 4),
round(F1, 4), support))
precision, recall, F1, support = precision_recall_fscore_support(self.y_gold, self.y_pred, average='micro')
metric_dict["micro_prec"] = round(precision, 4)
metric_dict["micro_recall"] = round(recall, 4)
metric_dict["micro_f1"] = round(F1, 4)
self.logger.info(
"micro precision={}, recall={}, f1={}, support={}".format(round(precision, 4), round(recall, 4),
round(F1, 4), support))
precision, recall, F1, support = precision_recall_fscore_support(self.y_gold, self.y_pred, average='weighted')
metric_dict["weighted_prec"] = round(precision, 4)
metric_dict["weighted_recall"] = round(recall, 4)
metric_dict["weighted_f1"] = round(F1, 4)
self.logger.info(
"weighted precision={}, recall={}, f1={}, support={}".format(round(precision, 4), round(recall, 4),
round(F1, 4), support))
# For specific class names.
print("self.class_names", self.class_names)
if self.is_multi_class is True:
for i in self.class_names:
precision, recall, F1, support = precision_recall_fscore_support(self.y_gold[:, i], self.y_pred[:, i],
average='macro')
metric_dict["{}_prec".format(i)] = round(precision, 4)
metric_dict["{}_recall".format(i)] = round(recall, 4)
metric_dict["{}_f1".format(i)] = round(F1, 4)
metric_dict["{}_support".format(i)] = support
self.logger.info(
"class_name={}, macro precision={}, recall={}, f1={}, support={}".format(i, round(precision, 4), round(recall, 4),
round(F1, 4), support))
else:
precision = metrics.precision_score(self.y_gold, self.y_pred, labels=self.class_names, average=None)
recall = metrics.recall_score(self.y_gold, self.y_pred, average=None)
F1 = metrics.f1_score(self.y_gold, self.y_pred, average=None)
for i, class_name in enumerate(self.class_names):
print("class_name", class_name)
metric_dict["{}_prec".format(class_name)] = round(precision[i], 4)
metric_dict["{}_recall".format(class_name)] = round(recall[i], 4)
metric_dict["{}_f1".format(class_name)] = round(F1[i], 4)
report = classification_report(self.y_gold, self.y_pred)
self.logger.info("report:\n{}".format(report))
if evaluation_fname is not None:
with open(evaluation_fname, "w") as evaluate_file:
csv_writer = csv.DictWriter(evaluate_file, fieldnames=self.get_evaluation_fieldnames(with_cm=False))
csv_writer.writeheader()
csv_writer.writerow(metric_dict)
evaluate_file.flush()
self.logger.info("Save evaluation results to {}".format(evaluation_fname))
# TODO: output results.
if predict_fname is not None:
with open(predict_fname, "w") as predict_file:
if self.X_gold is not None:
df = pd.DataFrame(data=self.X_gold)
else:
df = pd.DataFrame()
df["predict"] = self.y_pred
df.to_csv(predict_file)
self.logger.info("Save prediction results to {}".format(predict_fname))
return metric_dict
def get_confusion_matrix(self, cm_fname=os.path.join(config.EVALUATE_DATA_DIR, "confusion_matrix.csv"),
show_plot=False):
cm_dict = {}
        cnf_matrix = confusion_matrix(self.y_gold, self.y_pred, labels=self.class_names)
self.logger.info("self.class_names={}".format(self.class_names))
self.logger.info("The confusion matrix is \n {} {}".format("TP\P ", self.class_names))
for i, row in enumerate(cnf_matrix):
self.logger.info("{}".format([self.class_names[i]] + row.tolist()))
for j, item in enumerate(row):
# i == j is the correct predicion
cm_dict["TP_{}_P_{}".format(self.class_names[i], self.class_names[j])] = item
if cm_fname is not None:
with open(cm_fname, "w") as out_file:
csv_writer = csv.writer(out_file)
csv_writer.writerow(["TP\P"] + self.class_names)
for i, row in enumerate(cnf_matrix):
csv_writer.writerow([self.class_names[i]] + row.tolist())
out_file.flush()
self.logger.info("Save confusion matrix in file {}".format(cm_fname))
if show_plot is True:
# TODO: we can store the plot as well.
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
# plt.figure()
plt.figure()
# from matplotlib.pyplot import figure
# figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
Model_Evaluator.plot_confusion_matrix(cnf_matrix, classes=self.class_names,
title='Confusion matrix')
plt.show()
return cm_dict
@staticmethod
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
Reference from sklearn documentation.
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
# plt.xticks(tick_marks, classes, rotation=45)
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
```
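A minimal usage sketch for the evaluator above, assuming the repo's `utils` and `config` modules are importable (the labels are illustrative):
```python
from ml_algo.evaluation.model_evaluator import Model_Evaluator

gold = ["yes", "no", "yes", "yes", "no"]
pred = ["yes", "yes", "yes", "no", "no"]
evaluator = Model_Evaluator(y_gold=gold, y_pred=pred)
metric_dict = evaluator.get_evaluation_dict()   # file names default to None, so nothing is written
print(metric_dict["accuracy"], metric_dict["macro_f1"], metric_dict["TP_yes_P_no"])
```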
#### File: ml_algo/tradition_methods/logistic_regression.py
```python
import csv, argparse, os
import numpy as np
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy import sparse
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.model_selection import KFold
from utils.filters import Filters
from utils.pickel_helper import Pickle_Helper
import config
# from flask import current_app
from utils.file_logger import File_Logger_Helper
__author__ = "Jiaqi"
__version__ = "1"
__date__ = "Sept 14 2018"
class Logistic_Regression():
def __init__(self, dump_model_dir, filter_stopword=True, use_stemm=False, label_dict=None, logger=None):
self.logger = logger or File_Logger_Helper.get_logger(logger_fname="bow_logistic_regression")
self.classifier_name = "logistic_regression_bow"
self.use_stem = use_stemm
self.stemmer = PorterStemmer()
self.filter_stopword = filter_stopword
        self.stopwords = stopwords.words('english')
self.label_dict = label_dict
if not os.path.exists(dump_model_dir):
os.makedirs(dump_model_dir)
self.dump_model_fname = os.path.join(dump_model_dir, "bow_logistic_regression_model.pickle")
self.dump_dictionary_fname = os.path.join(dump_model_dir, "bow_logistic_regression_dictionary.pickle")
self.dump_label_encoder_fname = os.path.join(dump_model_dir, "bow_logistic_regression_label_encoder.pickle")
self.model = None
self.dictionary = None
self.label_encoder = None
self.load_model()
def read_text_list_label_list(self, filename):
self.logger.info("Read file" + filename)
with open(filename) as infile:
reader = csv.DictReader(infile)
text_list = []
label_list = []
for row in reader:
text_list.append(self.pre_processing(row["text"]))
label_list.append(self.label_mapper(row["label"]))
return np.array(text_list), np.array(label_list)
def pre_processing(self, text):
tokens = []
if text is not None:
filtered_text = Filters.replace_non_ascii(text.lower())
for token in word_tokenize(filtered_text):
# Use Stemmer
if self.use_stem is True:
updated_token = self.stemmer.stem(token)
else:
updated_token = token
# Filter stopwords
                if self.filter_stopword is not True or token not in self.stopwords:
tokens.append(updated_token)
return " ".join(tokens)
def label_mapper(self, label):
# If you need to convert the labels.
# return label
if self.label_dict and label in self.label_dict:
# if self.label_dict is not None and label in self.label_dict:
return self.label_dict[label]
return label
def get_features_array_label_array(self, text_list, label_list=None, is_training=False):
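        # Fit the TF-IDF dictionary and the label encoder only when training; at prediction time
        # the previously fitted transformers are reused so the feature and label spaces match.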
# label_list = np.array(label_list)
if is_training is True:
self.dictionary = TfidfVectorizer(stop_words='english')
self.dictionary.fit(text_list.ravel())
self.label_encoder = LabelEncoder()
self.label_encoder.fit(label_list.ravel())
bow_features_array = self.dictionary.transform(text_list.ravel())
features_array = sparse.hstack([bow_features_array]).tocsr()
if label_list is not None:
encoded_label_array = self.label_encoder.transform(label_list.ravel())
# class_names = self.label_encoder.inverse_transform(encoded_label_array)
# print(class_names)
else:
encoded_label_array = None
return features_array, encoded_label_array
def get_features_array_label_array_from_file(self, in_fname, is_training=False):
text_list, label_list = self.read_text_list_label_list(in_fname)
return self.get_features_array_label_array(text_list, label_list, is_training)
def train_model(self, train_file):
# training
self.logger.info("Training Model")
features_array, label_array = self.get_features_array_label_array_from_file(train_file, is_training=True)
self.model = LogisticRegression(solver='lbfgs',multi_class='multinomial',class_weight='balanced')
self.model.fit(features_array, label_array)
Pickle_Helper.save_model_to_pickle(self.model, self.dump_model_fname)
Pickle_Helper.save_model_to_pickle(self.dictionary, self.dump_dictionary_fname)
Pickle_Helper.save_model_to_pickle(self.label_encoder, self.dump_label_encoder_fname)
def load_model(self):
        # Load the model if it has not been loaded yet; if no pickle exists, train a new one.
self.logger.info("Load Model")
if self.model is None:
self.model = Pickle_Helper.load_model_from_pickle(self.dump_model_fname)
self.dictionary = Pickle_Helper.load_model_from_pickle(self.dump_dictionary_fname)
self.label_encoder = Pickle_Helper.load_model_from_pickle(self.dump_label_encoder_fname)
if self.model is None:
self.train_model(config.WASHINGTON_TOPIC_DATA)
def cross_validation(self, data_file, K=10, results_file=None):
self.logger.info(str(K) + "Cross Validation")
features_array, label_array = self.get_features_array_label_array_from_file(data_file, is_training=True)
kf = KFold(n_splits=K, shuffle=True, random_state=1)
precision_sum = None
recall_sum = None
f1_sum = None
micro_f1_sum = 0
        header = ['data_source', 'data_type', 'classifier', 'features', 'label_name',
                  'precision', 'recall', 'f1', 'micro_f1']
out_file = None
csv_writer = None
if results_file is not None:
out_file = open(results_file, "w")
csv_writer = csv.writer(out_file)
csv_writer.writerow(header)
print(header)
n = 0
for train, test in kf.split(label_array):
self.model = LogisticRegression(solver='lbfgs',multi_class='multinomial',class_weight='balanced')
self.model.fit(features_array[train,:],label_array[train])
gold_labels = label_array[test]
predict_results = self.model.predict(features_array[test,:])
self.logger.info("Calculate Metrics")
precision, recall, f1, micro_f1 = self.calculate_metrics(gold_labels, predict_results, n, csv_writer)
n += 1
if precision_sum is None:
precision_sum = np.zeros(len(precision))
recall_sum = np.zeros(len(precision))
f1_sum = np.zeros(len(precision))
            precision_sum += precision
            recall_sum += recall
            f1_sum += f1
micro_f1_sum += micro_f1
# Pickle_Helper.save_model_to_pickle(self.model, self.dump_model_fname)
K_precision = precision_sum / K
K_recall = recall_sum / K
K_f1 = f1_sum / K
K_micro_f1 = micro_f1_sum / K
for i in range (len(K_precision)):
label_name = self.label_encoder.inverse_transform([i])[0]
print_row = ["washington_news", "{}-cross-validation".format(K), "logistic_regression", "bow", label_name,
"{0:.4f}".format(round(K_precision[i], 4)), "{0:.4f}".format(round(K_recall[i], 4)),
"{0:.4f}".format(round(K_f1[i], 4)), "{0:.4f}".format(round(K_micro_f1, 4))]
self.logger.info(print_row)
if csv_writer is not None:
csv_writer.writerow(print_row)
if csv_writer is not None:
out_file.close()
def calculate_metrics(self, X_label, Y_label, K=1, csv_writer=None):
precision = metrics.precision_score(X_label, Y_label, average=None)
recall = metrics.recall_score(X_label, Y_label, average=None)
f1 = metrics.f1_score(X_label, Y_label, average=None)
micro_f1 = metrics.f1_score(X_label, Y_label, average='micro')
for i in range (len(precision)):
label_name = self.label_encoder.inverse_transform([i])[0]
print_row = ["washington_news", "test_{}".format(K), "logistic_regression", "bow", label_name,
"{0:.4f}".format(round(precision[i], 4)), "{0:.4f}".format(round(recall[i], 4)),
"{0:.4f}".format(round(f1[i], 4)), "{0:.4f}".format(round(micro_f1, 4))]
self.logger.info(print_row)
if csv_writer is not None:
csv_writer.writerow(print_row)
return precision, recall, f1, micro_f1
def predict_results(self, test_file, result_file=None):
self.logger.info("Predict Results")
        if self.model is None:
self.logger.error("Please train the model before testing")
features_array, label_array = self.get_features_array_label_array_from_file(test_file)
# TODO: save the prediction results as well.
        predict_results = self.model.predict(features_array)
self.calculate_metrics(label_array, predict_results)
def predict(self, text):
features_array, label_array = self.get_features_array_label_array(np.array([text]))
        predict_results = self.model.predict(features_array)
if predict_results is not None and len(predict_results) > 0:
class_names = self.label_encoder.inverse_transform(predict_results)
return class_names[0]
else:
self.logger.error("Faile to predict for text ", text)
if __name__ == "__main__":
# parser = argparse.ArgumentParser()
# parser.add_argument("-train", "--train_fname", help="train file", type=str, required=True)
# parser.add_argument("-test", "--test_fname", help="test file", type=str, required=True)
# args = parser.parse_args()
#
# # parameters
# train_file = args.train_fname
# test_file = args.test_fname
from app_settings import create_app
app = create_app("testing")
# classifier = BoW_Logistic_Regression(config.BOW_GENERAL_LOGISTIC_REGRESSION_MODEL_DIR)
# classifier.train_model(train_file)
# classifier.predict_results(test_file)
# classifier.cross_validation(config.WASHINGTON_TOPIC_DATA,
# results_file=os.path.join(config.BOW_TOPIC_LOGISTIC_REGRESSION_BOW_MODEL_DIR,
# "cross_validation_results.csv"))
classifier = BoW_Logistic_Regression(config.BOW_SWA_LOGISTIC_REGRESSION_BOW_MODEL_DIR,
label_dict={'%': "PRE", '+': "PLUS", '^2': "UP2", '^g': "UPG",
'^h': "UPH", '^q': "UPQ", 'aap_am': "AAPAM",
'arp_nd': "ARPND", 'b^m': "BUPM", 'fo_o_fw_"_by_bc': "FOO",
'oo_co_cc': "OOCOCC", 'qw^d': "QWUPD", 'qy^d': "QYUPD"})
classifier.cross_validation(os.path.join(config.DATA, "swda.csv"),
results_file=os.path.join(config.BOW_SWA_LOGISTIC_REGRESSION_BOW_MODEL_DIR,
"cross_validation_results.csv"))
```
#### File: machine_learning/utils/yaml_helper.py
```python
import yaml
class Yaml_Helper():
def __init__(self, config_fname):
self.config = self.load_config(config_fname) or {}
@staticmethod
def load_config(config_fname):
"""
        Load the YAML configuration file given by `config_fname`.
        :param config_fname: path to the YAML configuration file
:return: dictionary of config values
"""
with open(config_fname) as config_file:
            conf = yaml.safe_load(config_file)
return conf
def add_config(self, config_fname, should_override=False):
"""
        Merge another configuration file into the current config. Missing keys
        are always added; existing keys are overwritten only when `should_override` is True.
        :param config_fname: path to the YAML configuration file to merge
        :param should_override: whether to overwrite keys that already exist
        :return: None
"""
default = self.load_config(config_fname)
for key in default:
            if key not in self.config or should_override:
self.config[key] = default[key]
``` |
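A minimal usage sketch for `Yaml_Helper` (the file names below are hypothetical): load a base config, then merge a second file, overriding existing keys only when requested.
```python
# Minimal usage sketch; "base.yaml" and "override.yaml" are hypothetical files.
helper = Yaml_Helper("base.yaml")                          # load the base configuration
helper.add_config("override.yaml")                         # only fills in missing keys
helper.add_config("override.yaml", should_override=True)   # also overwrites existing keys
print(helper.config)
```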
{
"source": "jiaqi-xi/Neural-Module-Networks.Tensorlayer",
"score": 3
} |
#### File: n2nmn-tensorlayer/models_shapes/nmn3_layers.py
```python
from __future__ import absolute_import, division, print_function
import sys
import tensorflow as tf
import tensorlayer as tl
import numpy as np
from tensorflow import convert_to_tensor as to_T
sess = tf.Session()
tl.layers.initialize_global_variables(sess)
def conv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
# input has shape [batch, in_height, in_width, in_channels]
input_dim = bottom.get_shape().as_list()[-1]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# filter has shape [filter_height, filter_width, in_channels, out_channels]
weights = tf.get_variable("weights",
[kernel_size, kernel_size, input_dim, output_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
conv = tf.nn.conv2d(bottom, filter=weights,
strides=[1, stride, stride, 1], padding=padding)
if bias_term:
conv = tf.nn.bias_add(conv, biases)
return conv
def conv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
# input has shape [batch, in_height, in_width, in_channels]
input_dim = bottom.get_shape().as_list()[-1]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# filter has shape [filter_height, filter_width, in_channels, out_channels]
weights = tf.get_variable("weights",
[kernel_size, kernel_size, input_dim, output_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
conv = tf.nn.conv2d(bottom, filter=weights,
strides=[1, stride, stride, 1], padding=padding)
if bias_term:
conv = tf.nn.bias_add(conv, biases)
relu = tf.nn.relu(conv)
return relu
def deconv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
                 bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
# input_shape is [batch, in_height, in_width, in_channels]
input_shape = bottom.get_shape().as_list()
batch_size, input_height, input_width, input_dim = input_shape
output_shape = [batch_size, input_height*stride, input_width*stride, output_dim]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# filter has shape [filter_height, filter_width, out_channels, in_channels]
weights = tf.get_variable("weights",
[kernel_size, kernel_size, output_dim, input_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
net = tl.layers.InputLayer(inputs=bottom, name=name+'input')
deconv = tl.layers.DeConv2dLayer(net, act=tf.identity, shape=[kernel_size, kernel_size, output_dim, input_dim],
output_shape=output_shape, strides=[1, stride, stride, 1],
padding=padding, W_init=weights_initializer, b_init=biases_initializer,
name=name+'deconv2d')
return deconv.outputs
def deconv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
deconv = deconv_layer(name, bottom, kernel_size, stride, output_dim, padding,
bias_term, weights_initializer, biases_initializer, reuse=reuse)
# relu = tl.layers.PReluLayer(deconv)
relu = tf.nn.relu(deconv)
return relu
def pooling_layer(name, bottom, kernel_size, stride):
#pool = tf.nn.max_pool(bottom, ksize=[1, kernel_size, kernel_size, 1],
# strides=[1, stride, stride, 1], padding='SAME', name=name)
net = tl.layers.InputLayer(inputs=bottom, name=name+'input')
pool = tl.layers.PoolLayer(net, ksize=[1, kernel_size, kernel_size, 1],
strides=[1, stride, stride, 1], padding='SAME', pool=tf.nn.max_pool, name=name+'pool')
return pool.outputs
def fc_layer(name, bottom, output_dim, bias_term=True, weights_initializer=None,
biases_initializer=None, reuse=None):
# flatten bottom input
# input has shape [batch, in_height, in_width, in_channels]
shape = bottom.get_shape().as_list()
input_dim = 1
for d in shape[1:]:
input_dim *= d
# flat_bottom = tf.reshape(bottom, [-1, input_dim])
net = tl.layers.InputLayer(inputs=bottom, name=name+'input')
flat_bottom = tl.layers.ReshapeLayer(net, [-1, input_dim], name=name+'reshape').outputs
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# weights has shape [input_dim, output_dim]
weights = tf.get_variable("weights", [input_dim, output_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
if bias_term:
fc = tf.nn.xw_plus_b(flat_bottom, weights, biases)
else:
fc = tf.matmul(flat_bottom, weights)
return fc
def fc_relu_layer(name, bottom, output_dim, bias_term=True,
weights_initializer=None, biases_initializer=None, reuse=None):
fc = fc_layer(name, bottom, output_dim, bias_term, weights_initializer,
biases_initializer, reuse=reuse)
relu = tf.nn.relu(fc)
return relu
# convnet built for shapes dataset
def shapes_convnet(input_batch, hidden_dim=64, output_dim=64,
scope='shapes_convnet', reuse=None):
with tf.variable_scope(scope, reuse=reuse):
conv_1 = conv_relu_layer('conv_1', input_batch, kernel_size=10, stride=10,
output_dim=hidden_dim, padding='VALID')
conv_2 = conv_relu_layer('conv_2', conv_1, kernel_size=1, stride=1,
output_dim=output_dim)
return conv_2
# following convnet are safe even for empty data
def empty_safe_1x1_conv(name, bottom, output_dim, reuse=None):
# use this for 1x1 convolution in modules to avoid the crash.
bottom_shape = tf.shape(bottom)
input_dim = bottom.get_shape().as_list()[-1]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
weights_initializer = tf.contrib.layers.xavier_initializer()
biases_initializer = tf.constant_initializer(0.)
weights = tf.get_variable('weights', [input_dim, output_dim],
initializer=weights_initializer)
biases = tf.get_variable('biases', output_dim,
initializer=biases_initializer)
conv_flat = tf.matmul(tf.reshape(bottom, [-1, input_dim]), weights) + biases
conv = tf.reshape(conv_flat, to_T([bottom_shape[0], bottom_shape[1], bottom_shape[2], output_dim]))
return conv
# use this for arbitrary convolution in modules to avoid the crash.
def empty_safe_conv(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None,
biases_initializer=None, reuse=None):
g = tf.get_default_graph()
with g.gradient_override_map({'Conv2D': 'Conv2D_handle_empty_batch'}):
return conv_layer(name, bottom, kernel_size, stride, output_dim,
padding, bias_term, weights_initializer,
biases_initializer, reuse=reuse)
@tf.RegisterGradient('Conv2D_handle_empty_batch')
def _Conv2DGrad(op, grad):
    with tf.device('/cpu:0'):
        input_grad = tf.nn.conv2d_backprop_input(  # gradient w.r.t. the input
            tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),
            op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'), op.get_attr('data_format'))
        filter_grad = tf.nn.conv2d_backprop_filter(  # gradient w.r.t. the filter
            op.inputs[0], tf.shape(op.inputs[1]), grad, op.get_attr('strides'),
            op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'), op.get_attr('data_format'))
        # Conv2D's inputs are (input, filter), so return the gradients in that order.
        return [input_grad, filter_grad]
``` |
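A small graph-construction sketch (TF1 style) showing how the layers above compose; the 30x30 input resolution and the 2-way classifier head are assumptions, not part of the original code.
```python
# Sketch only: assumes the functions above (shapes_convnet, fc_layer) are in scope.
import numpy as np
import tensorflow as tf

image_batch = tf.placeholder(tf.float32, [None, 30, 30, 3])   # 30x30 RGB input (assumed size)
feat_map = shapes_convnet(image_batch)                         # [N, 3, 3, 64] after the 10x10/10 conv
scores = fc_layer('cls_fc', feat_map, output_dim=2)            # flattened features -> 2 logits

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(scores, {image_batch: np.zeros((4, 30, 30, 3), np.float32)})
    print(out.shape)  # (4, 2)
```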
{
"source": "jiaqi-xi/slot_attention",
"score": 3
} |
#### File: slot_attention/clevr_video/novel_view_data.py
```python
import os
from PIL import Image
from typing import Callable
from typing import Optional
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import pytorch_lightning as pl
class CLEVRNovelViewImageDataset(Dataset):
"""Dataset that loads image from one view from CLEVR novel view image."""
def __init__(self,
data_root: str,
clevr_transforms: Callable,
split: str = '00'):
assert split in ['00', '01']
self.split = split
self.data_root = data_root
self.clevr_transforms = clevr_transforms
self.data_path = os.path.join(data_root, "images")
assert os.path.exists(
self.data_root), f"Path {self.data_root} does not exist"
assert os.path.exists(
self.data_path), f"Path {self.data_path} does not exist"
self.pairs = self.get_pairs()
def __getitem__(self, index: int):
"""Load one view image"""
pair_name = self.pairs[index]
img = os.path.join(self.data_path, f'{pair_name}{self.split}.png')
img = Image.open(img)
img = img.convert("RGB")
return self.clevr_transforms(img)
def __len__(self):
return len(self.pairs)
def get_pairs(self):
all_files = os.listdir(self.data_path)
all_files = list(set([file[:-6] for file in all_files]))
# file is like 'CLEVR_new_000007'
# so a pair is f'{file}00.png' and f'{file}01.png'
return all_files
class CLEVRNovelViewImagePairDataset(CLEVRNovelViewImageDataset):
"""Dataset that loads paired images from CLEVR novel view image."""
def __init__(self, data_root: str, clevr_transforms: Callable):
self.data_root = data_root
self.clevr_transforms = clevr_transforms
self.data_path = os.path.join(data_root, "images")
assert os.path.exists(
self.data_root), f"Path {self.data_root} does not exist"
assert os.path.exists(
self.data_path), f"Path {self.data_path} does not exist"
self.pairs = self.get_pairs()
def __getitem__(self, index: int):
"""Load two views paired image"""
pair_name = self.pairs[index]
img1 = os.path.join(self.data_path, f'{pair_name}00.png')
img2 = os.path.join(self.data_path, f'{pair_name}01.png')
img1 = Image.open(img1)
img2 = Image.open(img2)
img1 = img1.convert("RGB")
img2 = img2.convert("RGB")
return torch.stack(
[self.clevr_transforms(img) for img in [img1, img2]], dim=0)
class CLEVRNovelViewImageDataModule(pl.LightningDataModule):
def __init__(
self,
data_root: str,
train_batch_size: int,
val_batch_size: int,
clevr_transforms: Callable,
num_workers: int,
):
super().__init__()
self.data_root = data_root
self.train_batch_size = train_batch_size
self.val_batch_size = val_batch_size
self.clevr_transforms = clevr_transforms
self.num_workers = num_workers
self.train_dataset = CLEVRNovelViewImageDataset(
data_root=self.data_root,
clevr_transforms=self.clevr_transforms,
split='00',
)
self.val_dataset = CLEVRNovelViewImagePairDataset(
data_root=self.data_root,
clevr_transforms=self.clevr_transforms,
)
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.train_batch_size,
shuffle=True,
num_workers=self.num_workers,
pin_memory=True,
)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
batch_size=self.val_batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
)
```
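Below is a minimal wiring sketch for the data module above; the data root path and the 128x128 resize are assumptions for illustration.
```python
# Sketch only; "/path/to/clevr_novel_view" and the image size are assumptions.
from torchvision import transforms

clevr_transforms = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
])
datamodule = CLEVRNovelViewImageDataModule(
    data_root="/path/to/clevr_novel_view",
    train_batch_size=32,
    val_batch_size=16,
    clevr_transforms=clevr_transforms,
    num_workers=4,
)
imgs = next(iter(datamodule.train_dataloader()))    # single-view images, [32, 3, 128, 128]
pairs = next(iter(datamodule.val_dataloader()))     # paired views, [16, 2, 3, 128, 128]
```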
#### File: slot_language/contrastive_learning/perceptual_method.py
```python
import sys
import pytorch_lightning as pl
import torch
from torchvision import utils as vutils
from perceptual_model import PerceptualSlotAttentionModel
from perceptual_params import SlotAttentionParams
sys.path.append('../')
from utils import to_rgb_from_tensor
from method import SlotAttentionVideoLanguageMethod
class PerceptualSlotAttentionVideoLanguageMethod(
SlotAttentionVideoLanguageMethod):
def __init__(self, model: PerceptualSlotAttentionModel,
datamodule: pl.LightningDataModule,
params: SlotAttentionParams):
super().__init__(model, datamodule, params)
self.entropy_loss_w = params.entropy_loss_w
self.perceptual_loss_w = params.perceptual_loss_w
def training_step(self, batch, batch_idx, optimizer_idx=0):
train_loss = self.model.loss_function(batch)
loss = train_loss['recon_loss'] + \
self.perceptual_loss_w * train_loss['perceptual_loss']
if 'entropy' in train_loss.keys():
loss = loss + train_loss['entropy'] * self.entropy_loss_w
train_loss['loss'] = loss
logs = {key: val.item() for key, val in train_loss.items()}
self.log_dict(logs, sync_dist=True)
return {'loss': loss}
def sample_images(self):
dl = self.datamodule.val_dataloader()
perm = torch.randperm(self.params.val_batch_size)
idx = perm[:self.params.n_samples]
batch = {k: v[idx] for k, v in next(iter(dl)).items()}
if self.params.gpus > 0:
batch = {k: v.to(self.device) for k, v in batch.items()}
B, C, H, W = batch['img'].shape
# we want to compare the two imgs, so we cat as input
batch = dict(
img=torch.stack([batch['img'], batch['img2']],
dim=1).view(2 * B, C, H, W),
text=torch.stack([batch['text'], batch['text2']],
dim=1).view(2 * B, -1))
recon_combined, recons, masks, slots = self.model.forward(batch)
# combine images in a nice way so we can display all outputs in one grid, output rescaled to be between 0 and 1
out = to_rgb_from_tensor(
torch.cat(
[
batch['img'].unsqueeze(1), # original images
recon_combined.unsqueeze(1), # reconstructions
recons * masks + (1 - masks), # each slot
],
dim=1,
)) # [B, num_slots+2, C, H, W]
batch_size, num_slots, C, H, W = recons.shape
images = vutils.make_grid(
out.view(batch_size * out.shape[1], C, H, W).cpu(),
normalize=False,
nrow=out.shape[1],
) # [C, B*H, (num_slots+2)*W]
# also visualize the mask of slots
# masks of shape [B, num_slots, 1, H, W]
masks = torch.cat([masks] * C, dim=2) # [B, num_slots, C, H, W]
masks = vutils.make_grid(
masks.view(batch_size * masks.shape[1], C, H, W).cpu(),
normalize=False,
nrow=masks.shape[1],
) # [C, B*H, num_slots*W]
return images, masks
```
#### File: slot_language/contrastive_learning/perceptual_model.py
```python
import sys
from typing import Optional
import torch
from torch import nn
import torch.nn.functional as F
sys.path.append('../')
from model import SlotAttentionModel
import lpips
class PerceptualLoss(nn.Module):
def __init__(self, arch='vgg'):
super().__init__()
assert arch in ['alex', 'vgg', 'squeeze']
self.loss_fn = lpips.LPIPS(net=arch).eval()
for p in self.loss_fn.parameters():
p.requires_grad = False
def loss_function(self, x_prev, x_future):
"""x_prev and x_future are of same shape.
Should be mask * recon + (1 - mask)
"""
assert len(x_prev.shape) == len(x_future.shape) == 4
x_prev = torch.clamp(x_prev, min=-1., max=1.)
x_future = torch.clamp(x_future, min=-1., max=1.)
loss = self.loss_fn(x_prev, x_future).mean()
return loss
class PerceptualSlotAttentionModel(nn.Module):
"""SlotAttentionModel that uses Perceptual learning loss.
Args:
model: base encoder
arch: model used in Perceptual Loss
"""
def __init__(self, model: SlotAttentionModel, arch: str = 'vgg'):
super().__init__()
self.model = model
self.perceptual_loss = PerceptualLoss(arch)
def forward_test(self, data):
return self.model(dict(img=data['img'], text=data['text']))
def forward(self, data):
"""Forward function.
Args:
            data (dict): Input data dict with the following keys:
- img: [B, C, H, W], image as q
- text: [B, L], text corresponding to img
- img2: [B, C, H, W], img as k
- text2: [B, L], text corresponding to img2
"""
# if in testing, directly return the output of SlotAttentionModel
if not self.training:
return self.forward_test(data)
img = torch.cat([data['img'], data['img2']], dim=0)
text = torch.cat([data['text'], data['text2']], dim=0)
x = dict(img=img, text=text)
recon_combined, recons, masks, slots = self.model(x)
return recon_combined, recons, masks, slots, None, None
def loss_function(self, input):
"""Calculate reconstruction loss and contrastive loss."""
if not self.training:
recon_combined, _, masks, _ = self.forward(input)
recon_loss = F.mse_loss(recon_combined, input['img'])
loss_dict = {
'recon_loss': recon_loss,
}
else:
recon_combined, recons, masks, _, _, _ = self.forward(input)
img = torch.cat([input['img'], input['img2']], dim=0)
recon_loss = F.mse_loss(recon_combined, img)
recon_1, recon_2 = recons[:recons.shape[0] // 2], recons[
recons.shape[0] // 2:] # torch.split(recons, 2, dim=0)
mask_1, mask_2 = masks[:masks.shape[0] // 2], masks[
masks.shape[0] // 2:] # torch.split(masks, 2, dim=0)
x_1 = mask_1 * recon_1 + (1 - mask_1)
x_2 = mask_2 * recon_2 + (1 - mask_2)
perceptual_loss = self.perceptual_loss.loss_function(
x_1.flatten(0, 1), x_2.flatten(0, 1))
loss_dict = {
'recon_loss': recon_loss,
'perceptual_loss': perceptual_loss,
}
# masks: [B, num_slots, 1, H, W], apply entropy loss
if self.model.use_entropy_loss:
masks = masks[:, :, 0] # [B, num_slots, H, W]
entropy_loss = (-masks * torch.log(masks + 1e-6)).sum(1).mean()
loss_dict['entropy'] = entropy_loss
return loss_dict
```
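A quick standalone sanity check of the LPIPS wrapper above, using random tensors in the expected [-1, 1] range (the batch and image sizes are arbitrary).
```python
# Sketch only; shapes are arbitrary, inputs must be 4D and roughly in [-1, 1].
import torch

loss_fn = PerceptualLoss(arch='vgg')
x_prev = torch.rand(4, 3, 64, 64) * 2 - 1
x_future = torch.rand(4, 3, 64, 64) * 2 - 1
loss = loss_fn.loss_function(x_prev, x_future)
print(loss.item())   # scalar LPIPS distance averaged over the batch
```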
#### File: slot_attention/slot_language/detr_module.py
```python
import copy
from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
class DETRText2Slot(nn.Module):
def __init__(
self,
in_channels: int,
num_slots: int,
slot_size: int = 64,
text_length: int = 77,
d_model: int = 64,
nhead: int = 1,
num_layers: int = 2,
dim_feedforward: int = 256,
dropout: float = 0.1,
activation: str = 'relu',
text_pe: bool = True,
out_mlp_layers: int = 2,
):
super().__init__()
decoder_layer = TransformerDecoderLayer(d_model, nhead,
dim_feedforward, dropout,
activation)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer,
num_layers,
decoder_norm,
)
self._reset_parameters()
if in_channels != d_model:
self.input_proj = nn.Linear(in_channels, d_model, bias=True)
else:
self.input_proj = nn.Identity()
self.output_proj = MLP(d_model, d_model, slot_size, out_mlp_layers)
# learnable queries to interact with language features
self.query_embed = nn.Embedding(num_slots, d_model)
nn.init.xavier_uniform_( # as the slot_mu/sigma in slot-attention
self.query_embed.weight,
gain=nn.init.calculate_gain("linear"))
# learnable positional embedding for text features
self.text_pe = text_pe
if self.text_pe:
self.text_pos_embed = nn.Embedding(text_length, d_model)
nn.init.normal_(self.text_pos_embed.weight, std=0.01)
def forward(self, inputs: dict):
if isinstance(inputs, dict):
text_features = inputs['text_features']
text_padding_mask = inputs.get('text_padding_mask')
else:
text_features = inputs
text_padding_mask = None
bs = text_features.shape[0]
text_features = self.input_proj(text_features)
pos_embed = self.text_pos_embed.weight.unsqueeze(1).repeat(
1, bs, 1) if self.text_pe else None
query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
tgt = torch.zeros_like(query_embed)
pred_slots = self.decoder(
tgt,
text_features.permute(1, 0, 2).contiguous(),
memory_key_padding_mask=text_padding_mask,
pos=pos_embed,
query_pos=query_embed).permute(1, 0, 2).contiguous()
pred_slots = self.output_proj(pred_slots)
return pred_slots, None
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output = tgt
for layer in self.layers:
output = layer(
output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(
d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(
q,
k,
value=tgt,
attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
```
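A shape sketch for `DETRText2Slot`: feed per-token text features and get one query-decoded embedding per slot (the 512-dim input and the batch size are assumptions).
```python
# Sketch only; in_channels=512 mimics CLIP token embeddings, all sizes are assumptions.
import torch

text2slot = DETRText2Slot(in_channels=512, num_slots=7, slot_size=64, text_length=77)
text_features = torch.rand(2, 77, 512)   # [B, L, C] per-token text features
pred_slots, _ = text2slot(text_features)
print(pred_slots.shape)                   # torch.Size([2, 7, 64]), one embedding per slot
```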
#### File: slot_attention/slot_language/method.py
```python
import pytorch_lightning as pl
import torch
from torch import optim
from torchvision import utils as vutils
from model import SlotAttentionModel
from params import SlotAttentionParams
from utils import Tensor, to_rgb_from_tensor
class SlotAttentionVideoLanguageMethod(pl.LightningModule):
def __init__(self, model: SlotAttentionModel,
datamodule: pl.LightningDataModule,
params: SlotAttentionParams):
super().__init__()
self.model = model
self.datamodule = datamodule
self.params = params
self.entropy_loss_w = params.entropy_loss_w
def forward(self, input: Tensor, **kwargs) -> Tensor:
return self.model(input, **kwargs)
def training_step(self, batch, batch_idx, optimizer_idx=0):
train_loss = self.model.loss_function(batch)
loss = train_loss['recon_loss']
if 'entropy' in train_loss.keys():
loss = loss + train_loss['entropy'] * self.entropy_loss_w
train_loss['loss'] = loss
logs = {key: val.item() for key, val in train_loss.items()}
# record training time
logs['data_time'] = \
self.trainer.profiler.recorded_durations['get_train_batch'][-1]
self.log_dict(logs, sync_dist=True)
return {'loss': loss}
def sample_images(self):
dl = self.datamodule.val_dataloader()
perm = torch.randperm(self.params.val_batch_size)
idx = perm[:self.params.n_samples]
batch = {k: v[idx] for k, v in next(iter(dl)).items()}
if self.params.gpus > 0:
batch = {k: v.to(self.device) for k, v in batch.items()}
recon_combined, recons, masks, slots = self.model.forward(batch)
# combine images in a nice way so we can display all outputs in one grid, output rescaled to be between 0 and 1
out = to_rgb_from_tensor(
torch.cat(
[
batch['img'].unsqueeze(1), # original images
recon_combined.unsqueeze(1), # reconstructions
recons * masks + (1 - masks), # each slot
],
dim=1,
)) # [B, num_slots+2, C, H, W]
batch_size, num_slots, C, H, W = recons.shape
images = vutils.make_grid(
out.view(batch_size * out.shape[1], C, H, W).cpu(),
normalize=False,
nrow=out.shape[1],
) # [C, B*H, (num_slots+2)*W]
# also visualize the mask of slots
# masks of shape [B, num_slots, 1, H, W]
masks = torch.cat([masks] * C, dim=2) # [B, num_slots, C, H, W]
masks = vutils.make_grid(
masks.view(batch_size * masks.shape[1], C, H, W).cpu(),
normalize=False,
nrow=masks.shape[1],
) # [C, B*H, num_slots*W]
return images, masks
def sample_video(self):
dst = self.datamodule.val_dataset
dst.is_video = True # load entire video
sampled_idx = torch.randperm(dst.num_videos)[:self.params.n_samples]
results = []
all_texts = []
for idx in sampled_idx:
idx = idx.item()
batch = dst.__getitem__(idx) # dict with key video, text, raw_text
video, text, raw_text = \
batch['video'], batch['text'], batch['raw_text']
all_texts.append(raw_text)
batch = dict(img=video, text=text)
if self.params.gpus > 0:
batch = {k: v.to(self.device) for k, v in batch.items()}
recon_combined, recons, masks, slots = self.model.forward(batch)
# combine images in a nice way so we can display all outputs in one grid, output rescaled to be between 0 and 1
out = to_rgb_from_tensor(
torch.cat(
[
batch['img'].unsqueeze(1), # original images
recon_combined.unsqueeze(1), # reconstructions
recons * masks + (1 - masks), # each slot
],
dim=1,
)) # [B (temporal dim), num_slots+2, 3, H, W]
T, num_slots, C, H, W = recons.shape
video = torch.stack([
vutils.make_grid(
out[i].cpu(),
normalize=False,
nrow=out.shape[1],
) for i in range(T)
]) # [T, 3, H, (num_slots+2)*W]
results.append(video)
dst.is_video = False
video = torch.cat(results, dim=2) # [T, 3, B*H, (num_slots+2)*W]
text = '\n'.join(all_texts)
return video, text
def validation_step(self, batch, batch_idx, optimizer_idx=0):
val_loss = self.model.loss_function(batch)
return val_loss
def validation_epoch_end(self, outputs):
avg_recon_loss = torch.stack([x['recon_loss'] for x in outputs]).mean()
logs = {
'val_loss': avg_recon_loss,
'val_recon_loss': avg_recon_loss,
}
if 'entropy' in outputs[0].keys():
avg_entropy = torch.stack([x['entropy'] for x in outputs]).mean()
logs['val_entropy'] = avg_entropy
logs['val_loss'] += avg_entropy * self.entropy_loss_w
self.log_dict(logs, sync_dist=True)
print("; ".join([f"{k}: {v.item():.6f}" for k, v in logs.items()]))
def configure_optimizers(self):
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.params.lr,
weight_decay=self.params.weight_decay)
warmup_steps_pct = self.params.warmup_steps_pct
decay_steps_pct = self.params.decay_steps_pct
total_steps = self.params.max_epochs * len(
self.datamodule.train_dataloader())
def warm_and_decay_lr_scheduler(step: int):
warmup_steps = warmup_steps_pct * total_steps
decay_steps = decay_steps_pct * total_steps
assert step <= total_steps
if step < warmup_steps:
factor = step / warmup_steps
else:
factor = 1
factor *= self.params.scheduler_gamma**(step / decay_steps)
return factor
scheduler = optim.lr_scheduler.LambdaLR(
optimizer=optimizer, lr_lambda=warm_and_decay_lr_scheduler)
return (
[optimizer],
[{
"scheduler": scheduler,
"interval": "step",
}],
)
```
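The LR schedule in `configure_optimizers` combines linear warmup with a continuous exponential decay; below is a standalone sketch of the factor it produces (all numbers are placeholder values).
```python
# Sketch of the warmup + exponential-decay factor; all hyperparameters are placeholders.
def lr_factor(step, total_steps=10000, warmup_pct=0.05, decay_pct=0.5, gamma=0.5):
    warmup_steps = warmup_pct * total_steps
    decay_steps = decay_pct * total_steps
    factor = step / warmup_steps if step < warmup_steps else 1.0
    return factor * gamma ** (step / decay_steps)

for step in (0, 250, 500, 5000, 10000):
    print(step, round(lr_factor(step), 4))   # 0.0, 0.483, 0.933, 0.5, 0.25
```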
#### File: slot_language/object_slot/obj_data.py
```python
import sys
import cv2
import copy
import numpy as np
from PIL import Image
from typing import Callable, Tuple
from typing import Optional
import torch
import torchvision.transforms.functional as TF
import clip
sys.path.append('../')
from data import CLEVRVisionLanguageCLIPDataset, CLEVRVisionLanguageCLIPDataModule
class ObjCLEVRVisionLanguageCLIPDataset(CLEVRVisionLanguageCLIPDataset):
"""Dataset that loads one random frame from CLEVR video.
One text ('color-shape' of an object) directly for one slot!
"""
def __init__(self,
data_root: str,
max_num_images: Optional[int],
clip_transforms: Callable,
max_n_objects: int = 6,
split: str = "train",
clip_len: int = 34,
is_video: bool = False,
shuffle_obj: bool = False):
# TODO: we assume `self.max_n_objects` == 6 here!
super().__init__(data_root, max_num_images, clip_transforms,
max_n_objects, split, clip_len, is_video, True, True)
self.shuffle_obj = shuffle_obj
def __getitem__(self, index: int):
"""Load one video and get only one frame from it"""
if self.is_video:
video = self._get_video(index) # clip pre-processed video frames
raw_text = [
self._generate_text(index * self.base_num + idx)
for idx in range(self.base_num)
] # raw
token = [self._pad_text_tokens(text) for text in raw_text]
return dict(
video=video,
text=torch.stack([t[0] for t in token], dim=0),
padding=torch.stack([t[1] for t in token], dim=0),
raw_text=', '.join(raw_text[0]))
img = self._get_frame(index) # clip pre-processed img tensor
text = self._generate_text(index) # raw text
token, padding = self._pad_text_tokens(text) # tokenize
return dict(img=img, text=token, padding=padding)
def _pad_text_tokens(self, texts: Tuple[str]):
"""Tokenize texts and pad to `self.max_n_objects`"""
tokens = clip.tokenize(texts) # [n, C]
        # TODO: we're using `+1` to account for the background slot
num_pad = 1 + self.max_n_objects - tokens.shape[0]
pad_tokens = torch.zeros(num_pad, tokens.shape[1], dtype=tokens.dtype)
padding = torch.cat(
[torch.ones(tokens.shape[0]),
torch.zeros(num_pad)], dim=0).long()
return torch.cat([tokens, pad_tokens], dim=0), padding
def _generate_text(self, index: int):
"""Generate text descriptions of each object in the scene."""
img_idx = self._get_idx(index)[0]
anno = self.annos[img_idx]
colors = [obj['color'] for obj in anno['objects']]
shapes = [obj['shape'] for obj in anno['objects']]
texts = [
'a {} {}'.format(color, shape)
for color, shape in zip(colors, shapes)
]
# shuffle the order of objects
if self.split == 'train' and self.shuffle_obj:
np.random.shuffle(texts)
return texts
class ObjCLEVRVisionLanguageCLIPDataModule(CLEVRVisionLanguageCLIPDataModule):
def __init__(
self,
data_root: str,
train_batch_size: int,
val_batch_size: int,
clip_transforms: Callable,
num_workers: int,
max_n_objects: int = 6,
shuffle_obj: bool = False,
):
super().__init__(data_root, train_batch_size, val_batch_size,
clip_transforms, num_workers, max_n_objects)
self.shuffle_obj = shuffle_obj
self.train_dataset = ObjCLEVRVisionLanguageCLIPDataset(
data_root=self.data_root,
max_num_images=self.num_train_images,
clip_transforms=self.clip_transforms,
max_n_objects=self.max_n_objects,
split='train',
shuffle_obj=self.shuffle_obj,
)
self.val_dataset = ObjCLEVRVisionLanguageCLIPDataset(
data_root=self.data_root,
max_num_images=self.num_val_images,
clip_transforms=self.clip_transforms,
max_n_objects=self.max_n_objects,
split='val',
shuffle_obj=self.shuffle_obj,
)
class ObjRecurCLEVRVisionLanguageCLIPDataset(ObjCLEVRVisionLanguageCLIPDataset
):
"""Dataset that loads *consequent* frames (clips) from CLEVR video.
One text ('color-shape' of an object) directly for one slot!
"""
def __init__(
self,
data_root: str,
max_num_images: Optional[int],
clip_transforms: Callable,
max_n_objects: int = 6,
split: str = "train",
clip_len: int = 34,
is_video: bool = False,
shuffle_obj: bool = False,
sample_clip_num: int = 2, # loaded clips per video
):
# TODO: we assume `self.max_n_objects` == 6 here!
super().__init__(data_root, max_num_images, clip_transforms,
max_n_objects, split, clip_len, is_video, shuffle_obj)
self.sample_clip_num = sample_clip_num
if self.split == 'train':
self.base_num = self.clip_len - (self.sample_clip_num - 1)
else:
self.base_num = self.clip_len
self.val_divide_num = 2 * self.sample_clip_num if \
self.split == 'val' else 1
def __getitem__(self, index: int):
"""Load one video and get only one frame from it"""
if self.is_video:
data = super().__getitem__(index)
data['text'] = data['text'][:1]
data['padding'] = data['padding'][:1]
return data
        clip_frames = self._get_clip(index)  # CLIP pre-processed clip tensor
        text = self._generate_text(index)  # raw text
        token, padding = self._pad_text_tokens(text)  # tokenize
        return dict(img=clip_frames, text=token, padding=padding)
def _get_clip(self, index: int):
"""Get one random frame from the video."""
img_idx, frame_idx = self._get_idx(index)
image_path = self.files[img_idx]
cap = cv2.VideoCapture(image_path)
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
imgs = []
for _ in range(self.sample_clip_num):
success, img = cap.read()
assert success, f'read video {image_path} frame {frame_idx} fail!'
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
imgs.append(img)
cap.release()
# return the CLIP pre-processed image
# of shape [`sample_clip_num`, 3, H, W]
return torch.stack(
[self.clip_transforms(Image.fromarray(img)) for img in imgs],
dim=0)
class ObjRecurCLEVRVisionLanguageCLIPDataModule(
ObjCLEVRVisionLanguageCLIPDataModule):
def __init__(
self,
data_root: str,
train_batch_size: int,
val_batch_size: int,
clip_transforms: Callable,
num_workers: int,
max_n_objects: int = 6,
shuffle_obj: bool = False,
sample_clip_num: int = 2,
):
super().__init__(data_root, train_batch_size, val_batch_size,
clip_transforms, num_workers, max_n_objects,
shuffle_obj)
self.sample_clip_num = sample_clip_num
self.train_dataset = ObjRecurCLEVRVisionLanguageCLIPDataset(
data_root=self.data_root,
max_num_images=self.num_train_images,
clip_transforms=self.clip_transforms,
max_n_objects=self.max_n_objects,
split='train',
shuffle_obj=self.shuffle_obj,
sample_clip_num=self.sample_clip_num,
)
self.val_dataset = ObjRecurCLEVRVisionLanguageCLIPDataset(
data_root=self.data_root,
max_num_images=self.num_val_images,
clip_transforms=self.clip_transforms,
max_n_objects=self.max_n_objects,
split='val',
shuffle_obj=self.shuffle_obj,
sample_clip_num=self.sample_clip_num,
)
class ObjAugCLEVRVisionLanguageCLIPDataset(ObjCLEVRVisionLanguageCLIPDataset):
"""Dataset that loads one random frame from CLEVR video.
One text ('color-shape' of an object) directly for one slot!
Apply random augmentation to get another view of the frame.
"""
def __init__(self,
data_root: str,
max_num_images: Optional[int],
clip_transforms: Callable,
max_n_objects: int = 6,
split: str = "train",
clip_len: int = 34,
is_video: bool = False,
shuffle_obj: bool = False,
flip_img: bool = False):
super().__init__(data_root, max_num_images, clip_transforms,
max_n_objects, split, clip_len, is_video, shuffle_obj)
self.flip_img = flip_img
def __getitem__(self, index: int):
"""Load one video and get only one frame from it"""
if self.is_video:
return super().__getitem__(index)
# load one frame and potentially do horizontal flip
img = self._get_frame(index) # clip pre-processed img tensor
if self.flip_img:
flipped_img = TF.hflip(img)
else:
flipped_img = img.detach().clone()
if self.split != 'train':
text = self._generate_text(index)
token, padding = self._pad_text_tokens(text)
return dict(
img=img,
flipped_img=flipped_img,
is_flipped=self.flip_img,
text=token,
padding=padding)
# load text description and potentially do text shuffling
text, shuffled_texts, shuffled_idx = self._generate_text(index)
token, padding = self._pad_text_tokens(text)
shuffled_token, shuffled_padding, shuffled_idx = self._pad_text_tokens(
shuffled_texts, shuffled_idx)
assert (padding == shuffled_padding).all()
return dict(
img=img,
flipped_img=flipped_img,
is_flipped=self.flip_img,
text=token,
padding=padding,
shuffled_text=shuffled_token,
shuffled_idx=shuffled_idx,
is_shuffled=self.shuffle_obj)
def _pad_text_tokens(self, texts: Tuple[str], text_idx: np.ndarray = None):
"""Tokenize texts and pad to `self.max_n_objects`"""
tokens = clip.tokenize(texts) # [n, C]
        # TODO: we're using `+1` to account for the background slot
num_pad = 1 + self.max_n_objects - tokens.shape[0]
pad_tokens = torch.zeros(num_pad, tokens.shape[1], dtype=tokens.dtype)
padded_tokens = torch.cat([tokens, pad_tokens], dim=0)
padding = torch.cat(
[torch.ones(tokens.shape[0]),
torch.zeros(num_pad)], dim=0).long()
if text_idx is not None: # [n]
padded_text_idx = -np.ones(padding.shape[0]).astype(np.int32)
padded_text_idx[:text_idx.shape[0]] = text_idx
return padded_tokens, padding, padded_text_idx
return padded_tokens, padding
def _generate_text(self, index: int):
"""Generate text descriptions of each object in the scene."""
img_idx = self._get_idx(index)[0]
anno = self.annos[img_idx]
colors = [obj['color'] for obj in anno['objects']]
shapes = [obj['shape'] for obj in anno['objects']]
texts = [
'a {} {}'.format(color, shape)
for color, shape in zip(colors, shapes)
]
# shuffle the order of objects
if self.split == 'train':
idx = np.arange(len(texts))
if self.shuffle_obj:
np.random.shuffle(idx)
shuffled_texts = [texts[i] for i in idx]
else:
shuffled_texts = copy.deepcopy(texts)
return texts, shuffled_texts, idx
return texts
class ObjAugCLEVRVisionLanguageCLIPDataModule(
ObjCLEVRVisionLanguageCLIPDataModule):
def __init__(
self,
data_root: str,
train_batch_size: int,
val_batch_size: int,
clip_transforms: Callable,
num_workers: int,
max_n_objects: int = 6,
shuffle_obj: bool = False,
flip_img: bool = False,
):
super().__init__(data_root, train_batch_size, val_batch_size,
clip_transforms, num_workers, max_n_objects,
shuffle_obj)
self.flip_img = flip_img
self.train_dataset = ObjAugCLEVRVisionLanguageCLIPDataset(
data_root=self.data_root,
max_num_images=self.num_train_images,
clip_transforms=self.clip_transforms,
max_n_objects=self.max_n_objects,
split='train',
shuffle_obj=self.shuffle_obj,
flip_img=self.flip_img,
)
self.val_dataset = ObjAugCLEVRVisionLanguageCLIPDataset(
data_root=self.data_root,
max_num_images=self.num_val_images,
clip_transforms=self.clip_transforms,
max_n_objects=self.max_n_objects,
split='val',
shuffle_obj=self.shuffle_obj,
flip_img=self.flip_img,
)
```
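A wiring sketch for the object-level data module, reusing the CLIP preprocessing pipeline as `clip_transforms` (the data root is a placeholder path).
```python
# Sketch only; "/path/to/clevr_video" is a placeholder path.
import clip

_, clip_transforms = clip.load("ViT-B/32", device="cpu")   # use CLIP's preprocess as the transform
datamodule = ObjCLEVRVisionLanguageCLIPDataModule(
    data_root="/path/to/clevr_video",
    train_batch_size=64,
    val_batch_size=64,
    clip_transforms=clip_transforms,
    num_workers=4,
    max_n_objects=6,
    shuffle_obj=True,
)
batch = next(iter(datamodule.train_dataloader()))
# batch['img']: [64, 3, 224, 224] with the ViT-B/32 preprocess,
# batch['text']: [64, 7, 77] padded tokens, batch['padding']: [64, 7]
```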
#### File: slot_language/object_slot/obj_model.py
```python
import sys
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from clip import CLIP
from obj_utils import SepLinear, SepLayerNorm, SepGRUCell
sys.path.append('../')
from model import SlotAttention, BgSepSlotAttention, \
SlotAttentionModel, SoftPositionEmbed
class ObjSlotAttention(SlotAttention):
"""A wrapper for SlotAttention to make forward interface consistent."""
def forward(self, inputs, slots_mu, slots_log_sigma=None, fg_mask=None):
return super().forward(inputs, slots_mu, slots_log_sigma)
class SemPosSepSlotAttention(SlotAttention):
"""SlotAttention that treats semantic and position information separately.
The forward pass is the same, simply replacing Modules with SepModules.
Args:
pos_dim: number of dims for position information, w.r.t. `slot_size`.
E.g., if slot_size = 64 and pos_dim = 8, then if in_features = 144,
we assume the last 16 channels of the img_feats is pos_enc.
"""
def __init__(self,
in_features,
num_iterations,
num_slots,
slot_size,
mlp_hidden_size,
pos_dim=8,
epsilon=1e-6):
nn.Module.__init__(self)
self.pos_ratio = pos_dim / slot_size
self.pos_dim = pos_dim
self.in_features = int(in_features * (1 + self.pos_ratio))
self.num_iterations = num_iterations
self.num_slots = num_slots
self.slot_size = int(slot_size * (1 + self.pos_ratio))
self.mlp_hidden_size = int(mlp_hidden_size * (1 + self.pos_ratio))
self.epsilon = epsilon
self.attn_scale = self.slot_size**-0.5
self.norm_inputs = SepLayerNorm(in_features, self.in_features)
self.norm_slots = SepLayerNorm(slot_size, self.slot_size)
self.norm_mlp = SepLayerNorm(slot_size, self.slot_size)
# Linear maps for the attention module.
self.project_q = SepLinear(
slot_size, self.slot_size, self.slot_size, bias=False)
self.project_k = SepLinear(
in_features, self.in_features, self.slot_size, bias=False)
self.project_v = SepLinear(
in_features, self.in_features, self.slot_size, bias=False)
# Slot update functions.
self.gru = SepGRUCell(slot_size, self.slot_size, self.slot_size)
self.mlp = nn.Sequential(
SepLinear(slot_size, self.slot_size, self.mlp_hidden_size),
nn.ReLU(),
SepLinear(mlp_hidden_size, self.mlp_hidden_size, self.slot_size),
)
# FC to keep the output slot_size and mix sem and pos information
self.out_mlp = nn.Sequential(
nn.Linear(self.slot_size, slot_size),
nn.ReLU(),
nn.Linear(slot_size, slot_size),
)
def _init_slots(self, batch_size, slots_mu, slots_log_sigma):
# Initialize the slots. Shape: [batch_size, num_slots, slot_size].
assert len(slots_mu.shape) == 3, 'wrong slot embedding shape!'
assert int(slots_mu.shape[-1] * (1 + self.pos_ratio)) == self.slot_size
# pad it with pos_emb, inited as all zeros vector
slots = torch.cat([
slots_mu,
torch.zeros(batch_size, self.num_slots,
self.pos_dim).type_as(slots_mu).detach()
],
dim=-1)
return slots
def forward(self, inputs, slots_mu, slots_log_sigma=None, fg_mask=None):
slots = super().forward(inputs, slots_mu, slots_log_sigma)
return self.out_mlp(slots)
class ObjBgSepSlotAttention(BgSepSlotAttention):
"""Slot attention module that iteratively performs cross-attention.
    The BgSep variant processes fg slots and bg slots separately.
    Note: the difference in this `Obj` version is that the number of
    background slots may vary across the batch. Fortunately, all the
    operations here are performed along the last dim (slot_size),
    so we can safely view the slots as a [N, slot_size] tensor for the forward pass.
"""
def forward(self, inputs, slots_mu, slots_log_sigma=None, fg_mask=None):
"""Forward function.
Args:
inputs: [B, N, C], flattened per-pixel features
slots_mu: if [B, num_slots, C], then directly use it as embeddings;
if [B, C], used to do sampling (mu shared by slots)
slots_log_sigma: if None, no sampling;
if [B, C], used to do sampling (sigma shared by slots)
fg_mask: [B, num_slots], boolean mask indicating fg/bg slots
"""
assert len(slots_mu.shape) == 3 and fg_mask is not None
bg_mask = ~fg_mask
# `inputs` has shape [batch_size, num_inputs, inputs_size].
# `num_inputs` is actually the spatial dim of feature map (H*W)
bs, num_inputs, inputs_size = inputs.shape
inputs = self.norm_inputs(inputs) # Apply layer norm to the input.
# Shape: [batch_size, num_inputs, slot_size].
k = self.project_k(inputs)
# Shape: [batch_size, num_inputs, slot_size].
v = self.project_v(inputs)
# Initialize the slots. Shape: [batch_size, num_slots, slot_size].
slots = slots_mu
fg_slots, bg_slots = slots[fg_mask], slots[bg_mask]
# calculate number of fg slots in each data
num_fgs = fg_mask.sum(1) # [B]
fg_start_idx = [num_fgs[:i].sum().item() for i in range(bs)]
fg_end_idx = [num_fgs[:i + 1].sum().item() for i in range(bs)]
num_bgs = (bg_mask).sum(1) # [B]
bg_start_idx = [num_bgs[:i].sum().item() for i in range(bs)]
bg_end_idx = [num_bgs[:i + 1].sum().item() for i in range(bs)]
# Multiple rounds of attention.
for _ in range(self.num_iterations):
fg_slots_prev = fg_slots
bg_slots_prev = bg_slots
# Attention.
fg_q = self.project_q(fg_slots)
bg_q = self.bg_project_q(bg_slots)
logits = torch.empty((bs, self.num_slots, num_inputs)).type_as(k)
k_trans = k.transpose(2, 1).contiguous()
for i in range(bs):
one_fg_q = fg_q[fg_start_idx[i]:fg_end_idx[i]].unsqueeze(0)
fg_logits = torch.matmul(one_fg_q, k_trans[i:i + 1])
one_bg_q = bg_q[bg_start_idx[i]:bg_end_idx[i]].unsqueeze(0)
bg_logits = torch.matmul(one_bg_q, k_trans[i:i + 1])
logits[i:i + 1] = torch.cat([fg_logits, bg_logits], dim=1)
attn = F.softmax(logits, dim=-1) + self.epsilon
# `attn` has shape: [batch_size, num_slots, num_inputs].
# Weighted mean.
attn = attn / attn.sum(dim=-1, keepdim=True)
fg_attn, bg_attn = attn[fg_mask], attn[bg_mask]
updates = torch.empty(
(bs, self.num_slots, self.slot_size)).type_as(attn)
for i in range(bs):
one_fg_attn = fg_attn[fg_start_idx[i]:fg_end_idx[i]]
fg_updates = torch.matmul(one_fg_attn.unsqueeze(0), v[i:i + 1])
one_bg_attn = bg_attn[bg_start_idx[i]:bg_end_idx[i]]
bg_updates = torch.matmul(one_bg_attn.unsqueeze(0), v[i:i + 1])
updates[i:i + 1] = torch.cat([fg_updates, bg_updates], dim=1)
# `updates` has shape: [batch_size, num_slots, slot_size].
# Slot update.
# GRU is expecting inputs of size (N,H)
# so flatten batch and slots dimension
fg_slots = self.gru(updates[fg_mask], fg_slots_prev)
fg_slots = fg_slots + self.mlp(fg_slots)
bg_slots = self.gru(updates[bg_mask], bg_slots_prev)
bg_slots = bg_slots + self.mlp(bg_slots)
slots = torch.empty((bs, self.num_slots, self.slot_size)).type_as(k)
slots[fg_mask] = fg_slots
slots[bg_mask] = bg_slots
return slots
class ObjSlotAttentionModel(SlotAttentionModel):
def __init__(self,
clip_model: CLIP,
use_clip_vision: bool,
use_clip_text: bool,
text2slot_model: nn.Module,
resolution: Tuple[int, int],
num_slots: int,
num_iterations: int,
enc_resolution: Tuple[int, int] = (128, 128),
enc_channels: int = 3,
enc_pos_enc: bool = False,
slot_size: int = 64,
dec_kernel_size: int = 5,
dec_hidden_dims: Tuple[int, ...] = (64, 64, 64, 64, 64),
dec_resolution: Tuple[int, int] = (8, 8),
slot_mlp_size: int = 128,
use_entropy_loss: bool = False,
use_bg_sep_slot: bool = False):
super().__init__(
clip_model,
use_clip_vision,
use_clip_text,
text2slot_model,
resolution,
num_slots,
num_iterations,
enc_resolution=enc_resolution,
enc_channels=enc_channels,
enc_pos_enc=enc_pos_enc,
slot_size=slot_size,
dec_kernel_size=dec_kernel_size,
dec_hidden_dims=dec_hidden_dims,
dec_resolution=dec_resolution,
slot_mlp_size=slot_mlp_size,
use_word_set=False,
use_padding_mask=False,
use_entropy_loss=use_entropy_loss,
use_bg_sep_slot=use_bg_sep_slot)
slot_attn = BgSepSlotAttention if \
self.use_bg_sep_slot else SlotAttention
# slot_attn = ObjBgSepSlotAttention if \
# self.use_bg_sep_slot else ObjSlotAttention
self.slot_attention = slot_attn(
in_features=self.out_features,
num_iterations=self.num_iterations,
num_slots=self.num_slots,
slot_size=self.slot_size,
mlp_hidden_size=self.slot_mlp_size,
)
def _get_slot_embedding(self, tokens, paddings):
"""Encode text, generate slot embeddings.
Args:
tokens: [B, N, C]
padding: [B, N]
"""
if not self.use_clip_text:
# not generating slots
return None, None
# we treat each obj as batch dim and get global text (for each phrase)
obj_mask = (paddings == 1)
obj_tokens = tokens[obj_mask] # [K, C]
text_features = self.clip_model.encode_text(
obj_tokens, lin_proj=False, per_token_emb=False,
return_mask=False) # [K, C]
text_features = text_features.type(self.dtype)
slots = self.text2slot_model(text_features, obj_mask)
return slots, obj_mask
def encode(self, x):
"""Encode from img to slots."""
img, text, padding = x['img'], x['text'], x['padding']
encoder_out = self._get_encoder_out(img) # transformed vision feature
# `encoder_out` has shape: [batch_size, height*width, filter_size]
# slot initialization
slot_mu, obj_mask = self._get_slot_embedding(text, padding)
# (batch_size, self.num_slots, self.slot_size)
slots = self.slot_attention(encoder_out, slot_mu, fg_mask=obj_mask)
return slots
class SemPosSepObjSlotAttentionModel(ObjSlotAttentionModel):
def __init__(self,
clip_model: CLIP,
use_clip_vision: bool,
use_clip_text: bool,
text2slot_model: nn.Module,
resolution: Tuple[int, int],
num_slots: int,
num_iterations: int,
enc_resolution: Tuple[int, int] = (128, 128),
enc_channels: int = 3,
enc_mlp_out: bool = True,
slot_size: int = 64,
pos_size: int = 8,
dec_kernel_size: int = 5,
dec_hidden_dims: Tuple[int, ...] = (64, 64, 64, 64, 64),
dec_resolution: Tuple[int, int] = (8, 8),
slot_mlp_size: int = 128,
use_entropy_loss: bool = False,
use_bg_sep_slot: bool = False):
super().__init__(
clip_model,
use_clip_vision,
use_clip_text,
text2slot_model,
resolution,
num_slots,
num_iterations,
enc_resolution=enc_resolution,
enc_channels=enc_channels,
enc_pos_enc=True,
slot_size=slot_size,
dec_kernel_size=dec_kernel_size,
dec_hidden_dims=dec_hidden_dims,
dec_resolution=dec_resolution,
slot_mlp_size=slot_mlp_size,
use_entropy_loss=use_entropy_loss,
use_bg_sep_slot=use_bg_sep_slot)
self.enc_mlp_out = enc_mlp_out
self.pos_size = pos_size
# Build Encoder related modules
self.pos_ratio = pos_size / slot_size
self.encoder_pos_embedding = ConcatSoftPositionEmbed(
3, int(self.enc_channels * self.pos_ratio), self.enc_resolution)
if self.enc_mlp_out:
self.encoder_out_layer = nn.Sequential(
nn.Linear(self.enc_channels, self.out_features),
nn.ReLU(),
nn.Linear(self.out_features, self.out_features),
)
self.slot_attention = SemPosSepSlotAttention(
in_features=self.out_features,
num_iterations=self.num_iterations,
num_slots=self.num_slots,
slot_size=self.slot_size,
mlp_hidden_size=self.slot_mlp_size,
pos_dim=self.pos_size,
)
def _get_encoder_out(self, img):
"""Encode image, potentially add pos enc, apply MLP."""
if self.use_clip_vision:
encoder_out = self.clip_model.encode_image(
img, global_feats=False, downstream=True) # BCDD
encoder_out = encoder_out.type(self.dtype)
else:
encoder_out = self.encoder(img)
encoder_out = encoder_out.permute(0, 2, 3, 1) # [B, H, W, C]
if self.enc_mlp_out:
encoder_out = self.encoder_out_layer(encoder_out)
encoder_out = self.encoder_pos_embedding(encoder_out).flatten(1, 2)
return encoder_out # [B, H*W, C]
class ConcatSoftPositionEmbed(SoftPositionEmbed):
"""Concat along channel dim."""
def forward(self, inputs):
emb_proj = self.dense(self.grid).repeat(inputs.shape[0], 1, 1, 1)
return torch.cat([inputs, emb_proj], dim=-1)
```
#### File: slot_language/stronger_model/two_stream_method.py
```python
import sys
import pytorch_lightning as pl
import torch
from torchvision import utils as vutils
from two_stream_model import TwoStreamSlotAttentionModel
from two_stream_params import SlotAttentionParams
sys.path.append('../')
from utils import to_rgb_from_tensor
from method import SlotAttentionVideoLanguageMethod
class TwoStreamSlotAttentionVideoLanguageMethod(
SlotAttentionVideoLanguageMethod):
def __init__(self, model: TwoStreamSlotAttentionModel,
datamodule: pl.LightningDataModule,
params: SlotAttentionParams):
super().__init__(model, datamodule, params)
self.entropy_loss_w = params.entropy_loss_w
def sample_images(self):
dl = self.datamodule.val_dataloader()
perm = torch.randperm(self.params.val_batch_size)
idx = perm[:self.params.n_samples]
batch = {k: v[idx] for k, v in next(iter(dl)).items()}
if self.params.gpus > 0:
batch = {k: v.to(self.device) for k, v in batch.items()}
recon_combined, recons, masks, coarse_masks = self.model.forward(batch)
# combine images in a nice way so we can display all outputs in one grid, output rescaled to be between 0 and 1
out = to_rgb_from_tensor(
torch.cat(
[
batch['img'].unsqueeze(1), # original images
recon_combined.unsqueeze(1), # reconstructions
recons * masks + (1 - masks), # each slot
],
dim=1,
)) # [B, num_slots+2, C, H, W]
bs, num_slots, C, H, W = recons.shape
images = vutils.make_grid(
out.view(bs * out.shape[1], C, H, W).cpu(),
normalize=False,
nrow=out.shape[1],
) # [C, B*H, (num_slots+2)*W]
# also visualize the mask of slots
# masks of shape [B, num_slots, 1, H, W]
masks = torch.cat([masks] * C, dim=2) # [B, num_slots, C, H, W]
masks = vutils.make_grid(
masks.view(bs * masks.shape[1], C, H, W).cpu(),
normalize=False,
nrow=masks.shape[1],
) # [C, B*H, num_slots*W]
# the same goes to coarse_mask
coarse_masks = torch.cat([coarse_masks] * C, dim=2)
coarse_masks = vutils.make_grid(
coarse_masks.view(bs * coarse_masks.shape[1], C, H, W).cpu(),
normalize=False,
nrow=coarse_masks.shape[1],
) # [C, B*H, num_slots*W]
return images, masks, coarse_masks
```
#### File: slot_language/stronger_model/two_stream_model.py
```python
import sys
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from clip import CLIP
from unet_model import UNetSlotAttentionModel, \
SlotAttention, BgSepSlotAttention, SoftPositionEmbed
class TwoStreamSlotAttentionModel(UNetSlotAttentionModel):
"""CLIP + Slot Attention with UNet like structure.
UNet's downsample and upsample lead to stronger representation capacity.
"""
def __init__(self,
clip_model: CLIP,
use_clip_vision: bool,
use_clip_text: bool,
text2slot_model: nn.Module,
text2slot_model_conv: nn.Module,
resolution: Tuple[int, int],
num_slots: int,
num_iterations: int,
slot_size: int = 64,
slot_mlp_size: int = 128,
kernel_size: int = 5,
enc_channels: Tuple[int, ...] = (64, 64, 64, 64),
dec_channels: Tuple[int, ...] = (64, 64),
enc_pos_enc: bool = False,
enc_pos_enc_conv: bool = False,
dec_pos_enc: bool = False,
dec_resolution: Tuple[int, int] = (128, 128),
spread_hard_mask: bool = False,
finetune_mask: bool = False,
use_maxpool: bool = True,
use_bn: bool = False,
use_word_set: bool = False,
use_padding_mask: bool = False,
use_entropy_loss: bool = False,
use_bg_sep_slot: bool = False):
super().__init__(
clip_model,
use_clip_vision,
use_clip_text,
text2slot_model,
resolution,
num_slots,
num_iterations,
slot_size=slot_size,
slot_mlp_size=slot_mlp_size,
kernel_size=kernel_size,
enc_channels=enc_channels,
dec_channels=dec_channels,
enc_pos_enc=enc_pos_enc,
dec_resolution=dec_resolution,
use_maxpool=use_maxpool,
use_bilinear=True,
use_bn=use_bn,
use_word_set=use_word_set,
use_padding_mask=use_padding_mask,
use_entropy_loss=use_entropy_loss,
use_bg_sep_slot=use_bg_sep_slot)
self.enc_pos_enc_conv = enc_pos_enc_conv
self.dec_pos_enc = dec_pos_enc
if spread_hard_mask:
assert finetune_mask
self.spread_hard_mask = spread_hard_mask
self.finetune_mask = finetune_mask
if self.dec_pos_enc:
self.decoder_pos_embedding = SoftPositionEmbed(
3, self.out_features, self.dec_resolution)
else:
self.decoder_pos_embedding = None
out_dim = 4 if finetune_mask else 3
self.out_conv = nn.Conv2d(
dec_channels[-1], out_dim, kernel_size=3, stride=1, padding=1)
# building the second stream that outputs Conv kernels with features
self.text2slot_model_conv = text2slot_model_conv
if self.enc_pos_enc_conv:
self.encoder_pos_embedding_conv = SoftPositionEmbed(
3, self.out_features, self.enc_resolution)
else:
self.encoder_pos_embedding_conv = None
self.encoder_out_layer_conv = nn.Sequential(
nn.Linear(self.out_features, self.out_features),
nn.ReLU(),
nn.Linear(self.out_features, self.out_features),
)
slot_attn = BgSepSlotAttention if self.use_bg_sep_slot else SlotAttention
self.slot_attention_conv = slot_attn(
in_features=self.out_features,
num_iterations=self.num_iterations,
num_slots=self.num_slots,
slot_size=self.slot_size,
mlp_hidden_size=self.slot_mlp_size,
)
def _build_decoder(self, channels, kernel_size):
"""No up-sampling in the decoder."""
modules = []
for i in range(len(channels) - 1):
modules.append(
nn.Sequential(
nn.Conv2d(
channels[i],
channels[i + 1],
kernel_size=kernel_size,
stride=1,
padding=kernel_size // 2,
),
nn.ReLU(),
))
self.decoder = nn.Sequential(*modules)
def _get_encoder_out(self, img):
"""Encode image, potentially add pos enc, apply MLP."""
if self.use_clip_vision:
encoder_out = self.clip_model.encode_image(
img, global_feats=False, downstream=True) # BCDD
encoder_out = encoder_out.type(self.dtype)
else:
encoder_out = self.encoder(img)
return encoder_out
def _get_visual_features(self, encoder_out, pos_enc, enc_out_layer):
"""Add positional encoding and MLP on image feature maps."""
# `encoder_out` is of shape [B, C, H, W]
if pos_enc is not None:
encoder_out = pos_enc(encoder_out)
img_features = encoder_out.flatten(2, 3).permute(0, 2, 1)
img_features = enc_out_layer(img_features) # [B, H*W, C']
return img_features
def _encode_text_feature(self, text):
"""Encode text feature using LM in CLIP."""
if not self.use_clip_text:
# not generating slots
return None
text_features = self.clip_model.encode_text(
text,
lin_proj=False,
per_token_emb=self.use_word_set,
return_mask=self.use_padding_mask) # BC or BLC + padding mask
if self.use_padding_mask:
text_features, padding_mask = text_features[0].type(self.dtype), \
text_features[1].type(self.dtype)
text_features = dict(
text_features=text_features, padding_mask=padding_mask)
else:
text_features = text_features.type(self.dtype)
return text_features
def _get_slot_embedding(self, text_features, text2slot_model):
"""Encode text, generate slot embeddings."""
if text_features is None:
return None, None
slot_mu, slot_log_sigma = text2slot_model(text_features)
return slot_mu, slot_log_sigma
def forward(self, x):
torch.cuda.empty_cache()
feature_maps, slots, slots_conv = self.encode(x)
recon_combined, recons, masks, slots = self.decode(
feature_maps, slots, slots_conv, x['img'].shape)
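        # Note (added comment): `decode` returns the coarse seg_mask
        # (shape [B, num_slots, 1, H, W]) as its last element; it is re-bound to
        # the name `slots` here and returned as the fourth output, which the
        # method class unpacks as `coarse_masks`.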
return recon_combined, recons, masks, slots
def encode(self, x):
"""Encode from img to slots."""
img, text = x['img'], x['text']
# encoder_out is of shape [B, C, H, W]
encoder_out = self._get_encoder_out(img) # transformed vision feature
img_features = self._get_visual_features(encoder_out,
self.encoder_pos_embedding,
self.encoder_out_layer)
img_features_conv = self._get_visual_features(
encoder_out, self.encoder_pos_embedding_conv,
self.encoder_out_layer_conv)
# slot initialization
text_features = self._encode_text_feature(text)
slot_mu, slot_log_sigma = self._get_slot_embedding(
text_features, self.text2slot_model)
slot_mu_conv, slot_log_sigma_conv = self._get_slot_embedding(
text_features, self.text2slot_model_conv)
# (batch_size, self.num_slots, self.slot_size)
slots = self.slot_attention(img_features, slot_mu, slot_log_sigma)
slots_conv = self.slot_attention_conv(img_features_conv, slot_mu_conv,
slot_log_sigma_conv)
return encoder_out, slots, slots_conv
def decode(self, feature_maps, slots, slots_conv, img_shape):
"""Decode from slots to reconstructed images and masks.
Args:
feature_maps: [B, C, H, W], feature maps from `self.encoder`
slots/slots_conv: [B, num_slots, C]
"""
batch_size, num_slots, slot_size = slots.shape
batch_size, num_channels, height, width = img_shape
# Conv feature maps to get seg_mask with slots_conv as kernels
# seg_mask is of shape [B, num_slots, H, W]
seg_mask = torch.einsum('bnc,bchw->bnhw', [slots_conv, feature_maps])
seg_mask = F.softmax(seg_mask, dim=1)
# spread slots to the regions of seg_mask
if self.spread_hard_mask:
seg_mask = (seg_mask == seg_mask.max(1)[0].unsqueeze(1)).float()
decoder_in = torch.einsum('bnc,bnhw->bnchw', [slots, seg_mask])
decoder_in = decoder_in.flatten(0, 1) # [B * num_slots, C, H, W]
# decode results
if self.dec_pos_enc:
decoder_in = self.decoder_pos_embedding(decoder_in)
out = self.decoder(decoder_in)
out = self.out_conv(out)
# `out` has shape: [B * num_slots, 3(+1), H, W].
out = out.view(batch_size, num_slots, -1, height, width)
recons = out[:, :, :num_channels, :, :]
if self.finetune_mask:
masks = out[:, :, -1:, :, :]
masks = F.softmax(masks, dim=1)
else: # already after softmax
masks = seg_mask.unsqueeze(2) # [B, num_slots, 1, H, W]
recon_combined = torch.sum(recons * masks, dim=1)
return recon_combined, recons, masks, seg_mask.unsqueeze(2)
@property
def dtype(self):
return self.out_conv.weight.dtype
class MaskFormerSlotAttentionModel(TwoStreamSlotAttentionModel):
"""CLIP + Slot Attention with UNet like structure.
UNet's downsample and upsample lead to stronger representation capacity.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
# building the second stream that outputs Conv kernels with features
down_scale = 2**(len(self.enc_channels) - 1)
feats_res = (self.enc_resolution[0] // down_scale,
self.enc_resolution[1] // down_scale)
if self.enc_pos_enc_conv:
self.encoder_pos_embedding_conv = SoftPositionEmbed(
3, self.enc_channels[-1], feats_res)
else:
self.encoder_pos_embedding_conv = None
self.encoder_out_layer_conv = nn.Sequential(
nn.Linear(self.enc_channels[-1], self.enc_channels[-1]),
nn.ReLU(),
nn.Linear(self.enc_channels[-1], self.enc_channels[-1]),
)
slot_attn = BgSepSlotAttention if self.use_bg_sep_slot else SlotAttention
self.slot_attention_conv = slot_attn(
in_features=self.enc_channels[-1],
num_iterations=self.num_iterations,
num_slots=self.num_slots,
slot_size=self.slot_size,
mlp_hidden_size=self.slot_mlp_size,
)
def _get_encoder_out(self, img):
"""Encode image, potentially add pos enc, apply MLP."""
assert not self.use_clip_vision
encoder_out, inter_feats = self.encoder(img, return_feats=True)
return encoder_out, inter_feats
def encode(self, x):
"""Encode from img to slots."""
img, text = x['img'], x['text']
# encoder_out is of shape [B, C, H, W]
encoder_out, inter_feats = self._get_encoder_out(img)
img_features = self._get_visual_features(encoder_out,
self.encoder_pos_embedding,
self.encoder_out_layer)
img_features_conv = self._get_visual_features(
inter_feats, self.encoder_pos_embedding_conv,
self.encoder_out_layer_conv)
# slot initialization
text_features = self._encode_text_feature(text)
slot_mu, slot_log_sigma = self._get_slot_embedding(
text_features, self.text2slot_model)
slot_mu_conv, slot_log_sigma_conv = self._get_slot_embedding(
text_features, self.text2slot_model_conv)
# (batch_size, self.num_slots, self.slot_size)
slots = self.slot_attention(img_features, slot_mu, slot_log_sigma)
slots_conv = self.slot_attention_conv(img_features_conv, slot_mu_conv,
slot_log_sigma_conv)
return encoder_out, slots, slots_conv
```
#### File: slot_attention/slot_language/text_model.py
```python
from typing import Tuple
import torch
from torch import nn
from torch.nn import TransformerDecoder, TransformerDecoderLayer
from utils import Tensor
def fc_bn_relu(in_dim, out_dim, use_bn):
if use_bn:
return nn.Sequential(
nn.Linear(in_dim, out_dim, bias=False),
nn.BatchNorm1d(out_dim),
nn.ReLU(),
)
return nn.Sequential(
nn.Linear(in_dim, out_dim, bias=True),
nn.ReLU(),
)
def build_mlps(in_channels, hidden_sizes, out_channels, use_bn):
if hidden_sizes is None or len(hidden_sizes) == 0:
return nn.Linear(in_channels, out_channels)
modules = [fc_bn_relu(in_channels, hidden_sizes[0], use_bn=use_bn)]
for i in range(0, len(hidden_sizes) - 1):
modules.append(
fc_bn_relu(hidden_sizes[i], hidden_sizes[i + 1], use_bn=use_bn))
modules.append(nn.Linear(hidden_sizes[-1], out_channels))
return nn.Sequential(*modules)
class MLPText2Slot(nn.Module):
"""Generate slot embedding from text features using MLPs.
Args:
in_channels (int): channels of input text features.
hidden_sizes (Tuple[int]): MLPs hidden sizes.
predict_dist (bool): whether to predict the (shared) mu and log_sigma
of slot embedding, or directly predict each slot's value.
"""
def __init__(self,
in_channels: int,
num_slots: int,
slot_size: int,
hidden_sizes: Tuple[int] = (256, ),
predict_dist: bool = True,
use_bn: bool = False):
super(MLPText2Slot, self).__init__()
self.num_slots = num_slots
self.slot_size = slot_size
self.predict_dist = predict_dist
self.device = "cuda" if torch.cuda.is_available() else "cpu"
if self.predict_dist:
self.mlp_mu = build_mlps(
in_channels, hidden_sizes, slot_size, use_bn=use_bn)
self.mlp_log_sigma = build_mlps(
in_channels, hidden_sizes, slot_size, use_bn=use_bn)
else:
self.mlp_mu = build_mlps(
in_channels,
hidden_sizes,
num_slots * slot_size,
use_bn=use_bn)
def forward(self, text_features: Tensor):
"""Forward function.
Args:
text_features: [B, C], features extracted from sentences
"""
slot_mu = self.mlp_mu(text_features)
if self.predict_dist:
slot_log_sigma = self.mlp_log_sigma(text_features)
return slot_mu, slot_log_sigma
return slot_mu.view(-1, self.num_slots, self.slot_size), None
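# Illustrative usage sketch (added; the channel sizes and batch size below are
# assumptions, not values taken from this repo):
#   text2slot = MLPText2Slot(in_channels=512, num_slots=7, slot_size=64,
#                            hidden_sizes=(256, ), predict_dist=True)
#   mu, log_sigma = text2slot(torch.randn(8, 512))
#   # predict_dist=True -> shared mu/log_sigma of shape [8, 64];
#   # predict_dist=False -> mu of shape [8, 7, 64] and log_sigma is None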
class TransformerText2Slot(nn.Module):
"""Generate slot embedding from text features using TransformerDecoders.
Args:
in_channels (int): channels of input text features.
d_model (int): hidden dims in Transformer
"""
def __init__(self,
in_channels: int,
num_slots: int,
slot_size: int = 64,
text_length: int = 77,
d_model: int = 64,
nhead: int = 1,
num_layers: int = 2,
dim_feedforward: int = 256,
dropout: float = 0.1,
activation: str = 'relu',
text_pe: bool = True,
out_mlp_layers: int = 2):
super(TransformerText2Slot, self).__init__()
# Transformer decoder for query, language interaction
decoder_layer = TransformerDecoderLayer(
d_model,
nhead,
dim_feedforward=dim_feedforward,
dropout=dropout,
activation=activation)
norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer=decoder_layer, num_layers=num_layers, norm=norm)
# reset params as in MaskFormer
self._reset_parameters()
if in_channels != d_model:
self.input_proj = nn.Linear(in_channels, d_model, bias=True)
else:
self.input_proj = nn.Identity()
hidden_dims = [d_model for _ in range(out_mlp_layers - 1)]
self.output_proj = build_mlps(
d_model, hidden_dims, slot_size, use_bn=False)
# learnable queries to interact with language features
self.query_embed = nn.Embedding(num_slots, d_model)
nn.init.xavier_uniform_( # as the slot_mu/sigma in slot-attention
self.query_embed.weight,
gain=nn.init.calculate_gain("linear"))
# learnable positional embedding for text features
self.text_pe = text_pe
if self.text_pe:
self.text_pos_embed = nn.Embedding(text_length, d_model)
nn.init.normal_(self.text_pos_embed.weight, std=0.01)
def forward(self, inputs: dict):
"""Forward function.
Args:
text_features: [B, L, C], features extracted for *each* word.
text_padding_mask: [B, L], mask indicating padded position
"""
if isinstance(inputs, dict):
text_features = inputs['text_features']
text_padding_mask = inputs.get('text_padding_mask')
else:
text_features = inputs
text_padding_mask = None
bs = text_features.shape[0]
query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
text_features = self.input_proj(text_features)
if self.text_pe: # do positional encoding
text_features = text_features + \
self.text_pos_embed.weight.unsqueeze(0)
pred_slots = self.decoder(
query_embed,
text_features.permute(1, 0, 2).contiguous(),
memory_key_padding_mask=text_padding_mask,
).permute(1, 0, 2).contiguous() # [B, num_slots, D]
pred_slots = self.output_proj(pred_slots)
return pred_slots, None
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
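# Illustrative usage sketch (added; dimensions are assumptions):
#   t2s = TransformerText2Slot(in_channels=512, num_slots=7, slot_size=64, d_model=64)
#   slots, _ = t2s(torch.randn(8, 77, 512))  # per-token text features -> [8, 7, 64] slots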
class ObjMLPText2Slot(nn.Module):
"""Generate slot embedding from text features using MLPs.
Input for each scene is [N, C]
Args:
in_channels (int): channels of input text features.
hidden_sizes (Tuple[int]): MLPs hidden sizes.
"""
def __init__(self,
in_channels: int,
slot_size: int,
hidden_sizes: Tuple[int] = (256, ),
use_bn: bool = False):
super(ObjMLPText2Slot, self).__init__()
self.slot_size = slot_size
self.device = "cuda" if torch.cuda.is_available() else "cpu"
# this is for the background slots that don't have predicted embedding
self.slots_mu = nn.Parameter(
nn.init.xavier_uniform_(
torch.zeros((1, self.slot_size)),
gain=nn.init.calculate_gain("linear")))
self.slots_log_sigma = nn.Parameter(
nn.init.xavier_uniform_(
torch.zeros((1, self.slot_size)),
gain=nn.init.calculate_gain("linear")))
# simple share-weight MLPs
self.mlp = build_mlps(
in_channels, hidden_sizes, slot_size, use_bn=use_bn)
def forward(self, text_features: Tensor, padding_mask: Tensor):
"""Forward function.
Args:
text_features: [K, C], features extracted from sentences
padding_mask: [B, num_slots] boolean mask
"""
assert text_features.shape[0] == padding_mask.sum()
obj_slots = self.mlp(text_features)
pad_num = padding_mask.numel() - text_features.shape[0]
slots_init = torch.randn(pad_num, self.slot_size).type_as(obj_slots)
pad_slots = self.slots_mu + self.slots_log_sigma.exp() * slots_init
# do the padding and build final slots
bs, num_slots = padding_mask.shape
slots = torch.empty((bs, num_slots, self.slot_size)).type_as(obj_slots)
slots[padding_mask] = obj_slots
slots[~padding_mask] = pad_slots.type_as(obj_slots)
return slots
``` |
{
"source": "JiaqiYao/dynamic_multi_label",
"score": 2
} |
#### File: dynamic_multi_label/metric/measure.py
```python
import numpy as np
from collections import defaultdict
from collections import OrderedDict
import heapq
def measure_k(probs, true_ys, k_list=[1, 3, 5]):
max_k = np.max(k_list)
num_samples = np.size(true_ys, 0)
precision_k = defaultdict(float)
dgg_k = defaultdict(float)
ndgg_k = defaultdict(float)
for i in range(num_samples):
prob = probs[i, :]
true_y = true_ys[i, :]
prob = list(zip(prob, range(len(prob))))
max_k_largest_index = [x[1] for x in heapq.nlargest(max_k, prob, key=lambda x: x[0])]
for k in k_list:
precision_k[k] += np.sum(true_y[max_k_largest_index[0:k]])/k
dgg_k[k] += np.sum(true_y[max_k_largest_index[0:k]] / np.log2(2+np.arange(k)))
for k in k_list:
precision_k[k] /= num_samples
dgg_k[k] /= num_samples
ndgg_k[k] = dgg_k[k] / np.sum(1/np.log2(2+np.arange(k)))
return precision_k, dgg_k, ndgg_k
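# Worked example (added for clarity; values are illustrative):
#   probs = np.array([[0.9, 0.8, 0.1, 0.2]])
#   true_ys = np.array([[1, 0, 1, 0]])
#   precision_k, dcg_k, ndcg_k = measure_k(probs, true_ys, k_list=[2])
#   # the top-2 scores pick labels 0 and 1 -> Precision@2 = 1/2,
#   # DCG@2 = 1/log2(2) + 0/log2(3) = 1.0,
#   # nDCG@2 = 1.0 / (1/log2(2) + 1/log2(3)) ~= 0.61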
def measure_b(pred_b, y):
epsilon = 1e-9
#micro
tp = np.sum(np.logical_and(pred_b, y))
fp = np.sum(np.logical_and(pred_b, np.logical_not(y)))
fn = np.sum(np.logical_and(np.logical_not(pred_b), y))
micro_p = tp/(tp+fp+epsilon)
micro_r = tp/(tp+fn+epsilon)
micor_f1 = 2*micro_p*micro_r/(micro_p+micro_r)
    # macro
tp = np.sum(np.logical_and(pred_b, y), 0)
fp = np.sum(np.logical_and(pred_b, np.logical_not(y)), 0)
fn = np.sum(np.logical_and(np.logical_not(pred_b), y), 0)
marco_p = np.mean(tp/(tp+fp+epsilon))
marco_r = np.mean(tp/(tp+fn+epsilon))
marco_f1 = 2*marco_p*marco_r/(marco_p+marco_r)
#Example based measures
hamming_loss = np.mean(np.logical_xor(pred_b, y))
accuracy = np.mean(np.sum(np.logical_and(pred_b, y), 1)/np.sum(np.logical_or(pred_b, y), 1))
precision = np.mean(np.sum(np.logical_and(pred_b, y), 1)/np.sum(pred_b, 1))
recall = np.mean(np.sum(np.logical_and(pred_b, y), 1)/(np.sum(y, 1)+epsilon))
F1 = np.mean(2*np.sum(np.logical_and(pred_b, y), 1)/(np.sum(pred_b, 1)+np.sum(y, 1)))
return micro_p, micro_r, micor_f1, marco_p, marco_r, marco_f1, hamming_loss, accuracy, precision, recall, F1
def measure_multi_label(probabilities, true_ys, mode, k_list=[1, 3, 5]):
#measure_k
result = OrderedDict()
precision_k, dgg_k, ndgg_k = measure_k(probabilities, true_ys, k_list)
for k in k_list:
result[mode+'_Precision@{0:d}'.format(k)] = precision_k[k]
result[mode+'_dgg@{0:d}'.format(k)] = dgg_k[k]
result[mode+'_ndgg@{0:d}'.format(k)] = ndgg_k[k]
# measure_b
binaries = probabilities >= 0.5 # thresholds
    # adjust predictions so that every sample has at least one positive label
index = np.argmax(probabilities, 1)
binaries = np.reshape(np.array(binaries), [-1])
index = index + np.size(probabilities, 1) * np.arange(np.size(probabilities, 0))
binaries[index] = 1
binaries.shape = np.size(probabilities, 0), np.size(probabilities, 1)
# print(np.sum(np.any(binaries,1)))
binaries = binaries.astype(int)
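    # Note (added comment): the block above flattens `binaries`, marks each row's
    # arg-max column via flat indices (row * n_labels + argmax), and restores the
    # original shape before the thresholded metrics are computed.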
micro_p, micro_r, micor_f1, marco_p, marco_r, marco_f1, hamming_loss, accuracy, precision, recall, F1 = measure_b(
binaries, true_ys)
# print(
# 'micro_p {0:.4f}\nmicro_r {1:.4f}\nmicor_f1 {2:.4f}\nmarco_p {3:.4f}\nmarco_r {4:.4f}\nmarco_f1 {5:.4f}\nhamming_loss {6:.4f}\naccuracy {7:.4f}\nprecision {8:.4f}\nrecall {9:.4f}\nF1 {10:.4f}\n'
# .format(micro_p, micro_r, micor_f1, marco_p, marco_r, marco_f1, hamming_loss, accuracy,
# precision, recall, F1)
# )
result[mode+'_micro_p'] = micro_p
result[mode+'_micro_r'] = micro_r
result[mode+'_micor_f1'] = micor_f1
result[mode+'_marco_p'] = marco_p
result[mode+'_marco_r'] = marco_r
result[mode+'_marco_f1'] = marco_f1
result[mode+'_hamming_loss'] = hamming_loss
result[mode+'_accuracy'] = accuracy
result[mode+'_precision'] = precision
result[mode+'_recall'] = recall
result[mode+'_F1'] = F1
return result
def measure_ex(binaries, true_ys):
epsilon = 1e-9
precision = np.sum(np.logical_and(binaries, true_ys), 1)/np.sum(binaries, 1)
recall = np.sum(np.logical_and(binaries, true_ys), 1)/(np.sum(true_ys, 1)+epsilon)
F1 = 2*np.sum(np.logical_and(binaries, true_ys), 1)/(np.sum(binaries, 1)+np.sum(true_ys, 1))
return F1
```
#### File: dynamic_multi_label/utils/generate_data_pickle.py
```python
import tensorflow as tf
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
import pickle
import json
import os
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def __init__(self, data_dir, word2vec_path, max_sentence_length):
self.data_dir = data_dir
self.word2vec_path = word2vec_path
self.max_sentence_length = max_sentence_length
self.labels = set()
self.num_class = 0
self.label_map = dict()
self.tokenizer = None
def _build_vocabulary(self, train_texts, oov_token='UNK', filters='', lower=True):
self.tokenizer = tf.keras.preprocessing.text.Tokenizer(
oov_token=oov_token,
filters=filters,
lower=lower)
self.tokenizer.fit_on_texts(train_texts)
# add PAD
self.tokenizer.word_index['<PAD>'] = 0
self.tokenizer.index_word[0] = '<PAD>'
self.tokenizer.word_counts['<PAD>'] = 0
self.tokenizer.word_docs['<PAD>'] = 0
# get word embedding
self.dump_word_embedding(self.tokenizer.word_index)
print("Build the vocabulary done")
def build_label_map(self, train_labels_name, valid_labels_name, test_labels_name):
train_labels_path = os.path.join(self.data_dir, train_labels_name)
valid_labels_path = os.path.join(self.data_dir, valid_labels_name)
test_labels_path = os.path.join(self.data_dir, test_labels_name)
with open(train_labels_path, 'rt') as fin:
train_labels = json.load(fin)
with open(valid_labels_path, 'rt') as fin:
valid_labels = json.load(fin)
with open(test_labels_path, 'rt') as fin:
test_labels = json.load(fin)
        for label_list in train_labels + valid_labels + test_labels:
            self.labels = self.labels.union(label_list)
self.num_class = len(self.labels)
self.label_map = dict(zip(self.labels, range(self.num_class)))
def _transform_label(self, label):
label_id = np.zeros(self.num_class, dtype=np.int64)
for item in label:
if item in self.label_map:
label_id[self.label_map[item]] = 1
else:
return None
return label_id
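    # Example (added comment): with label_map = {'a': 0, 'b': 1, 'c': 2},
    # _transform_label(['a', 'c']) returns array([1, 0, 1]); a label outside the
    # map makes the whole example return None so it can be filtered out later.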
def dump_train_features(self, text_name, label_name):
text_path = os.path.join(self.data_dir, text_name)
label_path = os.path.join(self.data_dir, label_name)
texts, labels = self._get_data_from_json(text_path, label_path)
self._build_vocabulary(texts)
# self._build_label_map(labels)
texts_ids = self.tokenizer.texts_to_sequences(texts)
max_sentence_length = max(len(x) for x in texts_ids)
if max_sentence_length < self.max_sentence_length:
self.max_sentence_length = max_sentence_length
print("max sentence length is {}".format(self.max_sentence_length))
# padding
texts_ids = tf.keras.preprocessing.sequence.pad_sequences(texts_ids,
maxlen=self.max_sentence_length,
padding='post',
truncating='post')
labels_ids = np.array([self._transform_label(label) for label in labels])
with open(os.path.join(self.data_dir, 'train_texts_ids.dat'), 'wb') as fout:
pickle.dump(texts_ids, fout)
with open(os.path.join(self.data_dir, 'train_labels_ids.dat'), 'wb') as fout:
pickle.dump(labels_ids, fout)
print("Train Data Done {}".format(len(labels_ids)))
def dump_eval_features(self, text_name, label_name):
text_path = os.path.join(self.data_dir, text_name)
label_path = os.path.join(self.data_dir, label_name)
texts, labels = self._get_data_from_json(text_path, label_path)
texts_ids = self.tokenizer.texts_to_sequences(texts)
# padding
texts_ids = tf.keras.preprocessing.sequence.pad_sequences(texts_ids,
maxlen=self.max_sentence_length,
padding='post',
truncating='post')
labels_ids = np.array([self._transform_label(label) for label in labels])
# texts_ids, labels_ids = self._filter_examples(texts_ids, labels_ids)
with open(os.path.join(self.data_dir, 'valid_texts_ids.dat'), 'wb') as fout:
pickle.dump(texts_ids, fout)
with open(os.path.join(self.data_dir, 'valid_labels_ids.dat'), 'wb') as fout:
pickle.dump(labels_ids, fout)
print("Valid Data Done {}".format(len(labels_ids)))
def dump_test_features(self, text_name, label_name):
text_path = os.path.join(self.data_dir, text_name)
label_path = os.path.join(self.data_dir, label_name)
texts, labels = self._get_data_from_json(text_path, label_path)
texts_ids = self.tokenizer.texts_to_sequences(texts)
# padding
texts_ids = tf.keras.preprocessing.sequence.pad_sequences(texts_ids,
maxlen=self.max_sentence_length,
padding='post',
truncating='post')
labels_ids = np.array([self._transform_label(label) for label in labels])
# texts_ids, labels_ids = self._filter_examples(texts_ids, labels_ids)
with open(os.path.join(self.data_dir, 'test_texts_ids.dat'), 'wb') as fout:
pickle.dump(texts_ids, fout)
with open(os.path.join(self.data_dir, 'test_labels_ids.dat'), 'wb') as fout:
pickle.dump(labels_ids, fout)
print("Test Data Done {}".format(len(labels_ids)))
def dump_word_embedding(self, vocabulary):
vocab_size = len(vocabulary)
print("vocabulary size is {}".format(vocab_size))
word_vectors = KeyedVectors.load_word2vec_format(self.word2vec_path, binary=True)
embed_size = word_vectors.vector_size
bound = np.sqrt(6.0 / embed_size)
vocab_size = len(vocabulary)
word_embeddings = np.random.uniform(-bound, bound, [vocab_size+1, embed_size])
for word in vocabulary:
# print(word)
if word in word_vectors:
word_embeddings[vocabulary[word], :] = word_vectors[word]
with open(os.path.join(self.data_dir, 'word_embeddings.dat'), 'wb') as fout:
pickle.dump(word_embeddings, fout)
def dump_meta_data(self):
with open(os.path.join(self.data_dir, "tokenizer.dat"), 'wb') as fout:
pickle.dump(self.tokenizer, fout)
with open(os.path.join(self.data_dir, "label_map.dat"), 'wb') as fout:
pickle.dump(self.label_map, fout)
with open(os.path.join(self.data_dir, "max_sentence_length.dat"), 'wb') as fout:
pickle.dump(self.max_sentence_length, fout)
def get_labels(self):
"""Gets the list of labels for this data set."""
        return self.labels
@classmethod
def _get_data_from_json(cls, text_path, label_path):
with open(text_path, 'rt') as fin:
texts = json.load(fin)
with open(label_path, 'rt') as fin:
labels = json.load(fin)
return texts, labels
@classmethod
def _filter_examples(cls, text_ids, label_ids):
output_text_ids = list()
output_label_ids = list()
count = 0
for text_id, label_id in zip(text_ids, label_ids):
if label_id is not None:
output_label_ids.append(label_id)
output_text_ids.append(text_id)
else:
count += 1
print("Filter {} examples".format(count))
return np.array(output_text_ids), np.array(output_label_ids)
if __name__ == "__main__":
data_dir = r'/home/yaojq/data/text/reuters'
word2vec_path = r'/home/yaojq/data/word_embedding/GoogleNews-vectors-negative300.bin'
print(data_dir)
max_seq_length = 512
processor = DataProcessor(data_dir, word2vec_path, max_seq_length)
processor.build_label_map("train_labels.txt", "valid_labels.txt", "test_labels.txt")
processor.dump_train_features("train_texts.txt", "train_labels.txt")
processor.dump_eval_features("valid_texts.txt", "valid_labels.txt")
processor.dump_test_features("test_texts.txt", "test_labels.txt")
processor.dump_meta_data()
```
#### File: dynamic_multi_label/utils/preprocess_word_embedding.py
```python
import os
import json
import pickle
from tqdm import tqdm
def build_vocabulary(data_dir):
with open(os.path.join(data_dir, 'train_texts.txt'), 'rt') as fin:
train_texts = json.load(fin)
print("train text cuts load done")
with open(os.path.join(data_dir, "Telegram", 'train_key_words.dat'), 'rb') as fin:
train_key_words = pickle.load(fin)
print("train key_words load done")
words = set()
for train_text in tqdm(train_texts,miniters=1000):
for word in train_text:
words.add(word)
for key_word in tqdm(train_key_words,miniters=1000):
for word in key_word:
words.add(word)
with open(os.path.join(data_dir, "Telegram", "words.dat"), 'wb') as fout:
pickle.dump(words, fout)
print("Build Vocabulary Done!!!")
def get_word_embedding(data_home, word2vec_name):
with open(os.path.join(data_home, "Telegram", "words.dat"), 'rb') as fin:
words = pickle.load(fin)
telegram_word_embeddings = dict()
print("The number of words is {}".format(len(words)))
word2vec_path = os.path.join(data_home, "word_embedding", word2vec_name)
with open(word2vec_path, 'rt') as fin:
line = fin.readline()
words_num, embed_size = line.split()
print("The number of words is {}, the embedding size is {}".format(words_num, embed_size))
for line in tqdm(fin, miniters=5000):
word, embed = line.split(maxsplit=1)
if word in words:
try:
telegram_word_embeddings[word] = [float(vec) for vec in embed.split()]
except Exception as e:
print(e)
print(line)
vocab_size = len(telegram_word_embeddings)
with open(os.path.join(data_home, "word_embedding", "telegram_word_embedding.dat"), 'wb') as fout:
pickle.dump(telegram_word_embeddings, fout)
print("done!!!")
if __name__ == "__main__":
data_dir = r'/home/yaojq/data/text/reuters'
word2vec_path = "/home/yaojq/data/word_embedding/GoogleNews-vectors-negative300.bin"
print("build vocabulary")
build_vocabulary(data_dir)
get_word_embedding(data_dir, word2vec_path)
```
#### File: dynamic_multi_label/utils/text2tfidf.py
```python
import os
from scipy.sparse import csr_matrix
import pickle
import json
import warnings
from gensim.summarization import textcleaner
from tqdm import tqdm
import unicodedata
with warnings.catch_warnings():
warnings.simplefilter(action='ignore')
from gensim import corpora, models
class tfidf_text:
""" tfidf_text: a text model of linear_model
"""
def __init__(self):
self.stop_word = []
self.corpus = []
self.dictionary = corpora.Dictionary()
self.tfidf = None
def load_corpus_build_dict(self, train_texts):
        # build the dictionary from the training corpus, skipping stop words
        print("loading corpus and building dictionary ...")
for line in train_texts:
tokens = [word for word in line
if word not in self.stop_word]
self.corpus.append(tokens)
self.dictionary.add_documents([tokens])
def filter_dictionary(self, no_below=5, no_above=0.7, keep_n=50000, keep_tokens=None):
"""
Filter out tokens that appear in
1. less than `no_below` documents (absolute number) or
2. more than `no_above` documents (fraction of total corpus size, *not*
absolute number).
3. if tokens are given in keep_tokens (list of strings), they will be kept regardless of
the `no_below` and `no_above` settings
4. after (1), (2) and (3), keep only the first `keep_n` most frequent tokens (or
keep all if `None`).
After the pruning, shrink resulting gaps in word ids.
**Note**: Due to the gap shrinking, the same word may have a different
word id before and after the call to this function!
"""
self.dictionary.filter_extremes(no_below=no_below,
no_above=no_above,
keep_n=keep_n,
keep_tokens=keep_tokens)
def to_csr(self, bows):
data = []
rows = []
cols = []
count = 0
for line in bows:
for elem in line:
rows.append(count)
cols.append(elem[0])
data.append(elem[1])
count += 1
bow_sparse_matrix = csr_matrix((data, (rows, cols)), shape=[count, self.dictionary.__len__()])
return bow_sparse_matrix
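    # Example (added comment): bows = [[(0, 2.0), (3, 1.0)]] yields a
    # 1 x len(self.dictionary) CSR matrix with 2.0 in column 0 and 1.0 in column 3.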
def load_train_data(self):
print("load train data ...")
train_bows = []
for tokens in self.corpus:
train_bows.append(self.dictionary.doc2bow(tokens))
# Transforming bows to tfidfs
self.tfidf = models.TfidfModel(train_bows)
train_tfidfs = [self.tfidf[bow] for bow in train_bows]
train_tfidfs = self.to_csr(train_tfidfs)
return train_tfidfs
def load_test_data(self, test_texts):
print("load test data...")
test_bows = []
for line in test_texts:
tokens = [word for word in line if word not in self.stop_word]
test_bows.append(self.dictionary.doc2bow(tokens))
# pred_labels = self.clf.predict(test_bows)
test_tfidfs = [self.tfidf[bow] for bow in test_bows]
test_tfidfs = self.to_csr(test_tfidfs)
return test_tfidfs
def load_tfidf_data(data_dir):
if os.path.isfile(os.path.join(data_dir, 'tfidf_trainX')):
print("load data from " + data_dir)
with(open(os.path.join(data_dir, 'tfidf_trainX'), 'rb')) as fin:
trainX = pickle.load(fin)
with(open(os.path.join(data_dir, 'tfidf_validX'), 'rb')) as fin:
validX = pickle.load(fin)
with(open(os.path.join(data_dir, 'tfidf_testX'), 'rb')) as fin:
testX = pickle.load(fin)
return [trainX, validX, testX]
else:
return None
def dump_tfidf_data(data_dir, data):
trainX, validX, testX = data
with(open(os.path.join(data_dir, 'tfidf_trainX'), 'wb')) as fout:
pickle.dump(trainX, fout)
with(open(os.path.join(data_dir, 'tfidf_validX'), 'wb')) as fout:
pickle.dump(validX, fout)
with(open(os.path.join(data_dir, 'tfidf_testX'), 'wb')) as fout:
pickle.dump(testX, fout)
def cut_word(texts):
output = []
for text in texts:
output.append(list(textcleaner.tokenize_by_word(text)))
return output
def load_text_data(data_dir):
with open(os.path.join(data_dir, 'train_texts.txt'), 'rt') as fin:
train_texts = json.load(fin)
trainX = cut_word(train_texts)
with open(os.path.join(data_dir, 'valid_texts.txt'), 'rt') as fin:
valid_texts = json.load(fin)
validX = cut_word(valid_texts)
with open(os.path.join(data_dir, 'test_texts.txt'), 'rt') as fin:
test_texts = json.load(fin)
testX = cut_word(test_texts)
return trainX, validX, testX
if __name__ == "__main__":
data_dir = r"/home/yaojq/data/text/reuters"
trainX, validX, testX = load_text_data(data_dir)
tfidf_text_obj = tfidf_text()
tfidf_text_obj.load_corpus_build_dict(trainX)
tfidf_text_obj.filter_dictionary()
trainX = tfidf_text_obj.load_train_data()
validX = tfidf_text_obj.load_test_data(validX)
testX = tfidf_text_obj.load_test_data(testX)
data = [trainX, validX, testX]
dump_tfidf_data(data_dir, data)
``` |
{
"source": "JiaquanYe/MASTER-mmocr",
"score": 2
} |
#### File: textrecog/convertors/master.py
```python
import torch
import mmocr.utils as utils
from mmocr.models.builder import CONVERTORS
from .base import BaseConvertor
import numpy as np
@CONVERTORS.register_module()
class MasterConvertor(BaseConvertor):
"""Convert between text, index and tensor for encoder-decoder based
pipeline.
Args:
dict_type (str): Type of dict, should be one of {'DICT36', 'DICT90'}.
dict_file (None|str): Character dict file path. If not none,
higher priority than dict_type.
dict_list (None|list[str]): Character list. If not none, higher
priority than dict_type, but lower than dict_file.
        with_unknown (bool): If True, add an `<UKN>` (unknown) token to the dictionary.
max_seq_len (int): Maximum sequence length of label.
lower (bool): If True, convert original string to lower case.
start_end_same (bool): Whether use the same index for
start and end token or not. Default: True.
"""
def __init__(self,
dict_type='DICT90',
dict_file=None,
dict_list=None,
with_unknown=True,
max_seq_len=40,
lower=False,
start_end_same=True,
**kwargs):
super().__init__(dict_type, dict_file, dict_list)
assert isinstance(with_unknown, bool)
assert isinstance(max_seq_len, int)
assert isinstance(lower, bool)
self.with_unknown = with_unknown
self.max_seq_len = max_seq_len
self.lower = lower
self.start_end_same = start_end_same
self.update_dict()
def update_dict(self):
start_token = '<SOS>'
end_token = '<EOS>'
unknown_token = '<UKN>'
padding_token = '<PAD>'
# unknown
self.unknown_idx = None
if self.with_unknown:
self.idx2char.append(unknown_token)
self.unknown_idx = len(self.idx2char) - 1
# SOS/EOS
self.idx2char.append(start_token)
self.start_idx = len(self.idx2char) - 1
if not self.start_end_same:
self.idx2char.append(end_token)
self.end_idx = len(self.idx2char) - 1
# padding
self.idx2char.append(padding_token)
self.padding_idx = len(self.idx2char) - 1
# update char2idx
self.char2idx = {}
for idx, char in enumerate(self.idx2char):
self.char2idx[char] = idx
def str2tensor(self, strings):
"""
Convert text-string into tensor.
Args:
strings (list[str]): ['hello', 'world']
Returns:
dict (str: Tensor | list[tensor]):
tensors (list[Tensor]): [torch.Tensor([1,2,3,3,4]),
torch.Tensor([5,4,6,3,7])]
padded_targets (Tensor(bsz * max_seq_len))
"""
        # For ordinary OCR tasks `strings` is a list of str; for table MASTER it is a list of lists.
assert utils.is_type_list(strings, str) or utils.is_type_list(strings, list)
tensors, padded_targets = [], []
indexes = self.str2idx(strings)
for index in indexes:
tensor = torch.LongTensor(index)
tensors.append(tensor)
# target tensor for loss
src_target = torch.LongTensor(tensor.size(0) + 2).fill_(0)
src_target[-1] = self.end_idx
src_target[0] = self.start_idx
src_target[1:-1] = tensor
padded_target = (torch.ones(self.max_seq_len) *
self.padding_idx).long()
char_num = src_target.size(0)
if char_num > self.max_seq_len:
                # TODO: sequences longer than max_seq_len - 2 should be skipped rather than truncated
padded_target = src_target[:self.max_seq_len]
else:
                # TODO: the last token here is PAD rather than EOS, unlike FastOCR where the last token is EOS
padded_target[:char_num] = src_target
padded_targets.append(padded_target)
padded_targets = torch.stack(padded_targets, 0).long()
return {'targets': tensors, 'padded_targets': padded_targets}
def tensor2idx(self, outputs, img_metas=None):
"""
Convert output tensor to text-index
Args:
outputs (tensor): model outputs with size: N * T * C
img_metas (list[dict]): Each dict contains one image info.
Returns:
indexes (list[list[int]]): [[1,2,3,3,4], [5,4,6,3,7]]
scores (list[list[float]]): [[0.9,0.8,0.95,0.97,0.94],
[0.9,0.9,0.98,0.97,0.96]]
"""
batch_size = outputs.size(0)
ignore_indexes = [self.padding_idx]
indexes, scores = [], []
for idx in range(batch_size):
seq = outputs[idx, :, :]
seq = seq.softmax(-1)
max_value, max_idx = torch.max(seq, -1)
str_index, str_score = [], []
output_index = max_idx.cpu().detach().numpy().tolist()
output_score = max_value.cpu().detach().numpy().tolist()
for char_index, char_score in zip(output_index, output_score):
if char_index in ignore_indexes:
continue
if char_index == self.end_idx:
break
str_index.append(char_index)
str_score.append(char_score)
indexes.append(str_index)
scores.append(str_score)
return indexes, scores
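# Illustrative usage sketch (added; shapes and constructor options are assumptions):
#   convertor = MasterConvertor(dict_type='DICT90', max_seq_len=40, start_end_same=False)
#   targets = convertor.str2tensor(['hello', 'world'])
#   # targets['padded_targets'] is [2, 40]: <SOS>, char ids, <EOS>, then <PAD> ids
#   indexes, scores = convertor.tensor2idx(torch.randn(2, 40, len(convertor.idx2char)))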
``` |
{
"source": "jiaranda/games-analytics",
"score": 2
} |
#### File: games-analytics/gameAnalytics/views.py
```python
from django.http import JsonResponse
def index(request):
return JsonResponse({"message": "Hello World! Go to /gameAnalytics/graphql"})
``` |
{
"source": "jiarong/khmer",
"score": 3
} |
#### File: figuregen/assemble/extract-partitions.py
```python
import sys
import os.path
import screed
import argparse
DEFAULT_MAX_SIZE=int(1e5)
DEFAULT_THRESHOLD=5
def read_partition_file(filename):
for n, record in enumerate(screed.open(filename)):
name = record.name
name, partition_id = name.rsplit('\t', 1)
yield n, name, int(partition_id), record.sequence
###
def main():
parser = argparse.ArgumentParser(description="Extract partitioned seqs.")
parser.add_argument('prefix')
parser.add_argument('part_filenames', nargs='+')
parser.add_argument('--max-size', '-X', dest='max_size',
default=DEFAULT_MAX_SIZE, type=int,
help='Max group size (n sequences)')
parser.add_argument('--min-partition-size', '-m', dest='min_part_size',
default=DEFAULT_THRESHOLD, type=int,
help='Minimum partition size worth keeping')
parser.add_argument('--no-output-groups', '-n', dest='output_groups',
default=True, action='store_false',
help='Do not actually output groups files.')
parser.add_argument('--output-unassigned', '-U', dest='output_unass',
default=False, action='store_true',
help='Output unassigned sequences, too')
args = parser.parse_args()
MAX_SIZE = args.max_size
THRESHOLD = args.min_part_size
output_groups = args.output_groups
output_unassigned = args.output_unass
prefix = args.prefix
distfilename = prefix + '.dist'
print '---'
print 'reading partitioned files:', repr(args.part_filenames)
if output_groups:
print 'outputting to files named "%s.groupN.fa"' % prefix
print 'min reads to keep a partition:', THRESHOLD
print 'max size of a group file:', MAX_SIZE
else:
print 'NOT outputting groups! Beware!'
if output_unassigned:
print 'outputting unassigned reads to "%s.unassigned.fa"' % prefix
print 'partition size distribution will go to %s' % distfilename
print '---'
###
if output_unassigned:
unassigned_fp = open('%s.unassigned.fa' % prefix, 'w')
count = {}
for filename in args.part_filenames:
for n, name, pid, seq in read_partition_file(filename):
if n % 100000 == 0:
print '...', n
count[pid] = count.get(pid, 0) + 1
if pid == 0 and output_unassigned:
print >>unassigned_fp, '>%s\n%s' % (name, seq)
if output_unassigned:
unassigned_fp.close()
if 0 in count: # eliminate unpartitioned sequences
del count[0]
# develop histogram of partition sizes
dist = {}
for pid, size in count.items():
dist[size] = dist.get(size, 0) + 1
# output histogram
distfp = open(distfilename, 'w')
total = 0
wtotal = 0
for c, n in sorted(dist.items()):
total += n
wtotal += c*n
distfp.write('%d %d %d %d\n' % (c, n, total, wtotal))
distfp.close()
if not output_groups:
sys.exit(0)
# sort groups by size
divvy = sorted(count.items(), key=lambda y:y[1])
divvy = filter(lambda y:y[1] > THRESHOLD, divvy)
## divvy up into different groups, based on having MAX_SIZE sequences
## in each group.
total = 0
group = set()
group_n = 0
group_d = {}
for partition_id, n_reads in divvy:
group.add(partition_id)
total += n_reads
if total > MAX_SIZE:
for partition_id in group:
group_d[partition_id] = group_n
#print 'group_d', partition_id, group_n
group_n += 1
group = set()
total = 0
if group:
for partition_id in group:
group_d[partition_id] = group_n
#print 'group_d', partition_id, group_n
group_n += 1
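    # at this point group_d maps every kept partition_id to a group index such that
    # each group contains roughly MAX_SIZE reads in total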
print '%d groups' % group_n
if group_n == 0:
print 'nothing to output; exiting!'
return
## open a bunch of output files for the different groups
group_fps = {}
for n in range(group_n):
fp = open('%s.group%04d.fa' % (prefix, n), 'w')
group_fps[n] = fp
## write 'em all out!
for filename in args.part_filenames:
for n, name, partition_id, seq in read_partition_file(filename):
if n % 100000 == 0:
print '...x2', n
if partition_id == 0:
continue
try:
group_n = group_d[partition_id]
except KeyError:
assert count[partition_id] <= THRESHOLD
continue
outfp = group_fps[group_n]
outfp.write('>%s\t%s\n%s\n' % (name, partition_id, seq))
if __name__ == '__main__':
main()
```
#### File: figuregen/assemble/load-graph.py
```python
import sys
import os.path
import threading
import Queue
import gc
import screed
import khmer
from khmer.hashbits_args import build_construct_args, DEFAULT_MIN_HASHSIZE
def main():
parser = build_construct_args()
parser.add_argument('--build-tagset', '-t', default=True,
action='store_false',
                        help='Do not construct tagset while loading sequences')
parser.add_argument('output_filename')
parser.add_argument('input_filenames', nargs='+')
args = parser.parse_args()
if not args.quiet:
if args.min_hashsize == DEFAULT_MIN_HASHSIZE:
print>>sys.stderr, "** WARNING: hashsize is default! You absodefly want to increase this!\n** Please read the docs!"
print>>sys.stderr, '\nPARAMETERS:'
print>>sys.stderr, ' - kmer size = %d \t\t(-k)' % args.ksize
print>>sys.stderr, ' - n hashes = %d \t\t(-N)' % args.n_hashes
print>>sys.stderr, ' - min hashsize = %-5.2g \t(-x)' % args.min_hashsize
print>>sys.stderr, ''
print>>sys.stderr, 'Estimated memory usage is %.2g bytes (n_hashes x min_hashsize / 8)' % (args.n_hashes * args.min_hashsize / 8.)
print>>sys.stderr, '-'*8
K=args.ksize
HT_SIZE=args.min_hashsize
N_HT=args.n_hashes
base = args.output_filename
filenames = args.input_filenames
print 'Saving hashtable to %s' % base
print 'Loading kmers from sequences in %s' % repr(filenames)
###
print 'making hashtable'
ht = khmer.new_hashbits(K, HT_SIZE, N_HT)
for n, filename in enumerate(filenames):
print 'consuming input', filename
ht.consume_fasta_and_tag(filename)
print 'saving hashtable in', base + '.ht'
ht.save(base + '.ht')
print 'saving tagset in', base + '.tagset'
ht.save_tagset(base + '.tagset')
fp_rate = khmer.calc_expected_collisions(ht)
print 'fp rate estimated to be %1.3f' % fp_rate
if fp_rate > 0.15: # 0.18 is ACTUAL MAX. Do not change.
print >>sys.stderr, "**"
print >>sys.stderr, "** ERROR: the graph structure is too small for"
print >>sys.stderr, "** this data set. Increase hashsize/num ht."
print >>sys.stderr, "**"
sys.exit(-1)
if __name__ == '__main__':
main()
```
#### File: khmer/figuregen/clustsize.py
```python
import khmer
import sys
import random
import math
import numpy
def callback(a, b, c):
pass
bases = ['A', 'C', 'G', 'T']
#p = float(sys.argv[1])
def estimate_mean(x):
'''
uses simple percentile bootstrap to generate confidence intervals
'''
n = len(x)
upper = 0
med = 0
lower = 0
means = []
for i in range(200):
tmp = []
for j in range(n):
tmp.append(x[random.randint(0,n-1)])
means.append(numpy.mean(tmp))
means = sorted(means)
lower = means[4]
med = means[99]
upper = means[194]
return lower, med, upper
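# Note (added comment): with 200 bootstrap resamples, means[4] and means[194]
# approximate the 2.5th and 97.5th percentiles (a ~95% percentile-bootstrap CI),
# and means[99] is roughly the bootstrap median.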
def calc_ht_size(m, k):
return int(m / k)
def calc_m(n, p):
n = float(n)
p = float(p)
return int(0 - (n*math.log(p))/(math.log(2)**2))
def opt_ht(m, n):
m = float(m)
n = float(n)
k = (m / n) * math.log(2)
return int(max(1, round(k)))
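# Note (added comment): these are the standard Bloom-filter sizing formulas,
#   m = -n * ln(p) / (ln 2)^2   (bits needed for n items at false-positive rate p)
#   k = (m / n) * ln(2)         (optimal number of hash functions / tables)
# e.g. n = 1000, p = 0.01 gives m ~= 9585 and k ~= 7.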
def generate_read(n):
read_list = []
for i in range(n):
read_list.append(random.choice(bases))
return ''.join(read_list)
def get_neighbors(kmer_hash, K):
neighbors = []
kmer = khmer.reverse_hash(kmer_hash, K)
begin = kmer[0:len(kmer)-1]
end = kmer[1:len(kmer)]
for base in bases:
neighbors.append(khmer.forward_hash(base + begin, K))
neighbors.append(khmer.forward_hash(end + base, K))
return set(neighbors)
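# Note (added comment): get_neighbors() returns the hashes of all k-mers obtained by
# prepending a base to the (k-1)-mer prefix or appending a base to the (k-1)-mer
# suffix, i.e. the up to 8 neighbours of the k-mer in the de Bruijn graph.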
def explore(ht, start_kmer, K):
discovered = set()
explored = set()
start_kmer_hash = khmer.forward_hash(start_kmer, K)
    if ht.get(start_kmer):  # only explore if the start k-mer is present in the table
discovered.add(start_kmer_hash)
else:
return 0
while(len(discovered) > 0 and (len(explored) < 2000000)):
kmer_hash = discovered.pop()
kmer_neighbors = get_neighbors(kmer_hash, K)
explored.add(kmer_hash)
for neigh_hash in kmer_neighbors:
if ht.get(khmer.reverse_hash(neigh_hash, K)) and neigh_hash not in explored and neigh_hash not in discovered:
discovered.add(neigh_hash)
return len(explored)
print "\"FPR\",\"LOWER\",\"AVG\",\"UPPER\""
K = 31
ps = [x/100.0 for x in range(1, 18)]
ps.append(0.1725)
for p in ps:
sizes = []
for i in range(10000):
n = 1000
contig_size = K
m = calc_m(n, p)
k = opt_ht(m, n)
HT_SIZE = calc_ht_size(m, k)
ht = khmer.new_hashbits(K, HT_SIZE, k)
for j in range(n):
kmer = generate_read(contig_size)
ht.consume(kmer)
kmer = generate_read(K)
sizes.append(explore(ht, kmer, K))
#avg = numpy.mean(sizes)
#se = numpy.std(sizes) / numpy.sqrt(len(sizes))
#lim = se * 1.96
#print str(p) + "," + str(avg-lim) + "," + str(avg) + "," + str(avg+lim)
low, med, upp = estimate_mean(sizes)
print str(p) + "," + str(low) + "," + str(med) + "," + str(upp)
```
#### File: ged-lab/armo-gjr/abunPart-mthread.py
```python
import khmer
import sys, threading, time, argparse, cPickle, math, os
import array
import subprocess
from khmer.threading_args import add_threading_args
"""
Partition reads into abundance bins based on the median k-mer count of each
sequence, using the k-mer counts in the given counting hash.

Reads whose median count is <= lowcutoff are written to
<label>.<lowcutoff>.filtered.fasta (reads with mean count 1 go to
<label>.mean1.fasta); the remaining reads are binned by powers of two up to
upcutoff. A histogram of per-read median counts is written to <label>.dist.

% python abunPart-mthread.py <htfile> <input seqs> <label> <upcutoff> <lowcutoff>

Use '-h' for parameter help.
NOTE: All 'N's in the input sequences are converted to 'G's.
"""
#def main():
start = time.time()
parser = argparse.ArgumentParser(
description='Count k-mers summary stats for sequences')
add_threading_args(parser)
parser.add_argument('htfile')
parser.add_argument('input')
parser.add_argument('label')
parser.add_argument('upcutoff')
parser.add_argument('lowcutoff')
args = parser.parse_args()
htfile = args.htfile
input_filename = args.input
label = args.label
NUM = int(args.upcutoff)
NUM = math.log(NUM, 2)
NUM = int(math.floor(NUM))
NUM = 2**NUM
lowcutoff = int(args.lowcutoff)
n_threads = int(args.n_threads)
print >> sys.stderr, 'threads used: %d' %n_threads
config = khmer.get_config()
bufsz = config.get_reads_input_buffer_size()
config.set_reads_input_buffer_size(n_threads * 1 * 1024 * 1024)
rparser = khmer.ReadParser(input_filename, n_threads)
print >> sys.stderr, 'loading counting hash from %s' %htfile
ht = khmer.load_counting_hash(htfile)
K = ht.ksize()
MAXCOUNT = 2**16 - 1
COUNTING_LIS = [0]*(MAXCOUNT+1)
DICT_CNT_ARRAY = {}
DICT_MEAN1_CNT = {}
DICT_MED1_FILT_CNT = {}
DICT_MEAN1_FILE = {}
DICT_MEAN1_FILE_NAMES = {}
DICT_MED1_FILE = {}
DICT_MED1_FILE_NAMES = {}
dF = {}
dF_NAMES = {}
for tnum in xrange(n_threads):
DICT_CNT_ARRAY[tnum] = array.array('I', COUNTING_LIS)
DICT_MEAN1_CNT[tnum] = 0
DICT_MED1_FILT_CNT[tnum] = 0
fname = '%s.mean1.fasta.thread%d' %(label, tnum)
DICT_MEAN1_FILE_NAMES[tnum] = fname
DICT_MEAN1_FILE[tnum] = open(fname, 'wb')
fname = '%s.%d.filtered.fasta.thread%d' %(label,lowcutoff,tnum)
DICT_MED1_FILE_NAMES[tnum] = fname
DICT_MED1_FILE[tnum] = open(fname, 'wb')
i = 2
dF[tnum] = {}
while (i <= NUM):
if (i*4 <= lowcutoff):
i *= 2
continue
low_bound = i
if (i < lowcutoff):
low_bound = lowcutoff+1
fname = '%s.%dto%d.fasta.thread%d' %(label, low_bound, i*4, tnum)
dF_NAMES.setdefault(i, [])
dF_NAMES[i].append(fname)
dF[tnum][i] = open(fname, 'wb')
i *= 2
fname = '%s.%dtoMAX.fasta.thread%d' %(label, i, tnum)
dF_NAMES.setdefault(i, [])
dF_NAMES[i].append(fname)
dF[tnum][i] = open(fname, 'wb')
end1 = time.time()
print >> sys.stderr, 'loading took: %d sec' %(end1 - start)
###
def count_median(rparser, tnum):
for n, record in enumerate(rparser):
seq = record.sequence.upper()
name = record.name
if 'N' in seq:
seq = seq.replace('N', 'G')
if K <= len(seq):
a, b, c = ht.get_median_count(seq)
if a > MAXCOUNT:
a = MAXCOUNT
DICT_CNT_ARRAY[tnum][a] += 1
if (a <= lowcutoff):
if (b == 1):
print >> DICT_MEAN1_FILE[tnum], '>%s\n%s' %(record.name,\
seq)
DICT_MEAN1_CNT[tnum] += 1
else:
print >> DICT_MED1_FILE[tnum], '>%s\n%s' %(record.name,\
seq)
DICT_MED1_FILT_CNT[tnum] += 1
else:
tempN = math.log(a, 2)
tempN = int(math.floor(tempN))
tempN = 2**tempN
if tempN >= NUM*4:
print >> dF[tnum][NUM*2], '>%s\n%s' %(record.name, seq)
else:
if tempN/2 != 1:
print >> dF[tnum][tempN/2], \
'>%s\n%s' %(record.name, seq)
print >> dF[tnum][tempN], '>%s\n%s' %(record.name, seq)
if n%1e6 == 0:
print >> sys.stderr, '%d process by thread %d' %(n, tnum)
###
threads = []
for tnum in xrange(n_threads):
#print >> sys.stderr, 'start counting with %d threads' %tnum
t = threading.Thread(
target=count_median,
args=(rparser,tnum,)
)
threads.append(t)
t.start()
for t in threads:
t.join()
end2 = time.time()
# wait for all threads to print to std
time.sleep(10)
print >> sys.stderr, 'counting took: %d sec' %(end2 - end1)
# merge array
merged_array = zip(*DICT_CNT_ARRAY.values())
for i in xrange(len(COUNTING_LIS)):
COUNTING_LIS[i] = sum(merged_array[i])
# add up variables
mean1_cnt = sum(DICT_MEAN1_CNT.values())
med1_filt_cnt = sum(DICT_MED1_FILT_CNT.values())
print >> sys.stderr, 'finished..'
print >> sys.stderr, '%d reads processed in total' %sum(COUNTING_LIS)
print >> sys.stderr, 'number of reads with mean kmer count 1 (singleton): %d'\
%mean1_cnt
print >> sys.stderr, 'number of reads with median kmer count <= %d' \
'(not including singleton): %d' \
%(lowcutoff, med1_filt_cnt)
cnt = len(COUNTING_LIS)-1
while (cnt >= 0):
if (COUNTING_LIS[cnt] != 0):
max_med_cnt = cnt
max_med_cnt_abun = COUNTING_LIS[cnt]
break
cnt -= 1
print >> sys.stderr, 'max median count is %d with %d reads ' \
%(max_med_cnt, max_med_cnt_abun)
# close files to flush
for tnum in xrange(n_threads):
DICT_MEAN1_FILE[tnum].close()
DICT_MED1_FILE[tnum].close()
for fp in dF[tnum].values():
fp.close()
# merge files
mean1_file_name_lis = DICT_MEAN1_FILE_NAMES.values()
mean1_file_name = mean1_file_name_lis[0].rsplit('.', 1)[0]
p = subprocess.Popen('cat %s > %s' % (' '.join(mean1_file_name_lis),
mean1_file_name),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
assert p.returncode == 0, (stdout, stderr)
#remove .thread files
for fname in mean1_file_name_lis:
os.remove(fname)
# merge
med1_file_name_lis = DICT_MED1_FILE_NAMES.values()
med1_file_name = med1_file_name_lis[0].rsplit('.', 1)[0]
p = subprocess.Popen('cat %s > %s' % (' '.join(med1_file_name_lis),
med1_file_name),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
assert p.returncode == 0, (stdout, stderr)
#remove .thread files
for fname in med1_file_name_lis:
os.remove(fname)
for flis in dF_NAMES.values():
assert flis[0].rsplit('.',1)[0] == flis[-1].rsplit('.',1)[0]
#merge
file_name = flis[0].rsplit('.', 1)[0]
p = subprocess.Popen('cat %s > %s' % (' '.join(flis),
file_name),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
assert p.returncode == 0, (stdout, stderr)
#remove .thread files
for fname in flis:
os.remove(fname)
tempN = math.log(max_med_cnt, 2)
tempN = int(math.floor(tempN))+1
NUM_log = int(math.log(NUM,2))
if tempN < NUM_log + 1:
os.remove('%s.%dtoMAX.fasta' %(label, NUM*2))
for i in range(tempN, NUM_log+1):
ind = 2**i
os.remove('%s.%dto%d.fasta' %(label, ind, ind*4))
output = open('%s.dist' %label, 'wb')
for i in xrange(len(COUNTING_LIS)):
print >> output, '%d\t%d' %(i, COUNTING_LIS[i])
#if __name__ == '__main__':
# main()
```
#### File: ged-lab/armo-gjr/assemble-individual-groups-sga.py
```python
import sys
import tempfile
from screed.fasta import fasta_iter
import shutil
import os.path
import subprocess
LENGTH_CUTOFF=400
SGA_PIPE='/mnt/home/guojiaro/Documents/lib/git/khmer/ged-lab/armo-gjr/sga-pipe.sh'
print >> sys.stderr, '### make sure sga-pipe.sh path is right: $SGA_PIPE'
scripts_dir = os.path.dirname(__file__)
scripts_dir = os.path.abspath(scripts_dir)
def assemble_sequences(f, k, length_cutoff=LENGTH_CUTOFF):
try:
seqfile = f
#dirname = os.path.dirname(os.path.abspath(f))
dirname = tempfile.mkdtemp()
assemble_dir = os.path.join(dirname, 'assemble')
p = subprocess.Popen('bash %s %s %d %s' % (SGA_PIPE, seqfile, k, assemble_dir), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print 'bash %s %s %d %s' % (SGA_PIPE, seqfile, k, assemble_dir)
(stdout, stderr) = p.communicate()
assert p.returncode == 0, (stdout, stderr)
x = []
total = 0
print os.listdir(assemble_dir)
for r in fasta_iter(open(os.path.join(assemble_dir, '%s.sga.%d-contigs.fa' %(os.path.basename(f), k)))):
seqlen = len(r['sequence'])
if seqlen >= length_cutoff:
x.append(r)
total += seqlen
return total, x
finally:
        shutil.rmtree(dirname)  # clean up the temporary assembly directory
#print 'XXX', dirname
def best_assemble_sequences(f, try_k=(30, 40, 50, 70)):
best_k = try_k[0]
best_total, best_records = assemble_sequences(f, best_k)
print 'total: %.2f(Mbp)\tk: %d\t%s' %(float(best_total)/1e6, best_k, os.path.basename(f))
for k in try_k[1:]:
total, records = assemble_sequences(f, k)
print 'total: %.2f(Mbp)\tk: %d\t%s' %(float(total)/1e6, k, os.path.basename(f))
if total > best_total:
best_total = total
best_records = records
best_k = k
return best_k, best_total, best_records
group = 'nogroup'
for i in sys.argv[1].split('.'):
if 'group' in i:
group = i
k, total, records = best_assemble_sequences(sys.argv[1])
print
print 'best assembly for %s: k=%d, %d bp' % (sys.argv[1], k, total)
fp = open(sys.argv[1] + '.sga.%d.best' %k, 'wb')
for n,r in enumerate(records):
fp.write('>%s.%d\n%s\n' % (group, n, r['sequence']))
fp.close()
```
#### File: ged-lab/armo-gjr/test_mthread.py
```python
import khmer, sys, threading, time
###
s = set()
def read_names(rparser, tnum):
print 'started', tnum
n = 0
for n, read in enumerate(rparser):
s.add(read.name)
if n % 1000 == 0:
print 'sleeping', tnum, n
time.sleep(0.2)
print 'done', tnum, 'got', n
###
filename = sys.argv[1]
n_threads = int(sys.argv[2])
config = khmer.get_config()
bufsz = config.get_reads_input_buffer_size()
config.set_reads_input_buffer_size(n_threads * 64 * 1024)
rparser = khmer.ReadParser(filename, n_threads)
print 'starting threads'
threads = []
for tnum in xrange(n_threads):
print 'starting', tnum
t = threading.Thread(target=read_names, args=(rparser, tnum ))
threads.append(t)
t.start()
for t in threads:
t.join()
print 'done; loaded %s sequences' % len(s)
```
#### File: khmer/khmer/threading_args.py
```python
DEFAULT_N_THREADS = 1
def add_threading_args(parser):
parser.add_argument(
'--threads', '-T', dest='n_threads',
default=DEFAULT_N_THREADS,
help='Number of simultaneous threads to execute'
)
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
```
#### File: khmer/sandbox/graph-size-circum-trim.py
```python
import khmer
import sys
import screed
import os.path
import threading
import Queue
import gc
K = 32
HASHTABLE_SIZE = int(1e9)
THRESHOLD = 500
N_HT = 4
WORKER_THREADS = 5
###
RADIUS = 2
MAX_CIRCUM = 4 # 4 seems to eliminate lump in 1m.fa
MAX_VOLUME = 200
incr = 2 * RADIUS
###
GROUPSIZE = 100
###
class SequenceGroup(object):
def __init__(self, order, seqlist):
self.order = order
self.seqlist = seqlist
def is_pair(r1, r2):
a = r1['name'].split('/')[0]
b = r2['name'].split('/')[0]
return (a == b)
def trim_by_circumference(ht, name, seq):
# calculate circumference for every point.
end = len(seq) - K
is_high = False
pos = 0
for pos in range(0, end, incr):
circum = ht.count_kmers_on_radius(seq[pos:pos + K], RADIUS, MAX_VOLUME)
if circum >= MAX_CIRCUM:
is_high = True
break
# ok. sequence has high-radius k-mers; can we trim them off?
if is_high and pos > incr:
pos -= incr
# find last k-mer with a low radius:
i = 1
for i in range(1, incr):
circum = ht.count_kmers_on_radius(seq[pos + i:pos + i + K],
RADIUS, MAX_VOLUME)
if circum >= MAX_CIRCUM:
break
pos += i - 1
# now trim sequence:
seq = seq[:pos + K]
is_high = False
name += "\tTRUNC.%d" % pos
if is_high:
return None, None
else:
return name, seq
def process(inq, outq, ht):
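    # Worker loop: pull sequence groups from the input queue, keep only reads
    # whose connected k-mer graph is at least THRESHOLD in size (keeping read
    # pairs together), trim high-circumference regions, and push the surviving
    # reads to the output queue tagged with the group's original order.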
global worker_count
while not done or not inq.empty():
try:
g = inq.get(True, 1)
except Queue.Empty:
continue
x = []
last_record = None
for record in g.seqlist:
kmer = record['sequence'][:K]
size = ht.calc_connected_graph_size(kmer, THRESHOLD)
if size >= THRESHOLD:
# keep pairs together if either is "good"
if last_record and is_pair(last_record, record):
x.append(last_record)
x.append(record)
record = None
last_record = record
y = []
for record in x:
name, seq = trim_by_circumference(ht, record['name'],
record['sequence'])
if name:
y.append((name, seq))
gg = SequenceGroup(g.order, y)
outq.put(gg)
worker_count -= 1
def write(outq, outfp):
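    # Writer loop: buffer finished groups by their order number and flush them
    # to the output file strictly in order, so multithreaded processing does
    # not reorder the reads.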
global worker_count
groups = {}
next_group = 0
while worker_count > 0 or not outq.empty():
try:
g = outq.get(True, 1)
except Queue.Empty:
continue
groups[g.order] = g
while next_group in groups:
g = groups[next_group]
for name, seq in g.seqlist:
outfp.write('>%s\n%s\n' % (name, seq,))
del groups[next_group]
next_group += 1
gc.collect()
def main():
global done, worker_count
done = False
worker_count = 0
infile = sys.argv[1]
outfile = os.path.basename(infile) + '.graphcirc'
if len(sys.argv) == 3:
outfile = sys.argv[2]
print 'creating ht'
ht = khmer.new_hashbits(K, HASHTABLE_SIZE, N_HT)
print 'eating fa', infile
total_reads, n_consumed = ht.consume_fasta(infile)
outfp = open(outfile, 'w')
inqueue = Queue.Queue(50)
outqueue = Queue.Queue(50)
## worker and writer threads
for i in range(WORKER_THREADS):
t = threading.Thread(target=process, args=(inqueue, outqueue, ht))
worker_count += 1
t.start()
threading.Thread(target=write, args=(outqueue, outfp)).start()
### main thread
x = []
i = 0
group_n = 0
for n, record in enumerate(screed.fasta.fasta_iter(open(infile))):
if n % 10000 == 0:
print '...', n
i += 1
if i > GROUPSIZE:
this_name = record['name'].split('/')[0]
last_name = x[-1]['name'].split('/')[0]
if is_pair(record, x[-1]): # preserve pairs
x.append(record)
g = SequenceGroup(group_n, x)
inqueue.put(g)
x = []
else:
g = SequenceGroup(group_n, x)
inqueue.put(g)
x = [record]
group_n += 1
i = 0
else:
x.append(record)
# submit last set of sequences
g = SequenceGroup(group_n, x)
inqueue.put(g)
done = True
if __name__ == '__main__':
main()
```
#### File: khmer/sandbox/normalize-by-min.py
```python
import sys
import screed
import os
import khmer
from khmer.counting_args import build_construct_args, DEFAULT_MIN_HASHSIZE
DEFAULT_MINIMUM_COVERAGE = 5
def main():
parser = build_construct_args()
parser.add_argument('-C', '--cutoff', type=int, dest='cutoff',
default=DEFAULT_MINIMUM_COVERAGE)
parser.add_argument('-s', '--savehash', dest='savehash', default='')
parser.add_argument('-l', '--loadhash', dest='loadhash',
default='')
parser.add_argument('input_filenames', nargs='+')
args = parser.parse_args()
if not args.quiet:
if args.min_hashsize == DEFAULT_MIN_HASHSIZE:
print >>sys.stderr, "** WARNING: hashsize is default! " \
"You absodefly want to increase this!\n** " \
"Please read the docs!"
print >>sys.stderr, '\nPARAMETERS:'
print >>sys.stderr, ' - kmer size = %d \t\t(-k)' % args.ksize
print >>sys.stderr, ' - n hashes = %d \t\t(-N)' % args.n_hashes
print >>sys.stderr, ' - min hashsize = %-5.2g \t(-x)' % \
args.min_hashsize
print >>sys.stderr, ''
print >>sys.stderr, 'Estimated memory usage is %.2g bytes ' \
'(n_hashes x min_hashsize)' % (
args.n_hashes * args.min_hashsize)
print >>sys.stderr, '-' * 8
K = args.ksize
HT_SIZE = args.min_hashsize
N_HT = args.n_hashes
DESIRED_COVERAGE = args.cutoff
filenames = args.input_filenames
if args.loadhash:
print 'loading hashtable from', args.loadhash
ht = khmer.load_counting_hash(args.loadhash)
else:
print 'making hashtable'
ht = khmer.new_counting_hash(K, HT_SIZE, N_HT)
total = 0
discarded = 0
for input_filename in filenames:
output_name = os.path.basename(input_filename) + '.minkeep'
outfp = open(output_name, 'w')
for n, record in enumerate(screed.open(input_filename)):
if n > 0 and n % 10000 == 0:
print '... kept', total - discarded, 'of', total, ', or', \
int(100. - discarded / float(total) * 100.), '%'
print '... in file', input_filename
total += 1
if len(record.sequence) < K:
continue
seq = record.sequence.replace('N', 'A')
mincount = ht.get_min_count(seq)
if mincount < DESIRED_COVERAGE:
ht.consume(seq)
outfp.write('>%s\n%s\n' % (record.name, record.sequence))
else:
discarded += 1
print 'DONE with', input_filename, '; kept', total - discarded, 'of',\
total, 'or', int(100. - discarded / float(total) * 100.), '%'
print 'output in', output_name
if args.savehash:
print 'Saving hashfile through', input_filename
print '...saving to', args.savehash
ht.save(args.savehash)
# Change 0.2 only if you really grok it. HINT: You don't.
fp_rate = khmer.calc_expected_collisions(ht)
print 'fp rate estimated to be %1.3f' % fp_rate
if fp_rate > 0.20:
print >>sys.stderr, "**"
print >>sys.stderr, "** ERROR: the counting hash is too small for"
print >>sys.stderr, "** this data set. Increase hashsize/num ht."
print >>sys.stderr, "**"
print >>sys.stderr, "** Do not use these results!!"
sys.exit(-1)
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
```
#### File: khmer/scripts/count-median.py
```python
import sys
import screed
import os
import khmer
import argparse
###
def main():
parser = argparse.ArgumentParser(
description='Count k-mers summary stats for sequences')
parser.add_argument('htfile')
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
htfile = args.htfile
input_filename = args.input
output_filename = args.output
print 'loading counting hash from', htfile
ht = khmer.load_counting_hash(htfile)
K = ht.ksize()
print 'writing to', output_filename
output = open(output_filename, 'w')
for record in screed.open(input_filename):
seq = record.sequence.upper()
if 'N' in seq:
seq = seq.replace('N', 'G')
if K <= len(seq):
a, b, c = ht.get_median_count(seq)
print >>output, record.name, a, b, c, len(seq)
if __name__ == '__main__':
main()
```
#### File: khmer/scripts/load-into-counting.py
```python
import sys
import threading
import khmer
from khmer.counting_args import build_construct_args, report_on_config
from khmer.threading_args import add_threading_args
###
def main():
parser = build_construct_args()
add_threading_args(parser)
parser.add_argument('output_filename')
parser.add_argument('input_filenames', nargs='+')
parser.add_argument('-b', '--no-bigcount', dest='bigcount', default=True,
action='store_false',
help='Do not count k-mers past 255')
args = parser.parse_args()
report_on_config(args)
K = args.ksize
HT_SIZE = args.min_hashsize
N_HT = args.n_hashes
base = args.output_filename
filenames = args.input_filenames
n_threads = int(args.n_threads)
print 'Saving hashtable to %s' % base
print 'Loading kmers from sequences in %s' % repr(filenames)
###
print 'making hashtable'
ht = khmer.new_counting_hash(K, HT_SIZE, N_HT, n_threads)
ht.set_use_bigcount(args.bigcount)
config = khmer.get_config()
bufsz = config.get_reads_input_buffer_size()
config.set_reads_input_buffer_size(n_threads * 64 * 1024)
for n, filename in enumerate(filenames):
rparser = khmer.ReadParser(filename, n_threads)
threads = []
print 'consuming input', filename
for tnum in xrange(n_threads):
t = \
threading.Thread(
target=ht.consume_fasta_with_reads_parser,
args=(rparser, )
)
threads.append(t)
t.start()
for t in threads:
t.join()
if n > 0 and n % 10 == 0:
print 'mid-save', base
ht.save(base)
open(base + '.info', 'w').write('through %s' % filename)
print 'saving', base
ht.save(base)
info_fp = open(base + '.info', 'w')
info_fp.write('through end: %s\n' % filename)
# Change 0.2 only if you really grok it. HINT: You don't.
fp_rate = khmer.calc_expected_collisions(ht)
print 'fp rate estimated to be %1.3f' % fp_rate
print >>info_fp, 'fp rate estimated to be %1.3f' % fp_rate
if fp_rate > 0.20:
print >>sys.stderr, "**"
print >>sys.stderr, "** ERROR: the counting hash is too small for"
print >>sys.stderr, "** this data set. Increase hashsize/num ht."
print >>sys.stderr, "**"
sys.exit(-1)
print 'DONE.'
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
```
#### File: khmer/tests/test_functions.py
```python
import khmer
def test_forward_hash():
assert khmer.forward_hash('AAAA', 4) == 0
assert khmer.forward_hash('TTTT', 4) == 0
assert khmer.forward_hash('CCCC', 4) == 170
assert khmer.forward_hash('GGGG', 4) == 170
def test_forward_hash_no_rc():
h = khmer.forward_hash_no_rc('AAAA', 4)
assert h == 0, h
h = khmer.forward_hash_no_rc('TTTT', 4)
assert h == 85, h
h = khmer.forward_hash_no_rc('CCCC', 4)
assert h == 170, h
h = khmer.forward_hash_no_rc('GGGG', 4)
assert h == 255, h
def test_reverse_hash():
s = khmer.reverse_hash(0, 4)
assert s == "AAAA"
s = khmer.reverse_hash(85, 4)
assert s == "TTTT"
s = khmer.reverse_hash(170, 4)
assert s == "CCCC"
s = khmer.reverse_hash(255, 4)
assert s == "GGGG"
def test_get_primes():
primes = khmer.get_n_primes_near_x(7, 20)
assert primes == [19, 17, 13, 11, 7, 5, 3]
```
#### File: khmer/tests/test_khmer_config.py
```python
import khmer
# NOTE: Currently the wrapper only supports a config singleton.
# In the future, manipulation of multiple configs may be allowed.
# The following alias is a hedge against the future.
from khmer import get_config as get_active_config
def test_EXISTENCE_has_extra_sanity_checks( ):
"""
Verify that 'has_extra_sanity_checks' exists.
An exception should be thrown if a config object cannot be obtained.
"""
config = get_active_config( )
assert "has_extra_sanity_checks" in dir( config )
def check_attribute_exists( config, attr_name ):
"""
Helper function for testing attribute existence.
"""
assert True == hasattr( config, attr_name ), attr_name
def test_EXISTENCE_OTHERS( ):
"""
Verify that all of the various attributes exist.
"""
config = get_active_config( )
for attr_name in \
[
"set_number_of_threads", "get_number_of_threads",
"get_reads_input_buffer_size", "set_reads_input_buffer_size",
]:
yield check_attribute_exists, config, attr_name
#def test_1_ARGS_set_number_of_threads( ):
# """
# Verify that the number of threads cannot be set to a negative number.
# """
# config = get_active_config( )
# if config.is_threaded( ):
# try: config.set_number_of_threads( -1 );
# except: pass
# else: assert False, "config.set_number_of_threads( -1 )"
#def test_2_ARGS_set_number_of_threads( ):
# """
# Verify that the number of threads cannot be set to zero.
# """
# config = get_active_config( )
# if config.is_threaded( ):
# try: config.set_number_of_threads( 0 );
# except: pass
# else: assert False, "config.set_number_of_threads( 0 )"
def test_USE_set_number_of_threads( ):
"""
Verify that the number of threads set is what is reported.
"""
config = get_active_config( )
tnum = config.get_number_of_threads( )
config.set_number_of_threads( 8 )
assert 8 == config.get_number_of_threads( )
config.set_number_of_threads( tnum )
assert tnum == config.get_number_of_threads( )
def test_USE_set_reads_input_buffer_size( ):
"""
Verify that the reads file chunk size is what is reported.
"""
config = get_active_config( )
bufsz = config.get_reads_input_buffer_size( )
config.set_reads_input_buffer_size( 123456789L )
assert 123456789L == config.get_reads_input_buffer_size( )
config.set_reads_input_buffer_size( bufsz )
assert bufsz == config.get_reads_input_buffer_size( )
# vim: set ft=python sts=4 sw=4 tw=79:
```
#### File: khmer/tests/test_lump.py
```python
import khmer
import screed
import khmer_tst_utils as utils
## Below, 'fakelump.fa' is an artificial data set of 3x1 kb sequences in
## which the last 79 bases are common between the 3 sequences.
def test_fakelump_together():
fakelump_fa = utils.get_test_data('fakelump.fa')
ht = khmer.new_hashbits(32, 1e7, 4)
ht.consume_fasta_and_tag(fakelump_fa)
subset = ht.do_subset_partition(0, 0)
ht.merge_subset(subset)
(n_partitions, n_singletons) = ht.count_partitions()
assert n_partitions == 1, n_partitions
# try loading stop tags from previously saved
def test_fakelump_stop():
fakelump_fa = utils.get_test_data('fakelump.fa')
fakelump_stoptags_txt = utils.get_test_data('fakelump.fa.stoptags.txt')
ht = khmer.new_hashbits(32, 1e7, 4)
ht.consume_fasta_and_tag(fakelump_fa)
for line in open(fakelump_stoptags_txt):
ht.add_stop_tag(line.strip())
subset = ht.do_subset_partition(0, 0, True)
ht.merge_subset(subset)
(n_partitions, n_singletons) = ht.count_partitions()
assert n_partitions == 3, n_partitions
# check specific insertion of stop tag
def test_fakelump_stop2():
fakelump_fa = utils.get_test_data('fakelump.fa')
ht = khmer.new_hashbits(32, 1e7, 4)
ht.consume_fasta_and_tag(fakelump_fa)
ht.add_stop_tag('GGGGAGGGGTGCAGTTGTGACTTGCTCGAGAG')
subset = ht.do_subset_partition(0, 0, True)
ht.merge_subset(subset)
(n_partitions, n_singletons) = ht.count_partitions()
assert n_partitions == 3, n_partitions
# try repartitioning
def test_fakelump_repartitioning():
fakelump_fa = utils.get_test_data('fakelump.fa')
fakelump_fa_foo = utils.get_temp_filename('fakelump.fa.stopfoo')
ht = khmer.new_hashbits(32, 1e7, 4)
ht.consume_fasta_and_tag(fakelump_fa)
subset = ht.do_subset_partition(0, 0)
ht.merge_subset(subset)
(n_partitions, n_singletons) = ht.count_partitions()
assert n_partitions == 1, n_partitions
# now, break partitions on any k-mer that you see more than once
# on big excursions, where big excursions are excursions 40 out
# that encounter more than 82 k-mers. This should specifically
# identify our connected sequences in fakelump...
EXCURSION_DISTANCE=40
EXCURSION_KMER_THRESHOLD=82
EXCURSION_KMER_COUNT_THRESHOLD=1
counting = khmer.new_counting_hash(32, 1e7, 4)
ht.repartition_largest_partition(None, counting,
EXCURSION_DISTANCE,
EXCURSION_KMER_THRESHOLD,
EXCURSION_KMER_COUNT_THRESHOLD)
ht.save_stop_tags(fakelump_fa_foo)
# ok, now re-do everything with these stop tags, specifically.
ht = khmer.new_hashbits(32, 1e7, 4)
ht.consume_fasta_and_tag(fakelump_fa)
ht.load_stop_tags(fakelump_fa_foo)
subset = ht.do_subset_partition(0, 0, True)
ht.merge_subset(subset)
(n_partitions, n_singletons) = ht.count_partitions()
assert n_partitions == 3, n_partitions
``` |
{
"source": "jiarong/seqdep",
"score": 3
} |
#### File: seqdep/scripts/filter-low-comp-fa.py
```python
import sys
import os
import re
import screed
from screed import fasta
from screed import fastq
CUTOFF = 0.2
K = 4
def count_uniq_kmer(seq):
length = len(seq)
kmer_set = set()
for i in xrange(length-K+1):
        kmer = seq[i:i+K]  # take the full K-length substring (the original sliced only K-1 bases)
kmer_set.add(kmer)
return len(kmer_set)
def main():
'''
Usage: python <thisfile> <infile> <outfile>
'''
if len(sys.argv) != 3:
mes = ('Usage: python {} <infile> <outfile>')
print >> sys.stderr, mes.format(os.path.basename(sys.argv[0]))
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
try:
if infile == '-':
fp = sys.stdin
else:
fp = open(infile)
if outfile == '-':
fw = sys.stdout
else:
fw = open(outfile, 'wb')
lowcomp_fw = open('low_complexity.fa', 'wb')
for n, record in enumerate(fasta.fasta_iter(fp)):
name = record['name']
seq = record['sequence']
uniq_kmer_count = count_uniq_kmer(seq)
if uniq_kmer_count * 1.0/(len(seq) - K + 1) < CUTOFF:
lowcomp_fw.write('>{}\n{}\n'.format(name, seq)) #fasta output
continue
fw.write('>{}\n{}\n'.format(name, seq)) #fasta output
try:
n
except NameError:
print >> sys.stderr, '*** No seqs are in seqfile'
except IOError as err:
if outfile == '-':
pass
else:
print >> sys.stderr, '*** {}'.format(err)
sys.exit(1)
if __name__ == '__main__':
main()
``` |
{
"source": "jiarong/uniqprimer",
"score": 2
} |
#### File: uniqprimer/scripts/filter-primer-individual2.py
```python
from __future__ import print_function, unicode_literals
import sys
import os
import yaml
import screed
import primer3
import numpy
def check_gc(seq):
seq = seq.upper()
total = len(seq)
count = 0
for c in seq:
if c in 'GC':
count += 1
return count*1.0/total
def end_gc_count(seq):
seq = seq.upper()
count = 0
for c in seq[-5:]:
if c in 'GC':
count += 1
return count
def has_ambiguous(seq):
c_st = set(seq)
if c_st.issubset(set('ATCG')):
return False
else:
return True
def RC(seq):
rc = seq.translate(
{ord('A'):'T', ord('T'):'A', ord('C'):'G', ord('G'):'C'}
)
return rc[::-1]
def main():
if len(sys.argv) != 3:
mes = '*** Usage: python {} params.config file.uniq2ref.primer'
print(
mes.format(os.path.basename(sys.argv[0])),
file=sys.stderr,
)
sys.exit(1)
configf = sys.argv[1]
primerfile = sys.argv[2]
d = yaml.load(open(configf))
pass_cnt = 0
cnt = 0
for rec in screed.open(primerfile):
cnt += 1
_name = rec.name
name, _contig = _name.split(None, 1)
contig_len = _contig.split('__', 1)[1]
seq = rec.sequence
        # take the reverse complement while seq is still text, then encode:
        # primer3 functions only accept byte-strings
        seq_rc = RC(seq)
        seq = seq.encode('utf-8')
        #seq = bytes(seq, 'utf-8')
        seq_rc = seq_rc.encode('utf-8')
        a_ambi = numpy.array([has_ambiguous(seq), has_ambiguous(seq_rc)])
if sum(a_ambi) == 2:
continue
# check tm
tm = primer3.calcTm(seq)
tm_rc = primer3.calcTm(seq_rc)
        a_tm = numpy.array([
            (tm < d['TM_LOWER'] or tm > d['TM_UPPER']),
            (tm_rc < d['TM_LOWER'] or tm_rc > d['TM_UPPER']),
        ])
if sum(a_tm) == 2:
continue
# check gc
gc = check_gc(seq)
gc_rc = check_gc(seq_rc)
        a_gc = numpy.array([
            (gc < d['GC_LOWER'] or gc > d['GC_UPPER']),
            (gc_rc < d['GC_LOWER'] or gc_rc > d['GC_UPPER']),
        ])
if sum(a_gc) == 2:
continue
if d['GC_CLAMP']:
c = end_gc_count(seq)
c_rc = end_gc_count(seq_rc)
            a_endgc = numpy.array([
                c > 3 or c < 1,
                c_rc > 3 or c_rc < 1,
            ])
if sum(a_endgc) == 2:
continue
if d['SS']:
hp = primer3.calcHairpin(seq)
ho = primer3.calcHomodimer(seq)
hp_rc = primer3.calcHairpin(seq_rc)
ho_rc = primer3.calcHomodimer(seq_rc)
orig_pass = (
(hp.dg < d['HP_DG_LIMIT'] or hp.dg > 0)
& (ho.dg < d['DI_DG_LIMIT'] or ho.dg > 0)
)
rc_pass = (
(hp_rc.dg < d['HP_DG_LIMIT'] or hp_rc.dg > 0)
& (ho_rc.dg < d['DI_DG_LIMIT'] or ho_rc.dg > 0)
)
            # reject only when neither orientation passes the secondary-structure
            # checks (same "both orientations must fail" convention as above)
            if not (orig_pass or rc_pass):
                continue
pass_cnt += 1
mes = '>{} contiglen__{};tm__{};gc__{}\n{}'
print(mes.format(name, contig_len, tm, gc, seq), file=sys.stdout)
if cnt == 0:
mes = '*** Empty file detected: {} (file.uniq2ref.primer), skipping..'
print(
mes.format(os.path.basename(primerfile)),
file=sys.stderr,
)
sys.exit(0)
if __name__ == '__main__':
main()
```
#### File: uniqprimer/scripts/filter-primer-pair.py
```python
from __future__ import print_function, unicode_literals
import sys
import os
import numpy
import yaml
import screed
import primer3
def RC(seq):
if isinstance(seq, bytes):
seq = seq.decode('utf-8')
table = {ord('A'):'T', ord('T'):'A', ord('C'):'G', ord('G'):'C'}
rc = seq.translate(table)
return rc[::-1]
def process(configd, desc_d, name, d_pos):
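    # For one contig, pair every forward-primer start position with every
    # reverse-primer start position whose distance falls inside the configured
    # amplicon length range, optionally reject heterodimer-prone pairs, and
    # print one tab-separated line per accepted primer pair.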
l_f = sorted(set(d_pos['f']))
l_r = sorted(set(d_pos['r']))
a_f = numpy.array(l_f)
a_r = numpy.array(l_r)
range_st = range(configd['LEN_LOWER'], configd['LEN_UPPER'])
for i, n in enumerate(a_f):
f_name = '{}__{}'.format(name, n)
f_d = desc_d['f'][f_name]
f = f_d['seq']
for j, m in enumerate(l_r):
diff = m - n
if diff < 0:
continue
elif diff in range_st:
r_name = '{}__{}'.format(name, m)
r_d = desc_d['r'][r_name]
r = r_d['seq']
# desc e.g:
#contiglen__184765;di__'f';tm__56.9135107847;gc__0.6
if configd['SS']:
# check heterodimer
hetero = primer3.calcHeterodimer(
f.encode('utf-8'),
r.encode('utf-8'),
)
if hetero.dg < configd['DI_DG_LIMIT']:
continue
# forward, f_tm, f_gc, reverse, r_tm, r_gc
mes = (
'{}\t{}\t'
'{}\t{}\t{}\t'
'{}\t{:.1f}\t{:.2f}\t'
'{}\t{:.1f}\t{:.2f}\t'
)
print(
mes.format(
name, f_d['contiglen'],
n, m, diff,
f, float(f_d['tm']), float(f_d['gc']),
r, float(r_d['tm']), float(r_d['gc']),
)
)
# break when out of range since already sorted
elif diff > configd['LEN_UPPER']:
break
def main():
if len(sys.argv) != 3:
mes = '*** Usage: python {} params.config file.filtindi'
print(
mes.format(os.path.basename(sys.argv[0])),
file=sys.stderr
)
sys.exit(1)
configf = sys.argv[1]
infile = sys.argv[2]
configd = yaml.load(open(configf))
print(
'#contig_name\tcontig_len\t'
'f_start\tr_start\tamp_size\t'
'f_seq\tf_tm\tf_gc\t'
'r_seq\tr_tm\tr_gc\t'
)
for rec in screed.open(infile):
name, desc = rec.name.split(None, 1) #1095362_contig_14__2939
# desc e.g: contiglen__184765;tm__56.9135107847;gc__0.6
#dict(i.split('__') for i in desc.split(';'))
_l = name.split('__')
if len(_l) < 3:
print(
'*** {} does have wrong format, skipping..'.format(
name
),
file=sys.stderr,
)
continue
contig_name = _l[0]
pos = int(_l[1])
di = _l[2] # sense: 'f' ; anti-sense: 'r'
name = '{}__{}'.format(contig_name, pos)
try:
if contig_name == prev_contig_name:
d_pos[di].append(pos)
d[di][name] = dict(
i.split('__') for i in desc.split(';')
)
d[di][name]['seq'] = rec.sequence
else:
process(configd, d, prev_contig_name, d_pos)
# initialization
del d_pos
del d
d = {'f':{}, 'r':{}}
d[di][name] = dict(
i.split('__') for i in desc.split(';')
)
d[di][name]['seq'] = rec.sequence
d_pos = {'f':[], 'r':[]}
d_pos[di].append(pos)
prev_contig_name = contig_name
except UnboundLocalError as e:
# for first record
#UnboundLocalError due to prev_contig_name not defined
d = {'f':{}, 'r':{}}
d[di][name] = dict(
i.split('__') for i in desc.split(';')
)
d[di][name]['seq'] = rec.sequence
d_pos = {'f':[], 'r':[]}
d_pos[di].append(pos)
prev_contig_name = contig_name
# process last batch
try:
process(configd, d, prev_contig_name, d_pos)
except UnboundLocalError as e:
mes = (
'*** Empty file detected: {} '
'(file.uniq2ref.primer.filterindi), skipping..'
)
print(
mes.format(os.path.basename(infile)),
file=sys.stderr,
)
sys.exit(0)
if __name__ == '__main__':
main()
```
#### File: uniqprimer/scripts/kmer2primer.py
```python
from __future__ import print_function
import sys
import os
import yaml
import screed
def parse_kmerfile(f):
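    # Build {contig_name: set(start_positions)} from records named like
    # '1095362_contig_14__2939' (contig name and k-mer start joined by '__').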
d = {}
for rec in screed.open(f):
name = rec.name.split(None, 1)[0] #1095362_contig_14__2939
_l = name.split('__')
if len(_l) < 2:
print('*** {} does have wrong format, skipping..'.format(name),
file=sys.stderr)
continue
contig_name = _l[0]
pos = int(_l[1])
_st = d.get(contig_name, set())
d[contig_name] = _st
_st.add(pos) # this way due to shallow copy
return d
def main():
if len(sys.argv) != 4:
mes = (
'*** Usage: python {} params.config '
'concontig.fasta file.uniq2ref'
)
print(
mes.format(os.path.basename(sys.argv[0])),
file=sys.stderr,
)
sys.exit(1)
configf = sys.argv[1]
contigf = sys.argv[2]
kmerfile = sys.argv[3]
configd = yaml.load(open(configf))
d = parse_kmerfile(kmerfile)
if len(d) == 0:
mes = '*** Empty file detected: {} (file.uniq2ref), skipping..'
print(
mes.format(os.path.basename(kmerfile)),
file=sys.stderr,
)
sys.exit(0)
for rec in screed.open(contigf):
name = rec.name
name = name.split(None, 1)[0]
if not name in d:
continue
seq = rec.sequence
for p in d[name]:
assert len(seq) > p, (
'*** seq length < primer start position'
)
primer = seq[p : p+configd['K']]
_mes = '>{}__{} contig__{}\n{}'
print(_mes.format(name, p, len(seq), primer), file=sys.stdout)
if __name__ == '__main__':
main()
```
#### File: uniqprimer/scripts/parse-eprimer3.py
```python
from __future__ import print_function
import sys
import os
def parse(fp):
pair = False
for line in fp:
line = line.rstrip()
if not line:
continue
if line.startswith('# EPRIMER32 RESULTS FOR'):
name = line.split()[-1]
continue
elif line.startswith('#'):
continue
line = line.strip()
if 'PRODUCT SIZE:' in line:
pair = True
size = int(line.split()[-1])
continue
elif 'FORWARD PRIMER' in line:
#FORWARD PRIMER 542890 20 59.97 55.00 CGCGCTTGAATAGTCGTTGG
_, _, s_f, Len_f, Tm_f, GC_f, Seq_f = line.split()
assert pair == True, '*** Check format..'
continue
elif 'REVERSE PRIMER' in line:
            #REVERSE PRIMER <start> <len> <Tm> <GC%> <sequence> (same layout as FORWARD)
_, _, s_r, Len_r, Tm_r, GC_r, Seq_r = line.split()
assert pair == True, '*** Check format..'
assert int(s_r) - int(s_f) + int(Len_r) == size
mes = ('>{}__{}to{}/1 Len_{};Tm_{};GC_{};size_{}\n{}\n'
'>{}__{}to{}/2 Len_{};Tm_{};GC_{};size_{}\n{}')
print(mes.format(name, s_f, s_r, Len_f, Tm_f, GC_f, size, Seq_f,
name, s_f, s_r, Len_r, Tm_r, GC_r, size,
Seq_r))
pair = False
if __name__ == '__main__':
if len(sys.argv) != 2:
mes = '*** Usage: python {} file.ebeprimer'
print(mes.format(os.path.basename(sys.argv[0])), file=sys.stderr)
sys.exit(1)
infile = sys.argv[1]
try:
if infile == '-':
fp = sys.stdin
else:
fp = open(infile)
parse(fp)
finally:
fp.close()
```
#### File: uniqprimer/scripts/prep_refseq_bf.py
```python
from __future__ import print_function, unicode_literals
from __future__ import absolute_import
import argparse
import sys
import os
import json
import threading
import textwrap
import time
import screed
from screed.fasta import fasta_iter
import khmer
from khmer import khmer_args
from khmer.khmer_args import (build_nodegraph_args, report_on_config, info,
add_threading_args, calculate_graphsize,
sanitize_help, DEFAULT_MAX_TABLESIZE)
from khmer.utils import broken_paired_reader, write_record, clean_input_reads
from khmer.kfile import check_file_writable
from khmer.kfile import check_input_files
from khmer.kfile import check_space_for_graph
def kmer_degree(kmer, ht):
s = "ACGT"
left = sum(ht.get('{}{}'.format(i, kmer)) for i in s)
right = sum(ht.get('{}{}'.format(kmer, i)) for i in s)
return left, right
def main():
    parser = build_nodegraph_args("find uniq kmer in query compared to refs")
parser.add_argument('ref', nargs='+',
help='fasta sequence file to be loaded in bloom filter')
parser.add_argument('--bfout', default='nodetable.bf',
help='output bloom filter of ref')
args = parser.parse_args()
K = args.ksize
HT_SIZE = args.max_tablesize
N_HT = args.n_tables
# positional
refs = args.ref
start_time = time.time()
print('{} refs to be loaded'.format(len(refs)), file=sys.stderr)
ht = khmer.Nodetable(K, HT_SIZE, N_HT)
end_time = time.time()
secs = end_time - start_time
mes = 'initiation of bloom filter took {:.2f} hours..'
print(mes.format(secs/3600.0), file=sys.stderr)
for index, filename in enumerate(refs):
if index != 0 and index % 100 == 0:
end_time = time.time()
secs = end_time - start_time
mes = '{} refs have been loaded with in {:.2f} hours ..'
print(mes.format(index, secs/3600.0), file=sys.stderr)
try:
ht.consume_seqfile(filename)
except OSError as e:
mes = ('*** Skipping due to OSError (machine or system problem):'
' {}\n'
'*** Detailed error message:\n'
'*** {}')
print(mes.format(os.path.basename(filename), str(e)),
file=sys.stderr)
continue
print('Saving bloom filter to {}..'.format(args.bfout),
file=sys.stderr)
ht.save(args.bfout)
# Change 0.2 only if you really grok it. HINT: You don't.
fp_rate = khmer.calc_expected_collisions(ht)
mes = 'fp rate estimated to be {:1.3f}'
print(mes.format(fp_rate), file=sys.stderr)
if fp_rate > 0.01:
mes = ('**\n'
'** ERROR: the counting hash is too small for\n'
'** refs. Increase hashsize/num ht.\n'
'**\n'
'** Do not use these results!!')
        print(mes, file=sys.stderr)
        sys.exit(-1)
n_unique1 = ht.n_unique_kmers()
mes = ('Unique kmer:\t{}\n')
print(mes.format(n_unique1), file=sys.stderr)
if __name__ == '__main__':
main()
``` |
{
"source": "Jiar/python3-test",
"score": 3
} |
#### File: Jiar/python3-test/decorator.py
```python
import functools
def log1(func):
def wrapper(*args, **kw):
print('log1 %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log1
def aaa():
print('aaa def')
aaa()
print(aaa.__name__)
print()
def log2(text):
def decorator(func):
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
@log2('log2')
def bbb():
print('bbb def')
bbb()
print(bbb.__name__)
print()
def log3(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('log3 %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log3
def ccc():
print("ccc def")
ccc()
print(ccc.__name__)
print()
def log4(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
@log4('log4')
def ddd():
print("ddd def")
ddd()
print(ddd.__name__)
print()
def log(*args):
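    # Works both as @log (called directly with the function) and as
    # @log('custom') (called with a text argument, returning the decorator).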
text = args[0] if isinstance(args[0],str) else 'log'
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('%s before %s():' % (text, func.__name__))
result = func(*args, **kw)
print('%s after %s():' % (text, func.__name__))
return result
return wrapper
return decorator if isinstance(args[0],str) else decorator(args[0])
# log itself is applied directly as the decorator
@log
def test1():
print('test1')
test1()
print()
# the return value of log('custom') is the actual decorator
@log('custom')
def test2():
print('test2')
test2()
print()
```
#### File: Jiar/python3-test/function.py
```python
def add(x, y, f):
return f(x) + f(y)
print(add(-5, 6, abs))
```
#### File: Jiar/python3-test/generator.py
```python
def fib(max):
n, a, b = 0, 0, 1
while n < max:
yield b
a, b = b, a + b
n = n + 1
return 'done'
temp = fib(10)
print(next(temp))
print(next(temp))
print(next(temp))
print(next(temp))
print(next(temp))
print(next(temp))
```
#### File: Jiar/python3-test/recursive.py
```python
def fact(n):
if n==1:
return 1
return n * fact(n - 1)
# stack overflow: exceeds the default recursion limit
fact(1000)
```
#### File: Jiar/python3-test/rest.py
```python
class Chain(object):
def __init__(self, path='GET'):
self._path = path
    # triggered when an attribute is accessed (path is the attribute name) or a method is looked up (path is the method name)
def __getattr__(self, path):
print(path)
return Chain('%s/%s' % (self._path, path))
    # triggered when the instance is called like a function
def __call__(self, attr):
print(attr)
return Chain('%s/%s' % (self._path, attr))
def __str__(self):
return self._path
__repr__ = __str__
print(Chain().users('michael').group('student').repos)
``` |
{
"source": "Jiarre/NGN-Project_WOL",
"score": 3
} |
#### File: Jiarre/NGN-Project_WOL/controllerHost.py
```python
import os
import subprocess
import socket
import netifaces
import re
# Personal define of Ethernet Packet Type following /usr/include/linux/if_ether.h
ETH_P_WOL = 0x0842
WOL_SIZE = 116 # Size of WOL packet without optional headers
MAC_BROADCAST_B = bytes.fromhex("F" * 12)
def ipt_roules(status):
# Get local ip
ip = subprocess.check_output("hostname -I | awk '{print $1}'", shell=True).decode("utf-8")[:-1]
if status == "DOWN":
# Add Rule
os.system("iptables -A INPUT -d " + ip + " -j REJECT 2> /dev/null")
os.system("iptables -A OUTPUT -s " + ip + " -j REJECT 2> /dev/null")
else:
# Delete Rule
os.system("iptables -D INPUT -d " + ip + " -j REJECT 2> /dev/null")
os.system("iptables -D OUTPUT -s " + ip + " -j REJECT 2> /dev/null")
print("IPTABLES rules updated")
def get_status(hostname):
sdir = str(os.getenv("statusdir"))
sfilep = sdir + "/" + hostname
sfile = open(sfilep, 'r')
status = sfile.read()
sfile.close()
return status
def set_status(hostname, status):
sdir = str(os.getenv("statusdir"))
sfilep = sdir + "/" + hostname
sfile = open(sfilep, 'w')
sfile.write(status)
sfile.close()
def update_status(hostname):
status = get_status(hostname)
if status == "DOWN":
status = "UP"
elif status == "UP":
status = "DOWN"
else:
print(hostname + " has an Invalid status")
return
set_status(hostname, status)
ipt_roules(status)
print(hostname + " is now " + status)
def check_mac(mac):
return re.fullmatch(
'^([A-F0-9]{2}(([:][A-F0-9]{2}){5}|([-][A-F0-9]{2}){5})|([\s][A-F0-9]{2}){5})|'
'([a-f0-9]{2}(([:][a-f0-9]{2}){5}|([-][a-f0-9]{2}){5}|([\s][a-f0-9]{2}){5}))$',
mac)
def get_mac_arp(hostname, dnssrv=False):
try:
ipaddr = socket.gethostbyname(hostname)
except socket.gaierror:
if not dnssrv:
print("Error resolving hostname" + "\n"
"Is DNS server running?")
if input("Are you running mininet topology without DNS? [Y/N]: ").lower() == 'y':
if re.fullmatch('^h\d+$', hostname) is not None:
# Get index of the host and create MAC address from that with padding (MAX Host=255)
macbytes = bytes([int(hostname.replace('h', ''))]).rjust(6, b'\x00')
return macbytes, None
else:
print("Provided hostname is not a well formed mininet hostname")
return None, None
else:
print("Provide full MAC address instead of hostname")
return None, None
else:
print("Provided hostname is not know by DNS server")
return None, None
neigh = subprocess.check_output(f"ip neigh show {ipaddr}", shell=True).decode("utf-8")
if re.search('lladdr ([a-f0-9]{2}([:][a-f0-9]{2}){5})', neigh) is not None:
macaddr = re.search('lladdr ([a-f0-9]{2}([:][a-f0-9]{2}){5})', neigh).group(1)
macbytes = bytes.fromhex(macaddr.replace(':', ''))
return macbytes, macaddr
else:
return None, None
def request_intf(dnssrv=False):
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
    # If the DNS server requests this function, don't ask which interface to use
if (len(socket.if_nameindex()) > 2) and (dnssrv is False):
for i in socket.if_nameindex():
print(str(i[0]) + " -> " + i[1])
idx = int(input("Choose the index of the interface where send the packet: "))
else:
# Speed up in best case scenarios, excluding loopback interface
idx = 2
interface = socket.if_indextoname(idx)
return s, interface
def send_request_to_dnsserver(hostname):
s, interface = request_intf()
    # Full address specification seems not to work; only the interface is necessary
s.bind((interface, 0x1112, socket.PACKET_BROADCAST))
data, hostname = create_dns_packet(s.getsockname()[4], hostname.encode('utf-16'))
if data is not None:
s.send(data)
return
def get_request_to_dnsserver():
    # Listening function for the DNS server
s_rec = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x1112))
# s_rec.bind((interface, 0)) # not necessary -> listening on all interfaces
size = 255
payload = s_rec.recv(size)
hostname = payload.decode('utf-16')[8:].strip('\x00')
mac_dst_bin, mac_dst = get_mac_arp(hostname, dnssrv=True)
print("Request to wake host {} with mac {}".format(hostname, mac_dst))
if mac_dst_bin is not None:
# Send Packet without asking for the interface on DNSServer
send_packet(mac_dst_bin, dnssrv=True)
else:
print("Hostname Incorrect")
return
def create_packet(mac_src, machost_dst):
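    # Build a custom Ethernet frame (type 0x1111) asking the controller to wake
    # a host: broadcast destination MAC + our source MAC + 0x1111 + the target
    # host's MAC address as payload. If only a hostname is given, return
    # data=None so the caller can delegate resolution to the DNS server.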
eth_type = bytes.fromhex("1111") # int -> 4369
mac_dst = MAC_BROADCAST_B
if machost_dst is None:
        wol_dst = input("Provide the hostname or complete MAC address\n"
                        "(accepted separators are ':', '-' or space) of the machine to WOL: ")
# Check mac address format
addr = check_mac(wol_dst)
        # Match all possible hostnames (not just numbers, spaces, etc.)
hostname = re.fullmatch('^(?!\d*$)\w+\.*\w+$', wol_dst)
try:
# 1 match, or the MAC is invalid
if addr is not None:
# Remove mac separator [:-\s] and convert to bytes
data = bytes.fromhex(wol_dst.replace(wol_dst[2], ''))
else:
if hostname is None:
raise ValueError('Incorrect MAC address format or hostname')
elif hostname.group(0) == get_hostname():
raise ValueError(f'{hostname.group(0)} is this host')
else:
# Return data=None as a flag to delegate DNSServer
return None, wol_dst
except ValueError as verr:
print("Exception caught: "+repr(verr))
exit(-1)
else:
data = machost_dst
    # The message is composed of the MAC address of the machine that should receive the WOL Magic Packet
payload = mac_dst + mac_src + eth_type + data
return payload, None
def create_dns_packet(mac_src, machost_dst):
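    # Same layout as create_packet() but with Ethernet type 0x1112: the payload
    # carries the hostname (UTF-16 encoded by the caller) so the DNS server can
    # resolve it to a MAC address.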
eth_type = bytes.fromhex("1112")
mac_dst = MAC_BROADCAST_B
data = machost_dst
    # The message is composed of the MAC address of the machine that should receive the WOL Magic Packet
payload = mac_dst + mac_src + eth_type + data
return payload, None
def send_packet(mac_dst, dnssrv=False):
s, interface = request_intf(dnssrv)
    # Full address specification seems not to work; only the interface is necessary
s.bind((interface, 0x1111, socket.PACKET_BROADCAST))
data, hostname = create_packet(s.getsockname()[4], mac_dst)
# Check if the 0x1111 packet is delegated to DNSServer
if data is not None:
s.send(data)
else:
# Trying to get mac from local arp
h = get_mac_arp(hostname)[0]
if h is not None:
send_packet(h)
else:
# Delegate DNSServer
send_request_to_dnsserver(hostname)
def check_packet(data) -> bool:
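    # Validate a received WOL magic packet: the destination MAC must match one
    # of our interfaces, the source MAC must be well formed, the EtherType must
    # be 0x0842, and the payload must be FF*6 followed by our MAC repeated 16 times.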
res = False
for i in netifaces.interfaces():
i_mac = str(netifaces.ifaddresses(i)[netifaces.AF_LINK][0].get('addr')).replace(':', '')
if data[0:6].hex() == i_mac:
mac_src = data[6:12].hex(':')
if check_mac(mac_src):
if int.from_bytes(data[12:14], "big") == ETH_P_WOL:
if data[14:20] == MAC_BROADCAST_B:
if data[20:].hex() == i_mac*16:
print("Packet received on " + i + " interface from " + mac_src)
res = True
return res
def get_hostname() -> str:
ifaces = netifaces.interfaces()
    # Extract the mininet host number from the first interface name
hostname = re.search('^(h\d+)-eth0$', ifaces[1]).group(1)
if hostname != "":
return hostname
else:
print("Error recognising hostname")
def get_magic_packet():
s_rec = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(ETH_P_WOL))
# s_rec.bind((interface, 0)) # not necessary -> listening on all interfaces
size = WOL_SIZE
payload = s_rec.recv(size)
if check_packet(payload):
hostname = get_hostname()
if hostname != "":
update_status(hostname)
else:
print("Error recognising hostname")
# END
``` |
{
"source": "Jiarre/NGN",
"score": 2
} |
#### File: Z_Progetto/old/mn_topo.py
```python
import os
import shutil
import sys
import time
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.node import RemoteController, OVSSwitch
from mininet.term import makeTerm
SDIR = "/tmp/NGN/hosts"
TONULL = "&>/dev/null"
DHCP = False
if len(sys.argv) > 1:
if sys.argv[1] == '-dhcp':
DHCP = True
class MinimalTopo( Topo ):
def build( self ):
hosts = []
h1 = self.addHost('h1', ip=None)
h2 = self.addHost('h2', ip=None)
h3 = self.addHost('h3', ip=None)
h4 = self.addHost('h4', ip=None)
h5 = self.addHost('h5', ip=None)
h6 = self.addHost('h6', ip=None)
h7 = self.addHost('h7', ip=None)
h8 = self.addHost('h8', ip=None)
h9 = self.addHost('h9', ip=None)
h10 = self.addHost('h10', ip=None)
h11 = self.addHost('h11', ip=None)
h12 = self.addHost('h12', ip=None)
hosts.append(h1)
hosts.append(h2)
hosts.append(h3)
hosts.append(h4)
hosts.append(h5)
hosts.append(h6)
hosts.append(h7)
hosts.append(h8)
hosts.append(h9)
hosts.append(h10)
hosts.append(h11)
hosts.append(h12)
# Create switches
s1 = self.addSwitch( 's1' )
s2 = self.addSwitch( 's2' )
s3 = self.addSwitch( 's3' )
s4 = self.addSwitch( 's4' )
# Add links between the switch and each host
self.addLink( s1, h1 )
self.addLink( s1, h2 )
self.addLink( s1, h3 )
self.addLink( s2, h4 )
self.addLink( s2, h5 )
self.addLink( s2, h6 )
self.addLink( s3, h7 )
self.addLink( s3, h8 )
self.addLink( s3, h9 )
self.addLink( s4, h10 )
self.addLink( s4, h11 )
self.addLink( s4, h12 )
self.addLink( s1, s2 )
self.addLink( s3, s4 )
self.addLink( s3, s2 )
print("*** Setting files and directories")
os.umask(0000)
if os.path.exists(SDIR):
shutil.rmtree(SDIR)
os.makedirs(f"{SDIR}/LOGs")
os.environ["statusdir"] = SDIR
for h in hosts:
host = str(h)
fileS = f"{SDIR}/{host}"
fileL = f"{SDIR}/LOGs/{host}.log"
# Set status file
try:
os.close(os.open(fileL, os.O_CREAT | os.O_WRONLY, 0o777))
f = open(os.open(fileS, os.O_CREAT | os.O_WRONLY, 0o777), 'w')
if host == "h1":
# Only h1 stars UP
f.write("UP")
else:
f.write("DOWN")
f.close()
except OSError:
print("Failed creating files")
else:
print(f"Files of host {host} created")
def runMinimalTopo():
# Create an instance of our topology
topo = MinimalTopo()
# Create a network based on the topology using OVS and controlled by
# a remote controller.
net = Mininet(
topo=topo,
controller=lambda name: RemoteController( name, ip='127.0.0.1' ),
switch=OVSSwitch,
autoSetMacs=True )
# Actually start the network
net.start()
# Drop the user in to a CLI so user can run commands.
node1 = net.getNodeByName("s1")
node2 = net.getNodeByName("s2")
node3 = net.getNodeByName("s3")
node4 = net.getNodeByName("s4")
print("*** Setting basic flow on switches")
node1.cmd("sudo ovs-ofctl add-flow s1 dl_type=0x1111,action=controller")
node2.cmd("sudo ovs-ofctl add-flow s2 dl_type=0x1111,action=controller")
node3.cmd("sudo ovs-ofctl add-flow s3 dl_type=0x1111,action=controller")
node4.cmd("sudo ovs-ofctl add-flow s4 dl_type=0x1111,action=controller")
print("*** Setting up bridge network")
node1.cmd('sudo ovs-vsctl add-port s1 eth1')
if not DHCP:
print("*** Setting up static IP")
net.getNodeByName("h1").setIP(ip="192.168.1.11", prefixLen=24)
net.getNodeByName("h2").setIP(ip="192.168.1.12", prefixLen=24)
net.getNodeByName("h3").setIP(ip="192.168.1.13", prefixLen=24)
net.getNodeByName("h4").setIP(ip="192.168.1.21", prefixLen=24)
net.getNodeByName("h5").setIP(ip="192.168.1.22", prefixLen=24)
net.getNodeByName("h6").setIP(ip="192.168.1.23", prefixLen=24)
net.getNodeByName("h7").setIP(ip="192.168.1.31", prefixLen=24)
net.getNodeByName("h8").setIP(ip="192.168.1.32", prefixLen=24)
net.getNodeByName("h9").setIP(ip="192.168.1.33", prefixLen=24)
net.getNodeByName("h10").setIP(ip="192.168.1.41", prefixLen=24)
net.getNodeByName("h11").setIP(ip="192.168.1.42", prefixLen=24)
net.getNodeByName("h12").setIP(ip="192.168.1.43", prefixLen=24)
print("*** Executing background scripts")
else:
print("*** Executing background scripts and dhcp request")
for h in net.hosts:
if DHCP:
                # Unable to modify the config file for every host, so temporarily change the hostname
h.cmd(f"hostname {str(h)}")
h.cmd(f"dhclient -4") # +h.defaultIntf().name #-e HOST={str(h)} -cf ./configs/mn_dhclient.conf
            # With parentheses it would invade the mininet terminal of the single host and catch its signals
h.cmd(f"python3 backgroundHost.py {str(h)} > {SDIR}/LOGs/{str(h)}.log &")
            # Start the scripts slowly: hosts get skipped if they are started faster
time.sleep(0.1)
# net.terms += makeTerm(h, f"Background script on {str(h)}", cmd=f"python3 backgroundHost.py {str(h)}")
print(f"Started {str(h)} script")
print("All scripts started")
# Reset hostname
node1.cmd("hostname comnetsemu")
    # Run the summary status script. NOT WORKING: it does not return control to the parent process
# command = f"xterm -T 'Status of all hosts' -e 'watch -n 1 python3 getStatusHosts.py'"
# os.system(command + TONULL + " &")
CLI(net)
# After the user exits the CLI, shutdown the network.
net.stop()
shutil.rmtree(SDIR)
if __name__ == '__main__':
# This runs if this file is executed directly
setLogLevel( 'info' )
runMinimalTopo()
# Allows the file to be imported using `mn --custom # --topo minimal`
topos = {
'minimal': MinimalTopo
}
```
#### File: Z_Progetto/old/Provabridge.py
```python
import os
from mininet.net import Mininet
from mininet.node import Controller
from mininet.cli import CLI
from mininet.link import Intf
from mininet.log import setLogLevel, info
def myNetwork():
net = Mininet( topo=None, build=False)
info( '*** Adding controller\n' )
net.addController(name='c0')
info( '*** Add switches\n')
s1 = net.addSwitch('s1')
Intf( 'eth1', node=s1 )
s1.cmd('ovs-vsctl add-port s1 eth1')
    #s1.cmd('ifconfig s1 192.168.33.30')  # does not work
info( '*** Add hosts\n')
h1 = net.addHost('h1', ip='192.168.33.31/24')
h2 = net.addHost('h2', ip='192.168.33.32/24')
info( '*** Add links\n')
net.addLink(h1, s1)
net.addLink(h2, s1)
info( '*** Starting network\n')
net.start()
os.system("ifconfig s1 192.168.33.30")
#h1.cmdPrint('dhclient -4 '+h1.defaultIntf().name)
CLI(net)
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
myNetwork()
``` |
{
"source": "JiaruiFeng/pyG-GNN-framework",
"score": 3
} |
#### File: JiaruiFeng/pyG-GNN-framework/layers.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy as c
import math
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree, softmax
from torch_geometric.nn.inits import glorot, zeros,kaiming_uniform
def clones( module, N):
"""Layer clone function, used for concise code writing
Args:
module: the layer want to clone
N: the time of clone
"""
return nn.ModuleList(c(module) for _ in range(N))
class GCNLayer(MessagePassing):
"""
Graph convolution layer with edge attribute
Args:
input_dim(int): the size of input feature
output_dim(int): the size of output feature
aggr(str): aggregation function in message passing network
        num_edge_type(int): number of edge types; 0 indicates no edge attribute
"""
def __init__(self,input_dim,output_dim,aggr="add",num_edge_type=0):
super(GCNLayer, self).__init__()
self.aggr=aggr
self.proj=nn.Linear(input_dim,output_dim,bias=False)
self.bias=nn.Parameter(torch.Tensor(output_dim))
if num_edge_type>0:
self.edge_embedding = torch.nn.Embedding(num_edge_type, output_dim)
nn.init.xavier_uniform_(self.edge_embedding.weight.data)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.proj.weight.data)
zeros(self.bias)
def forward(self,x,edge_index,edge_attr=None):
#add self loops in the edge space
edge_index,_ = add_self_loops(edge_index, num_nodes = x.size(0))
x = self.proj(x)
row, col = edge_index
deg = degree(col, x.size(0), dtype=x.dtype)
deg_inv_sqrt = deg.pow(-0.5)
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
if edge_attr is not None:
#add features corresponding to self-loop edges, set as zeros.
self_loop_attr = torch.zeros(x.size(0),dtype=torch.long)
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
edge_embeddings = self.edge_embedding(edge_attr)
return self.propagate(edge_index, x=x, norm=norm,edge_attr=edge_embeddings)
else:
return self.propagate(edge_index, x=x,norm=norm, edge_attr=None)
def message(self, x_j,edge_attr,norm):
if edge_attr is not None:
return norm.view(-1,1)*(x_j+edge_attr)
else:
return norm.view(-1,1)*x_j
def update(self,aggr_out):
return F.relu(aggr_out)
# GAT torch_geometric implementation
#Adapted from https://github.com/snap-stanford/pretrain-gnns
class GATLayer(MessagePassing):
"""Graph attention layer with edge attribute
Args:
input_dim(int): the size of input feature
output_dim(int): the size of output feature
        head(int): the number of heads in multi-head attention
        negative_slope(float): the slope of the leaky relu function
        aggr(str): aggregation function in message passing network
        num_edge_type(int): number of edge types; 0 indicates no edge attribute
"""
def __init__(self, input_dim,output_dim,head, negative_slope=0.2, aggr = "add",num_edge_type=0):
super(GATLayer, self).__init__(node_dim=0)
assert output_dim%head==0
self.k=output_dim//head
self.aggr = aggr
self.output_dim = output_dim
self.head = head
self.negative_slope = negative_slope
self.weight_linear = nn.Linear(input_dim, output_dim,bias=False)
self.att = torch.nn.Parameter(torch.Tensor(1, head, 2 * self.k))
self.bias = torch.nn.Parameter(torch.Tensor(output_dim))
if num_edge_type>0:
self.edge_embedding = torch.nn.Embedding(num_edge_type, output_dim)
nn.init.xavier_uniform_(self.edge_embedding.weight.data)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight_linear.weight.data)
glorot(self.att)
zeros(self.bias)
def forward(self, x, edge_index,edge_attr=None):
#add self loops in the edge space
edge_index,_ = add_self_loops(edge_index, num_nodes = x.size(0))
x = self.weight_linear(x).view(-1, self.head, self.k) # N * head * k
if edge_attr is not None:
#add features corresponding to self-loop edges, set as zeros.
self_loop_attr = torch.zeros(x.size(0),dtype=torch.long)
self_loop_attr = self_loop_attr.to(edge_attr.device).to(edge_attr.dtype)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim = 0)
edge_embeddings = self.edge_embedding(edge_attr)
return self.propagate(edge_index, x=x, edge_attr=edge_embeddings)
else:
return self.propagate(edge_index, x=x, edge_attr=None)
def message(self, edge_index, x_i, x_j, edge_attr):
if edge_attr is not None:
edge_attr = edge_attr.view(-1, self.head, self.k)
x_j += edge_attr
alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1) # E * head
alpha = F.leaky_relu(alpha, self.negative_slope)
alpha = softmax(alpha, edge_index[0])
return x_j * alpha.view(-1, self.head, 1) #E * head * k
def update(self, aggr_out):
aggr_out = aggr_out.view(-1,self.output_dim)
aggr_out = aggr_out + self.bias
return F.relu(aggr_out)
#Adapted from https://github.com/snap-stanford/pretrain-gnns
class GINLayer(MessagePassing):
"""
GIN layer to incorporate edge information.
Args:
emb_dim (int): dimensionality of embeddings for nodes and edges.
eps(float): initial epsilon.
train_eps(bool): whether the epsilon is trainable
aggr(str): aggregation function in message passing network
        num_edge_type(int): number of edge types; 0 indicates no edge attribute
See https://arxiv.org/abs/1810.00826
"""
def __init__(self, emb_dim, eps=0.,train_eps=False, aggr="add",num_edge_type=0):
super(GINLayer, self).__init__()
# multi-layer perceptron
self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2 * emb_dim), torch.nn.ReLU(),
torch.nn.Linear(2 * emb_dim, emb_dim))
if num_edge_type > 0:
self.edge_embedding = torch.nn.Embedding(num_edge_type, emb_dim)
nn.init.xavier_uniform_(self.edge_embedding.weight.data)
self.aggr = aggr
self.initial_eps = eps
if train_eps:
self.eps = torch.nn.Parameter(torch.Tensor([eps]))
else:
self.register_buffer('eps', torch.Tensor([eps]))
self.reset_parameters()
def weights_init(self,m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight.data)
zeros(m.bias.data)
def reset_parameters(self):
self.mlp.apply(self.weights_init)
self.eps.data.fill_(self.initial_eps)
def forward(self, x, edge_index, edge_attr=None):
# don't need to add self loop in GIN
#edge_index,_ = add_self_loops(edge_index, num_nodes=x.size(0))
if edge_attr is not None:
edge_embeddings = self.edge_embedding(edge_attr)
x_n= self.propagate(edge_index, x=x, edge_attr=edge_embeddings)
else:
x_n=self.propagate(edge_index, x=x, edge_attr=None)
return self.mlp((1+self.eps)*x+x_n)
def message(self, x_j, edge_attr):
if edge_attr is not None:
return x_j + edge_attr
else:
return x_j
def update(self, aggr_out):
return aggr_out
class GraphSAGELayer(MessagePassing):
"""GraphSAGE layer with edge attributes
Args:
input_dim(int): the size of input feature
output_dim(int): the size of output feature
aggr(str): aggregation function in message passing network
            num_edge_type(int): number of edge types; 0 indicates no edge attribute
"""
def __init__(self,input_dim,output_dim,aggr="mean",num_edge_type=0):
super(GraphSAGELayer, self).__init__()
self.aggr=aggr
self.proj=nn.Linear(input_dim*2,output_dim,bias=False)
self.bias=nn.Parameter(torch.Tensor(output_dim))
if num_edge_type > 0:
self.edge_embedding = torch.nn.Embedding(num_edge_type, input_dim)
torch.nn.init.xavier_uniform_(self.edge_embedding.weight.data)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.proj.weight.data)
zeros(self.bias)
def forward(self,x,edge_index,edge_attr=None):
# don't need to add self loop in GraphSAGE
#edge_index,_ = add_self_loops(edge_index, num_nodes=x.size(0))
if edge_attr is not None:
edge_embeddings = self.edge_embedding(edge_attr)
x_n= self.propagate(edge_index, x=x, edge_attr=edge_embeddings)
else:
x_n=self.propagate(edge_index, x=x, edge_attr=None)
return F.normalize(F.relu(self.proj(torch.cat([x,x_n],dim=-1))+self.bias),p=2,dim=-1)
def message(self, x_j, edge_attr):
if edge_attr is not None:
return x_j + edge_attr
else:
return x_j
def update(self, aggr_out):
return aggr_out
#GAT torch implementation
def masking_softmax(att,A):
"""masking softmax in GAT layer
Args:
att: the unmasked attention score matrix
A: masking matrix, <=0 for masking position, >0 for not masking position
"""
masking=A>0 #B * N * N
masking=masking.int()
masking=masking.unsqueeze(1) #B * 1 * N * N
att=att.masked_fill_(masking==0,-1e30)
return F.softmax(att,dim=-1) #B * h * N * N
class GATLayerTorch(nn.Module):
"""GAT layer
Args:
        input_size: the size of the input feature
        output_size: the size of the output feature
        head: number of heads in multi-head attention
"""
def __init__(self,input_size,output_size,head):
super(GATLayerTorch, self).__init__()
self.k=output_size//head
self.head=head
self.proj=nn.Linear(input_size,output_size,bias=False)
self.att_proj_list=clones(nn.Linear(2*self.k,1),self.head)
def forward(self,x,A):
B=x.size(0)
x=self.proj(x) # B * N * H
x=x.view(B,-1,self.head,self.k).transpose(1,2).contiguous() # B * h * N * k
att_input=self.attention_input(x) #h * B * N * N * 2k
att=torch.cat([F.leaky_relu(self.att_proj_list[i](att_input[i]),negative_slope=0.2)for i in range(att_input.size(0))],dim=-1) # B * N * N * h
att=masking_softmax(att.permute(0,3,1,2),A) # B * h * N * N
x=F.relu(torch.matmul(att,x)) # B * h * N * k
x=x.transpose(1,2).contiguous().view(B,-1,self.k*self.head)
return x # B * N * hk(H)
def attention_input(self,x):
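        # Build all (i, j) node-pair feature concatenations [W*h_i || W*h_j]
        # needed by the GAT attention score, one N x N grid per head.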
B,h,N,k=x.size()
Wi=x.repeat_interleave(N,dim=2) # B * h * (N*N) * k
Wj=x.repeat(1,1,N,1) # B * h * (N*N) * k
cat=torch.cat([Wi,Wj],dim=-1) #B * h * (N*N) * 2k
return cat.view(B,h,N,N,2*k).transpose(0,1) # h * B * N * N * 2k
``` |
{
"source": "jiaruixu/chameleon_recsys",
"score": 2
} |
#### File: nar_module/nar/nar_model.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import math
import numpy as np
from scipy.sparse import csr_matrix
from itertools import permutations
from collections import Counter
from copy import deepcopy
from tensorflow.contrib.layers import xavier_initializer, variance_scaling_initializer
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from .metrics import HitRate, MRR
from .utils import merge_two_dicts, get_tf_dtype, hash_str_to_int
from .evaluation import compute_metrics
ARTICLE_REQ_FEATURES = ['article_id', 'created_at_ts']
SESSION_REQ_SEQ_FEATURES = ['item_clicked', 'event_timestamp']
def get_embedding_size(unique_val_count, const_mult=8):
return int(math.floor(const_mult * unique_val_count**0.25))
def log_base(x, base):
numerator = tf.log(tf.to_float(x))
denominator = tf.log(tf.constant(base, dtype=numerator.dtype))
return numerator / denominator
def log_1p(x, base):
return log_base(x+1, base)
def tf_ndcg_at_k(r, k):
def _tf_dcg_at_k(r, k):
last_dim_size = tf.minimum(k, tf.shape(r)[-1])
input_rank = tf.rank(r)
input_shape = tf.shape(r)
slice_begin = tf.zeros([input_rank], dtype=tf.int32)
slice_size = tf.concat([input_shape[:-1], [last_dim_size]], axis=0)
r = tf.slice(tf.to_float(r),
begin=slice_begin,
size=slice_size)
last_dim_size = tf.shape(r)[-1]
dcg = tf.reduce_sum(tf.subtract(tf.pow(2., r), 1) / log_base(tf.range(2, last_dim_size + 2), 2.), axis=-1)
return dcg
sorted_values, sorted_idx = tf.nn.top_k(r, k=tf.shape(r)[-1])
idcg = _tf_dcg_at_k(sorted_values, k)
ndcg = _tf_dcg_at_k(r, k) / idcg
#Filling up nans (due to zeroed IDCG) with zeros
ndcg = tf.where(tf.is_nan(ndcg), tf.zeros_like(ndcg), ndcg)
return ndcg
def cartesian_product(a, b, axis):
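    # Pair every slice of `a` along `axis` with every slice of `b` along the
    # same axis (by tiling expanded copies of each) and concatenate the paired
    # features on the last dimension.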
a_rank = tf.rank(a)
a_dim = tf.shape(a)[axis]
b_rank = tf.rank(b)
b_dim = tf.shape(b)[axis]
axis_a_repeat = tf.sparse_to_dense(sparse_indices=[axis+1], sparse_values=[b_dim], output_shape=[a_rank+1], default_value=1)
tile_a = tf.tile(tf.expand_dims(a, axis+1), axis_a_repeat)
axis_b_repeat = tf.sparse_to_dense(sparse_indices=[axis], sparse_values=[a_dim], output_shape=[b_rank+1], default_value=1)
tile_b = tf.tile(tf.expand_dims(b, axis), axis_b_repeat)
cart_prod = tf.concat([tile_a, tile_b], axis=-1)
return cart_prod
def shuffle_columns(x):
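    # Independently shuffle the values within each row of a 2-D tensor, using
    # a tf.while_loop over the batch dimension so it works in graph mode.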
batch_size = tf.shape(x)[0]
counter = tf.constant(0)
m0 = tf.zeros(shape=[0, tf.shape(x)[1]], dtype=x.dtype)
cond = lambda i, m: i < batch_size
body = lambda i, m: [i+1, tf.concat([m, tf.expand_dims(tf.random_shuffle(x[i]), 0)], axis=0)]
_, shuffled_columns = tf.while_loop(
cond, body, loop_vars=[counter, m0],
shape_invariants=[counter.get_shape(), tf.TensorShape([None,None])])
return shuffled_columns
def get_tf_dtype(dtype):
if dtype == 'int':
tf_dtype = tf.int64
elif dtype == 'float':
tf_dtype = tf.float32
#elif dtype == 'string':
# tf_dtype = tf.string
else:
raise Exception('Invalid dtype "{}"'.format(dtype))
return tf_dtype
class NARModuleModel():
def __init__(self, mode, inputs, labels,
session_features_config,
articles_features_config,
batch_size,
lr, keep_prob, negative_samples, negative_sample_from_buffer,
content_article_embeddings_matrix,
rnn_num_layers=1,
cosine_loss_gamma=1.0,
reg_weight_decay=0.0,
recent_clicks_buffer_size = 1000,
articles_metadata=None,
plot_histograms=False,
metrics_top_n=5,
elapsed_days_smooth_log_base=1.3,
popularity_smooth_log_base=2.0,
CAR_embedding_size=256,
rnn_units=256,
max_cardinality_for_ohe=30
):
self.lr = lr
self.keep_prob = keep_prob
self.elapsed_days_smooth_log_base = elapsed_days_smooth_log_base
self.popularity_smooth_log_base = popularity_smooth_log_base
self.is_training = (mode == tf.estimator.ModeKeys.TRAIN)
self.negative_samples = negative_samples
self.negative_sample_from_buffer = negative_sample_from_buffer
self.rnn_num_layers = rnn_num_layers
self.metrics_top_n = metrics_top_n
self.plot_histograms = plot_histograms
self.reg_weight_decay = reg_weight_decay
self.batch_size = tf.constant(batch_size, dtype=tf.int32)
self.session_features_config = session_features_config
self.articles_features_config = articles_features_config
self.max_cardinality_for_ohe = max_cardinality_for_ohe
with tf.variable_scope("article_content_embeddings"):
#self.articles_metadata_columns_dict = dict([(column, id) for id, column in enumerate(articles_metadata_columns)])
#self.articles_metadata = tf.constant(articles_metadata_values,
# shape=articles_metadata_values.shape,
# dtype=tf.int64)
self.articles_metadata = {}
#Converting Article metadata feature vectors to constants in the graph, to avoid many copies
for feature_name in articles_metadata:
self.articles_metadata[feature_name] = tf.constant(articles_metadata[feature_name],
shape=articles_metadata[feature_name].shape,
dtype=get_tf_dtype(articles_features_config[feature_name]['dtype']))
self.items_vocab_size = articles_features_config['article_id']['cardinality']
#self.publishers_vocab_size = articles_features_config['sequence_features']['publisher_id']['cardinality']
#self.categories_vocab_size = articles_features_config['sequence_features']['category_id']['cardinality']
self.content_article_embeddings_matrix = \
tf.constant(content_article_embeddings_matrix,
shape=content_article_embeddings_matrix.shape,
dtype=tf.float32)
with tf.variable_scope("articles_status"):
self.articles_pop = tf.placeholder(name="articles_pop",
shape=[self.items_vocab_size],
dtype=tf.int64)
tf.summary.scalar('total_items_clicked', family='stats', tensor=tf.count_nonzero(self.articles_pop))
self.articles_pop_recently_clicked = tf.placeholder(name="articles_pop_recently_clicked",
shape=[self.items_vocab_size],
dtype=tf.int64)
tf.summary.scalar('total_items_clicked_recently', family='stats', tensor=tf.count_nonzero(self.articles_pop_recently_clicked))
self.pop_recent_items_buffer = tf.placeholder(name="pop_recent_items",
shape=[recent_clicks_buffer_size],
dtype=tf.int64)
#PS: variance_scaling_initializer() is recommended for RELU activations in https://arxiv.org/abs/1502.01852
#whilst xavier_initializer is recommended for tanh activations
with tf.variable_scope("main", initializer=xavier_initializer()):
#Initializes CAR item embeddings variable
self.create_item_embed_lookup_variable()
with tf.variable_scope("inputs"):
item_clicked = inputs['item_clicked']
self.item_clicked = item_clicked
#Control features (ensuring that they keep two dims even when the batch has only one session)
self.user_id = inputs['user_id']
self.session_id = inputs['session_id']
self.session_start = inputs['session_start']
seq_lengths = inputs['session_size'] - 1 #The last click is not fed as input; it is used only as the label
self.seq_lengths = seq_lengths
#Creates the sessions mask and ensures that the rank will be 2 (even when the batch size is 1)
self.item_clicked_mask = tf.sequence_mask(seq_lengths)
event_timestamp = tf.expand_dims(inputs["event_timestamp"], -1)
max_event_timestamp = tf.reduce_max(event_timestamp)
#Retrieving last label of the sequence
label_last_item = labels['label_last_item']
self.label_last_item = label_last_item
all_clicked_items = tf.concat([item_clicked, label_last_item], axis=1)
#Labels
next_item_label = labels['label_next_item']
self.next_item_label = next_item_label
batch_max_session_length = tf.shape(next_item_label)[1]
batch_current_size = array_ops.shape(next_item_label)[0]
with tf.variable_scope("batch_stats"):
#batch_items = self.get_masked_seq_values(inputs['item_clicked'])
#Known bug: The article_id 0 will not be considered as negative sample, because padding values also have value 0
batch_items_nonzero = tf.boolean_mask(all_clicked_items, tf.cast(tf.sign(all_clicked_items), tf.bool))
batch_items_count = tf.shape(batch_items_nonzero)[0]
self.batch_items_count = batch_items_count
batch_unique_items, _ = tf.unique(batch_items_nonzero)
batch_unique_items_count = tf.shape(batch_unique_items)[0]
self.batch_unique_items_count = batch_unique_items_count
tf.summary.scalar('batch_items', family='stats', tensor=batch_items_count)
tf.summary.scalar('batch_unique_items', family='stats', tensor=batch_unique_items_count)
with tf.variable_scope("neg_samples"):
#Samples from recent items buffer
negative_sample_recently_clicked_ids = self.get_sample_from_recently_clicked_items_buffer(
self.negative_sample_from_buffer)
batch_negative_items = self.get_batch_negative_samples(all_clicked_items,
additional_samples=negative_sample_recently_clicked_ids,
num_negative_samples=self.negative_samples)
self.batch_negative_items = batch_negative_items
#WARNING: Must keep these variables under the same variable scope, to avoid leaking the positive item to the network (probably due to normalization)
with tf.variable_scope("user_items_contextual_features"):
user_context_features_concat = self.get_context_features(inputs,
features_config=self.session_features_config['sequence_features'],
features_to_ignore=SESSION_REQ_SEQ_FEATURES)
user_context_features = tf.contrib.layers.layer_norm(user_context_features_concat, center=True, scale=True, begin_norm_axis=2)
if self.plot_histograms:
tf.summary.histogram("user_context_features", user_context_features)
input_items_features = self.get_item_features(item_clicked, event_timestamp, 'clicked')
input_user_items_features = tf.concat([user_context_features] + [input_items_features], axis=2)
if self.plot_histograms:
tf.summary.histogram("input_items_features", input_items_features)
positive_items_features = self.get_item_features(next_item_label, max_event_timestamp, 'positive')
if self.plot_histograms:
tf.summary.histogram("positive_items_features", positive_items_features)
positive_user_items_features = tf.concat([user_context_features, positive_items_features], axis=2)
negative_items_features = self.get_item_features(batch_negative_items, max_event_timestamp, 'negative')
if self.plot_histograms:
tf.summary.histogram("negative_items_features", negative_items_features)
#TODO: Test again batch normalization instead of layer norm (applying activation function after the normalization - dense(activation=none) + batch_norm(activation=X))
with tf.variable_scope("CAR"):
PreCAR_dense = tf.layers.Dense(512,
#TODO: Test tf.nn.elu (has non-zero gradient for values < 0 and function is smooth everywhere)
activation=tf.nn.leaky_relu,
#TODO: Test variance_scaling_initializer(mode="FAN_AVG"), to use the avg of fan_in and fan_out (default is just fan_in)
kernel_initializer=variance_scaling_initializer(),
kernel_regularizer=tf.contrib.layers.l2_regularizer(self.reg_weight_decay),
name="PreCAR_representation"
)
input_contextual_item_embedding_pre_CAR = PreCAR_dense(input_user_items_features)
#tf.summary.scalar('input_contextual_item_embedding_pre_CAR/fraction_of_zero_values', tf.nn.zero_fraction(input_contextual_item_embedding_pre_CAR))
#input_contextual_item_embedding_pre_CAR_dropout = tf.layers.dropout(inputs=input_contextual_item_embedding_pre_CAR,
# rate=1.0-self.keep_prob,
# training=self.is_training)
CAR_dense_pre_dropout = tf.layers.Dropout(rate=1.0-self.keep_prob)
CAR_dense = tf.layers.Dense(CAR_embedding_size,
#activation=tf.nn.relu,
activation=tf.nn.tanh,
kernel_regularizer=tf.contrib.layers.l2_regularizer(self.reg_weight_decay),
name="CAR_representation"
)
with tf.variable_scope("user_personalized_contextual_article_embedding"):
with tf.variable_scope("input"):
input_contextual_item_embedding = CAR_dense(CAR_dense_pre_dropout(input_contextual_item_embedding_pre_CAR))
#tf.summary.scalar('input_contextual_item_embedding/fraction_of_zero_values', tf.nn.zero_fraction(input_contextual_item_embedding))
if self.plot_histograms:
tf.summary.histogram("input_contextual_item_embedding", input_contextual_item_embedding)
with tf.variable_scope("positive"):
positive_contextual_item_embedding = tf.nn.l2_normalize(CAR_dense(CAR_dense_pre_dropout(PreCAR_dense(positive_user_items_features))), axis=-1)
if self.plot_histograms:
tf.summary.histogram("positive_contextual_item_embedding", positive_contextual_item_embedding)
with tf.variable_scope("negative"):
negative_contextual_input_features = cartesian_product(user_context_features,
negative_items_features,
axis=1)
#Apply l2-norm to be able to compute cosine similarity by matrix multiplication
negative_contextual_item_embedding = tf.nn.l2_normalize(CAR_dense(CAR_dense_pre_dropout(PreCAR_dense(negative_contextual_input_features))), axis=-1)
if self.plot_histograms:
tf.summary.histogram("negative_contextual_item_embedding", negative_contextual_item_embedding)
#Building RNN
rnn_outputs = self.build_rnn(input_contextual_item_embedding, seq_lengths, rnn_units=rnn_units)
#tf.summary.scalar('rnn_outputs/fraction_of_zero_values', tf.nn.zero_fraction(input_contextual_item_embedding_pre_CAR))
with tf.variable_scope("session_representation"):
rnn_outputs_fc1 = tf.layers.dense(rnn_outputs, 512,
#TODO: Test tf.nn.elu (has non-zero gradient for values < 0 and function is smooth everywhere)
activation=tf.nn.leaky_relu,
kernel_initializer=variance_scaling_initializer(),
kernel_regularizer=tf.contrib.layers.l2_regularizer(self.reg_weight_decay),
name="FC1"
)
#tf.summary.scalar('rnn_outputs_fc1/fraction_of_zero_values', tf.nn.zero_fraction(rnn_outputs_fc1))
rnn_outputs_fc1_dropout = tf.layers.dropout(inputs=rnn_outputs_fc1,
rate=1.0-self.keep_prob,
training=self.is_training)
rnn_outputs_fc2 = tf.layers.dense(rnn_outputs_fc1_dropout, CAR_embedding_size,
#activation=tf.nn.relu,
activation=tf.nn.tanh,
name='FC2',
kernel_regularizer=tf.contrib.layers.l2_regularizer(self.reg_weight_decay))
#tf.summary.scalar('rnn_outputs_fc2/fraction_of_zero_values', tf.nn.zero_fraction(rnn_outputs_fc1))
if self.plot_histograms:
tf.summary.histogram("rnn_outputs_fc2", rnn_outputs_fc2)
with tf.variable_scope("predicted_contextual_item_embedding"):
#Continuing with the DSSM loss
#Apply l2-norm to be able to compute cosine similarity by matrix multiplication
predicted_contextual_item_embedding = tf.nn.l2_normalize(rnn_outputs_fc2, axis=-1)
if self.plot_histograms:
tf.summary.histogram("predicted_contextual_item_embedding", predicted_contextual_item_embedding)
with tf.variable_scope("recommendations_ranking"):
with tf.variable_scope("cos_sim_positive"):
#Computing Cosine similarity between predicted embedding and positive embedding (label)
cos_sim_positive = tf.reduce_sum(tf.multiply(positive_contextual_item_embedding,
predicted_contextual_item_embedding),
axis=-1, keepdims=True)
#print("cos_sim_positive", cos_sim_positive.shape)
if self.plot_histograms:
tf.summary.histogram("train/cos_sim_positive", cos_sim_positive)
with tf.variable_scope("cos_sim_negative"):
#Computing Cosine similarity between predicted embedding and negative items embedding
cos_sim_negative = tf.reduce_sum(tf.multiply(negative_contextual_item_embedding,
tf.expand_dims(predicted_contextual_item_embedding, 2)), axis=-1)
#print("cos_sim_negative", cos_sim_negative.shape)
if self.plot_histograms:
tf.summary.histogram("train/cos_sim_negative", cos_sim_negative)
with tf.variable_scope("positive_prob"):
gamma_var = tf.get_variable('gamma', dtype=tf.float32, trainable=True,
initializer=tf.constant(cosine_loss_gamma))
tf.summary.scalar('gamma', family='train', tensor=gamma_var)
#Concatenating cosine similarities (positive + K sampled negative)
cos_sim_concat = tf.concat([cos_sim_positive, cos_sim_negative], axis=2)
cos_sim_concat_scaled = cos_sim_concat * gamma_var
#Computing softmax over cosine similarities
items_prob = tf.nn.softmax(cos_sim_concat_scaled)
if mode == tf.estimator.ModeKeys.EVAL:
#Computing evaluation metrics
self.define_eval_metrics(next_item_label, batch_negative_items, items_prob)
with tf.variable_scope("loss"):
#Computing the probability of the positive item (label)
positive_prob = items_prob[:,:,0]
negative_probs = items_prob[:,:,1:]
#Summary of first element of the batch sequence (because others might be masked)
if self.plot_histograms:
tf.summary.histogram("positive_prob", positive_prob[:,0])
tf.summary.histogram("negative_probs", negative_probs[:,0,:])
#Computing batch loss
loss_mask = tf.to_float(self.item_clicked_mask)
masked_loss = tf.multiply(tf.log(positive_prob), loss_mask)
#Averaging the loss by the number of masked items in the batch
cosine_sim_loss = -tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask)
tf.summary.scalar("train/cosine_sim_loss", family='train', tensor=cosine_sim_loss)
#reg_loss = self.reg_weight_decay * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables() if not ("noreg" in tf_var.name or "Bias" in tf_var.name))
reg_loss = tf.losses.get_regularization_loss()
tf.summary.scalar("train/reg_loss", family='train', tensor=reg_loss)
self.total_loss = cosine_sim_loss + reg_loss
tf.summary.scalar("train/total_loss", family='train', tensor=self.total_loss)
if mode == tf.estimator.ModeKeys.TRAIN:
with tf.variable_scope('training'):
opt = tf.train.AdamOptimizer(self.lr,
beta1=0.9,
beta2=0.999,
epsilon=1e-08)
#Necessary to run update ops for batch_norm, streaming metrics
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
#self.train = opt.minimize(self.total_loss, global_step=self.gs)
# Get the gradient pairs (Tensor, Variable)
grads = opt.compute_gradients(self.total_loss)
# Update the weights wrt to the gradient
self.train = opt.apply_gradients(grads,
global_step=tf.train.get_global_step()#self.gs
)
if self.plot_histograms:
# Save the grads with tf.summary.histogram (only for debug - SLOW!)
for index, grad in enumerate(grads):
try:
tf.summary.histogram("{}-grad".format(grads[index][1].name), grads[index])
except Exception as e:
print("ERROR generating histogram for %d - %s: %s" % (index, grads[index][1].name, e))
def get_context_features(self, inputs, features_config,
features_to_ignore):
def cat_ohe(feature_name, size, inputs):
return tf.one_hot(inputs[feature_name], size, name="{}_cat_one_hot".format(feature_name))
def cat_embed(feature_name, size, inputs):
#print("GET_CONTEXT_FEATURES(): {}_cat_embedding".format(feature_name))
with tf.variable_scope("{}_cat_embedding".format(feature_name), reuse=tf.AUTO_REUSE):
dim = get_embedding_size(size)
embeddings = tf.get_variable("{}_embedding".format(feature_name), shape=[size, dim],
regularizer=tf.contrib.layers.l2_regularizer(self.reg_weight_decay))
lookup = tf.nn.embedding_lookup(embeddings, ids=inputs[feature_name])#, max_norm=1)
return lookup
with tf.variable_scope("context_features"):
context_features_list = []
for feature_name in features_config:
#Ignores item_clicked and timestamp as user contextual features
if feature_name in features_to_ignore:
continue
if features_config[feature_name]['type'] == 'categorical':
size = features_config[feature_name]['cardinality']
if features_config[feature_name]['cardinality'] < self.max_cardinality_for_ohe:
feature_op = cat_ohe(feature_name, size, inputs)
else:
feature_op = cat_embed(feature_name, size, inputs)
elif features_config[feature_name]['type'] == 'numerical':
feature_op = tf.expand_dims(inputs[feature_name], -1)
else:
raise Exception('Invalid feature type "{}" for feature "{}"'.format(features_config[feature_name]['type'], feature_name))
context_features_list.append(feature_op)
context_features_concat = tf.concat(context_features_list, axis=2)
return context_features_concat
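# Encoding rule used above: categorical features with cardinality below
# self.max_cardinality_for_ohe are one-hot encoded, larger vocabularies get a trainable
# embedding sized by get_embedding_size(), and numerical features are expanded with a
# last dimension of 1 before everything is concatenated along axis 2.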
def define_eval_metrics(self, next_item_label, batch_negative_items, items_prob):
with tf.variable_scope("evaluation_metrics"):
with tf.variable_scope("predicted_items"):
next_item_label_expanded = tf.expand_dims(next_item_label, -1)
batch_negative_items_tiled = tf.tile(tf.expand_dims(batch_negative_items, 1), [1, tf.shape(next_item_label_expanded)[1], 1])
pos_neg_items_concat = tf.concat([next_item_label_expanded, batch_negative_items_tiled], 2)
#Predicting item ids from [positive + k negative samples]
items_top_prob_indexes = tf.nn.top_k(items_prob, k=tf.shape(items_prob)[2]).indices
#In older versions of TF
#items_top_prob_indexes_idx = array_ops.where(
# math_ops.not_equal(items_top_prob_indexes, tf.constant(-1, tf.int32)))
items_top_prob_indexes_idx = tf.contrib.layers.dense_to_sparse(items_top_prob_indexes, eos_token=-1).indices
items_top_prob_indexes_val = tf.gather_nd(items_top_prob_indexes, items_top_prob_indexes_idx)
#Takes the first two columns of the index and uses the sorted indices as the last column
items_top_prob_reordered_indexes = tf.concat([items_top_prob_indexes_idx[:,:2],
tf.expand_dims(tf.cast(items_top_prob_indexes_val, tf.int64), 1)], 1)
predicted_item_ids = tf.reshape(tf.gather_nd(pos_neg_items_concat, items_top_prob_reordered_indexes),
tf.shape(pos_neg_items_concat))
self.predicted_item_ids = predicted_item_ids
#Computing Accuracy@1
self.next_item_accuracy_at_1, self.next_item_accuracy_at_1_update_op = \
tf.metrics.accuracy(predictions=predicted_item_ids[:,:,0],
labels=next_item_label,
weights=tf.to_float(self.item_clicked_mask),
name='accuracy_at_1')
#Computing Recall@N
self.recall_at_n, self.recall_at_n_update_op = tf.contrib.metrics.sparse_recall_at_top_k(
labels=next_item_label_expanded,
top_k_predictions=predicted_item_ids[:,:,:self.metrics_top_n],
weights=tf.to_float(self.item_clicked_mask),
name='hitrate_at_n')
#Computing MRR@N
self.mrr, self.mrr_update_op = self.define_mrr_metric(predicted_item_ids, next_item_label_expanded,
topk=self.metrics_top_n)
#Computing NDCG@N
self.ndcg_at_n_mean, self.ndcg_at_n_mean_update_op = \
self.define_ndcg_metric(predicted_item_ids, next_item_label_expanded, topk=self.metrics_top_n)
def define_ndcg_metric(self, predicted_item_ids, next_item_label_expanded, topk):
with tf.variable_scope("ndcg"):
#Computing NDCG
predicted_correct = tf.to_int32(tf.equal(predicted_item_ids, next_item_label_expanded))
ndcg_predicted = tf_ndcg_at_k(predicted_correct, topk)
#Combining masks of padding items and NDCG zeroed values (because the correct value is not in the top n)
#ndcg_mask = tf.multiply(tf.to_float(self.item_clicked_mask), tf.to_float(tf.sign(ndcg_predicted)))
ndcg_mask = tf.to_float(self.item_clicked_mask)
ndcg_mean, ndcg_mean_update_op = tf.metrics.mean(
values=ndcg_predicted,
weights=ndcg_mask,
name='ndcg_at_n')
return ndcg_mean, ndcg_mean_update_op
def define_mrr_metric(self, predicted_item_ids, next_item_label_expanded, topk):
with tf.variable_scope("mrr"):
reciprocal_ranks = tf.div(tf.constant(1.0), tf.cast(tf.constant(1, tf.int64) + \
tf.where(
tf.logical_and(
tf.equal(next_item_label_expanded,
predicted_item_ids[:,:,:topk]),
tf.expand_dims(self.item_clicked_mask, -1) #Apply mask to sessions with padded items
)
)[:,2],
tf.float32))
batch_valid_labels_count = tf.reduce_sum(tf.to_int32(self.item_clicked_mask))
batch_labels_not_found_in_topk = batch_valid_labels_count - tf.size(reciprocal_ranks)
#Filling in items for which the label was not in the predictions (because tf.where() does not return indices in this case),
#so that the mean is consistent
reciprocal_ranks = tf.concat([reciprocal_ranks, tf.zeros(batch_labels_not_found_in_topk)], axis=0)
mrr, mrr_update_op = tf.metrics.mean(
values=reciprocal_ranks,
name='mrr_at_n')
return mrr, mrr_update_op
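# Worked example: if the label appears at position 3 of the top-k predictions (0-based
# index 2), tf.where() yields rank index 2 and the reciprocal rank is 1 / (1 + 2) = 1/3;
# labels that do not appear in the top-k contribute 0 to the streaming mean.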
def get_layer_norm_item_features(self, item_features):
with tf.variable_scope("layer_norm_item_features", reuse=tf.AUTO_REUSE):
item_features_scaled = tf.contrib.layers.layer_norm(item_features, center=True, scale=True, begin_norm_axis=2)
return item_features_scaled
def items_cat_embed(self, item_ids):
#with tf.device('/cpu:0'):
with tf.variable_scope("item_cat_embedding", reuse=tf.AUTO_REUSE):
size = self.items_vocab_size
dim = get_embedding_size(size)
embeddings = tf.get_variable("items_embedding", shape=[size, dim],
regularizer=tf.contrib.layers.l2_regularizer(self.reg_weight_decay))
lookup = tf.nn.embedding_lookup(embeddings, ids=item_ids)#, max_norm=1)
return lookup
def get_item_features(self, item_ids, events_timestamp, summary_suffix):
with tf.variable_scope("item_features"):
#items_ohe = tf.one_hot(item_ids, self.items_vocab_size)
item_clicked_interactions_embedding = self.items_cat_embed(item_ids)
items_acr_embeddings_lookup = tf.nn.embedding_lookup(self.content_embedding_variable, ids=item_ids)
#Obtaining item features for specified items (e.g. clicked, negative samples)
item_contextual_features = {}
for feature_name in self.articles_features_config:
if feature_name not in ARTICLE_REQ_FEATURES:
item_contextual_features[feature_name] = \
tf.gather(self.articles_metadata[feature_name], item_ids)
#Concatenating item contextual features
item_contextual_features = self.get_context_features(item_contextual_features,
features_config=self.articles_features_config,
features_to_ignore=ARTICLE_REQ_FEATURES)
#Taking the maximum timestamp of the batch
#max_event_timestamp = tf.reduce_max(event_timestamp)
#Computing Item Dynamic features (RECENCY and POPULARITY)
items_dynamic_features = self.get_items_dynamic_features(item_ids,
events_timestamp,
summary_suffix=summary_suffix)
#Creating a feature specifically to inform the network whether this is a padding item or not
#item_clicked_not_padding = tf.expand_dims(tf.cast(tf.sign(item_ids), tf.float32), axis=-1)
items_features_list = [
#Item embedding trained by ACR module
items_acr_embeddings_lookup,
#Trainable item embedding
item_clicked_interactions_embedding,
item_contextual_features,
#items_dynamic_features,
#item_clicked_not_padding
]
items_features_concat = tf.concat(items_features_list, axis=2)
#tf.summary.histogram("items_features_norm_BEFORE", tf.boolean_mask(input_items_features_concat, self.item_clicked_mask))
items_features_norm = self.get_layer_norm_item_features(items_features_concat)
#tf.summary.histogram("items_features_norm_AFTER", tf.boolean_mask(input_items_features, self.item_clicked_mask))
return items_features_norm
def normalize_values(self, tensor_to_normalize, tensor_to_get_stats_from):
with tf.variable_scope("values_normalization"):
mean, variance = tf.nn.moments(tensor_to_get_stats_from, axes=[0])
#Fixing size of stats to avoid dynamic last dimension on tensor_normed
mean = tf.reshape(mean, [1])
variance = tf.reshape(variance, [1])
stddev = tf.sqrt(variance)
#To avoid division by zero
epsilon = tf.constant(1e-8)
tensor_normed = (tensor_to_normalize - mean) / (stddev + epsilon)
return tensor_normed
def get_unique_items_from_pop_recent_buffer(self):
with tf.variable_scope("unique_items_from_pop_recent_buffer"):
recent_items_unique, _ = tf.unique(self.pop_recent_items_buffer)
#Removing zero
recent_items_unique = tf.boolean_mask(recent_items_unique,
tf.cast(tf.sign(recent_items_unique), tf.bool))
return recent_items_unique
def calculate_items_recency(self, creation_dates, reference_timestamps):
with tf.variable_scope("calculate_items_recency"):
elapsed_days = tf.nn.relu(((tf.to_float(reference_timestamps) / tf.constant(1000.0)) \
- tf.to_float(creation_dates)) / tf.constant(60.0 * 60.0 * 24.0))
elapsed_days_smoothed = log_1p(elapsed_days, base=self.elapsed_days_smooth_log_base)
return elapsed_days_smoothed
def normalize_recency_feature(self, batch_elapsed_days_since_publishing, batch_events_timestamp, item_ids):
with tf.variable_scope("normalize_recency_feature"):
#Computing global recency stats from buffer
recent_items_unique = self.get_unique_items_from_pop_recent_buffer()
recent_items_creation_date = tf.gather(self.articles_metadata['created_at_ts'], recent_items_unique)
recent_items_elapsed_days_since_creation = self.calculate_items_recency(recent_items_creation_date,
tf.reduce_max(batch_events_timestamp))
recent_items_elapsed_days_since_creation_smoothed = log_1p(recent_items_elapsed_days_since_creation,
base=self.elapsed_days_smooth_log_base)
#Normalizing batch recency feature
batch_elapsed_days_since_publishing_smoothed = log_1p(batch_elapsed_days_since_publishing,
base=self.elapsed_days_smooth_log_base)
#If there aren't recent items available in the buffer (first batch), use batch items to compute norm stats
tensor_to_get_stats_from = tf.cond(tf.equal(tf.shape(recent_items_elapsed_days_since_creation_smoothed)[0], tf.constant(0)),
lambda: tf.boolean_mask(batch_elapsed_days_since_publishing_smoothed, tf.cast(tf.sign(item_ids), tf.bool)),
lambda: recent_items_elapsed_days_since_creation_smoothed)
batch_elapsed_days_since_publishing_normed = self.normalize_values(batch_elapsed_days_since_publishing_smoothed,
tensor_to_get_stats_from)
return batch_elapsed_days_since_publishing_normed
def get_items_recency_feature(self, item_ids, events_timestamp, summary_suffix=''):
with tf.variable_scope("items_recency_feature"):
#Computing RECENCY feature
batch_articles_creation_date = tf.gather(tf.reshape(self.articles_metadata['created_at_ts'],
[-1,1]), item_ids)
elapsed_days_since_publishing = self.calculate_items_recency(batch_articles_creation_date, events_timestamp)
if self.plot_histograms:
tf.summary.histogram('batch_elapsed_days_since_publishing/'+summary_suffix, family='stats',
values=tf.boolean_mask(elapsed_days_since_publishing, tf.cast(tf.sign(item_ids), tf.bool)))
elapsed_days_since_publishing_norm = self.normalize_recency_feature(elapsed_days_since_publishing,
events_timestamp, item_ids)
if self.plot_histograms:
tf.summary.histogram('batch_elapsed_days_since_publishing_norm/'+summary_suffix, family='stats',
values=tf.boolean_mask(elapsed_days_since_publishing_norm, tf.cast(tf.sign(item_ids), tf.bool)))
return elapsed_days_since_publishing_norm, batch_articles_creation_date
def normalize_popularity_feature(self, batch_items_pop, item_ids):
with tf.variable_scope("popularity_feature_normalization"):
#Computing global recency stats from buffer
recent_items_unique = self.get_unique_items_from_pop_recent_buffer()
recent_items_pop = tf.gather(self.articles_pop_recently_clicked, recent_items_unique)
recent_items_pop_smoothed = log_1p(recent_items_pop,
base=self.popularity_smooth_log_base)
self.recent_items_pop_smoothed = recent_items_pop_smoothed
#Normalizing batch recency feature
batch_items_pop_smoothed = log_1p(batch_items_pop,
base=self.popularity_smooth_log_base)
#If there aren't recent items available in the buffer (first batch), use batch items to compute norm stats
tensor_to_get_stats_from = tf.cond(tf.equal(tf.shape(recent_items_pop_smoothed)[0], tf.constant(0)),
lambda: tf.boolean_mask(batch_items_pop_smoothed,
tf.cast(tf.sign(item_ids), tf.bool)),
lambda: recent_items_pop_smoothed)
batch_items_pop_normed = self.normalize_values(batch_items_pop_smoothed,
tensor_to_get_stats_from)
return batch_items_pop_normed
def get_items_popularity_feature(self, item_ids, summary_suffix=''):
#Computing POPULARITY feature
with tf.variable_scope("items_popularity_feature"):
#batch_articles_pop = tf.to_float(tf.gather(self.articles_pop, tf.expand_dims(item_ids, -1)))
batch_articles_pop = tf.to_float(tf.gather(self.articles_pop_recently_clicked, tf.expand_dims(item_ids, -1)))
if self.plot_histograms:
tf.summary.histogram('batch_articles_pop/'+summary_suffix, family='stats',
values=tf.boolean_mask(batch_articles_pop, tf.cast(tf.sign(item_ids), tf.bool)))
batch_articles_pop_norm = self.normalize_popularity_feature(batch_articles_pop, item_ids)
if self.plot_histograms:
tf.summary.histogram('batch_articles_pop_norm/'+summary_suffix, family='stats',
values=tf.boolean_mask(batch_articles_pop_norm, tf.cast(tf.sign(item_ids), tf.bool)))
return batch_articles_pop_norm
def get_items_dynamic_features(self, item_ids, events_timestamp, summary_suffix=''):
with tf.variable_scope("items_dynamic_features", reuse=tf.AUTO_REUSE):
#Computing RECENCY feature
elapsed_days_since_publishing_log, batch_articles_creation_date = \
self.get_items_recency_feature(item_ids, events_timestamp, summary_suffix=summary_suffix)
#Computing POPULARITY feature
batch_articles_pop_log = self.get_items_popularity_feature(item_ids,
summary_suffix=summary_suffix)
dynamic_features_concat = tf.concat([elapsed_days_since_publishing_log,
batch_articles_pop_log],
axis=2)
return dynamic_features_concat
def get_sample_from_recently_clicked_items_buffer(self, sample_size):
with tf.variable_scope("neg_samples_buffer"):
pop_recent_items_buffer_masked = tf.boolean_mask(self.pop_recent_items_buffer,
tf.cast(tf.sign(self.pop_recent_items_buffer), tf.bool))
unique_pop_recent_items_buffer_masked, _ = tf.unique(pop_recent_items_buffer_masked)
#tf.summary.scalar('unique_clicked_items_on_buffer', family='stats', tensor=tf.shape(unique_pop_recent_items_buffer_masked)[0])
tf.summary.scalar('clicked_items_on_buffer', family='stats', tensor=tf.shape(pop_recent_items_buffer_masked)[0])
#recent_items_unique_sample, idxs = tf.unique(tf.random_shuffle(pop_recent_items_buffer_masked)[:sample_size*sample_size_factor_to_look_for_unique])
recent_items_unique_sample = tf.random_shuffle(unique_pop_recent_items_buffer_masked)
#Samples K articles from recent articles
#sample_recent_articles_ids = tf.random_shuffle(articles_metadata_creation_date_past_only)[:recent_articles_samples_for_eval][:,self.articles_metadata_columns_dict['article_id']]
sample_recently_clicked_items = recent_items_unique_sample[:sample_size]
return sample_recently_clicked_items
def get_masked_seq_values(self, tensor):
return tf.boolean_mask(tensor, self.item_clicked_mask, name='masked_values')
def get_negative_samples(self, item_clicked, candidate_samples):
with tf.variable_scope("negative_samples"):
current_batch_size = tf.shape(item_clicked)[0]
#Repeating all unique items for each sample in the batch
batch_candidate_negative_items = tf.reshape(tf.tile(candidate_samples, [self.batch_size]), [self.batch_size,-1])
#Reducing rows if batch size is lower than the default (last step)
batch_candidate_negative_items = batch_candidate_negative_items[:current_batch_size,:]
#For each batch sample, filters out session items to keep only negative items
#Ps. remove last columns (according to max session size) to remove padding zeros.
# Side effect is that higher item ids are ignored for shorter sessions (because set_difference() sorts ids increasingly)
batch_negative_items = tf.sparse_tensor_to_dense(tf.sets.set_difference(batch_candidate_negative_items,
item_clicked))
return batch_negative_items
def get_batch_negative_samples(self, item_clicked, additional_samples, num_negative_samples):
with tf.variable_scope("neg_samples_batch"):
current_batch_size, batch_max_session_length = tf.shape(item_clicked)[0], tf.shape(item_clicked)[1]
batch_items = tf.reshape(item_clicked, [-1])
#Removing padded (zeroed) items
batch_items_unique, _ = tf.unique(tf.boolean_mask(batch_items, tf.cast(tf.sign(batch_items), dtype=tf.bool)))
#Concatenating batch items with additional samples (to deal with small batches)
candidate_neg_items = tf.concat([batch_items_unique, additional_samples], axis=0)
#Ignoring zeroes at the end of the negative samples matrix
batch_negative_items = self.get_negative_samples(item_clicked, candidate_neg_items) \
[:, :-tf.maximum(1,batch_max_session_length-1)]
#Randomly picks K negative samples for each batch sample
#Ps. transpose() is necessary because random_shuffle() only shuffles first dimension, and we want to shuffle the second dimension
#batch_negative_items = tf.transpose(tf.random_shuffle(tf.transpose(batch_negative_items)))[:,:num_negative_samples]
batch_negative_items = shuffle_columns(batch_negative_items)[:,:num_negative_samples]
return batch_negative_items
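# Negative sampling sketch: candidates are the unique non-padded items of the current
# batch plus `additional_samples` drawn from the recent-clicks buffer; the items each
# session actually clicked are removed via tf.sets.set_difference(), and K negatives per
# session are kept after shuffling each row independently.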
def build_rnn(self, the_input, lengths, rnn_units=256):
with tf.variable_scope("RNN"):
fw_cells = []
for _ in range(self.rnn_num_layers):
#cell = tf.nn.rnn_cell.GRUCell(rnn_units)
cell = tf.nn.rnn_cell.LSTMCell(rnn_units, state_is_tuple=True)
cell = tf.nn.rnn_cell.DropoutWrapper(cell,
output_keep_prob=self.keep_prob,
input_keep_prob=self.keep_prob)
fw_cells.append(cell)
fw_stacked_cells = tf.contrib.rnn.MultiRNNCell(fw_cells, state_is_tuple=True)
rnn_outputs, rnn_final_hidden_state_tuples = \
tf.nn.dynamic_rnn(fw_stacked_cells, the_input, dtype=tf.float32, sequence_length=lengths)
if self.plot_histograms:
tf.summary.histogram("rnn/outputs", rnn_outputs)
return rnn_outputs
def create_item_embed_lookup_variable(self):
with tf.variable_scope("item_embedding"):
self.content_embedding_variable = tf.Variable(self.content_article_embeddings_matrix,
trainable=False)
class ClickedItemsState:
def __init__(self, recent_clicks_buffer_size, num_items):
self.recent_clicks_buffer_size = recent_clicks_buffer_size
self.num_items = num_items
self.reset_state()
def reset_state(self):
#Global state
self.articles_pop = np.zeros(shape=[self.num_items], dtype=np.int64)
self.pop_recent_clicks_buffer = np.zeros(shape=[self.recent_clicks_buffer_size], dtype=np.int64)
#State shared by ItemCooccurrenceRecommender and ItemKNNRecommender
self.items_coocurrences = csr_matrix((self.num_items, self.num_items), dtype=np.int64)
#States specific for benchmarks
self.benchmarks_states = dict()
def save_state_checkpoint(self):
self.articles_pop_chkp = np.copy(self.articles_pop)
self.pop_recent_clicks_buffer_chkp = np.copy(self.pop_recent_clicks_buffer)
self.items_coocurrences_chkp = csr_matrix.copy(self.items_coocurrences)
self.benchmarks_states_chkp = deepcopy(self.benchmarks_states)
def restore_state_checkpoint(self):
self.articles_pop = self.articles_pop_chkp
del self.articles_pop_chkp
self.pop_recent_clicks_buffer = self.pop_recent_clicks_buffer_chkp
del self.pop_recent_clicks_buffer_chkp
self.items_coocurrences = self.items_coocurrences_chkp
del self.items_coocurrences_chkp
self.benchmarks_states = self.benchmarks_states_chkp
del self.benchmarks_states_chkp
def get_articles_pop(self):
return self.articles_pop
def get_recent_clicks_buffer(self):
return self.pop_recent_clicks_buffer
def get_articles_pop_from_recent_clicks_buffer(self):
recent_clicks_buffer_nonzero = self.pop_recent_clicks_buffer[np.nonzero(self.pop_recent_clicks_buffer)]
recent_clicks_item_counter = Counter(recent_clicks_buffer_nonzero)
pop_recently_clicked = np.zeros(shape=[self.num_items], dtype=np.int64)
pop_recently_clicked[list(recent_clicks_item_counter.keys())] = list(recent_clicks_item_counter.values())
return pop_recently_clicked
def get_items_coocurrences(self):
return self.items_coocurrences
def _get_non_zero_items_vector(self, batch_clicked_items):
#Converting batch items to a vector sorted by last clicked items in sessions
batch_items_vector = batch_clicked_items.T.reshape(-1)[::-1]
return batch_items_vector[np.nonzero(batch_items_vector)]
def update_items_state(self, batch_clicked_items):
batch_items_nonzero = self._get_non_zero_items_vector(batch_clicked_items)
self._update_recently_clicked_items_buffer(batch_items_nonzero)
self._update_pop_items(batch_items_nonzero)
def _update_recently_clicked_items_buffer(self, batch_items_nonzero):
#TODO: Keep on buffer based on time (e.g. last X hours), and not on last N clicks
#Updating buffer with latest clicked elements
self.pop_recent_clicks_buffer = np.hstack([batch_items_nonzero, self.pop_recent_clicks_buffer])[:self.recent_clicks_buffer_size]
def _update_pop_items(self, batch_items_nonzero):
batch_item_counter = Counter(batch_items_nonzero)
self.articles_pop[list(batch_item_counter.keys())] += list(batch_item_counter.values())
def update_items_coocurrences(self, batch_clicked_items):
for session_items in batch_clicked_items:
session_pairs = list(permutations(session_items[np.nonzero(session_items)], r=2))
if not session_pairs:
continue #sessions with fewer than two non-padded items produce no co-occurrence pairs
rows, cols = zip(*session_pairs)
self.items_coocurrences[rows, cols] += 1
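# Example: a session whose non-padded items are [a, b, c] increments the counts of all
# ordered pairs (a,b), (a,c), (b,a), (b,c), (c,a), (c,b) by one in the sparse matrix.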
class ItemsStateUpdaterHook(tf.train.SessionRunHook):
"""Saves summaries during eval loop."""
def __init__(self, mode, model, eval_metrics_top_n,
clicked_items_state, eval_sessions_metrics_log,
sessions_negative_items_log,
eval_benchmark_classifiers=[],
eval_metrics_by_session_position=False):
self.mode = mode
self.model = model
self.eval_metrics_top_n = eval_metrics_top_n
self.clicked_items_state = clicked_items_state
self.eval_sessions_metrics_log = eval_sessions_metrics_log
self.sessions_negative_items_log = sessions_negative_items_log
self.bench_classifiers = [clf['recommender'](self.clicked_items_state,
clf['params'],
ItemsStateUpdaterHook.create_eval_metrics(self.eval_metrics_top_n)) for clf in eval_benchmark_classifiers]
self.eval_metrics_by_session_position = eval_metrics_by_session_position
def begin(self):
if self.mode == tf.estimator.ModeKeys.EVAL:
tf.logging.info("Saving items state checkpoint from train")
#Save state of items popularity and recency from train loop, to restore after evaluation finishes
self.clicked_items_state.save_state_checkpoint()
#Resets streaming metrics
self.eval_streaming_metrics_last = {}
for clf in self.bench_classifiers:
clf.reset_eval_metrics()
self.streaming_metrics = ItemsStateUpdaterHook.create_eval_metrics(self.eval_metrics_top_n)
#self.metrics_by_session_pos = StreamingMetrics(topn=self.metrics_top_n)
self.stats_logs = []
#Runs before every batch
def before_run(self, run_context):
fetches = {'clicked_items': self.model.item_clicked,
'next_item_labels': self.model.next_item_label,
'last_item_label': self.model.label_last_item,
'session_id': self.model.session_id,
'session_start': self.model.session_start,
'user_id': self.model.user_id,
}
if self.mode == tf.estimator.ModeKeys.EVAL:
fetches['eval_batch_negative_items'] = self.model.batch_negative_items
fetches['batch_items_count'] = self.model.batch_items_count
fetches['batch_unique_items_count'] = self.model.batch_unique_items_count
fetches['hitrate_at_1'] = self.model.next_item_accuracy_at_1_update_op
fetches['hitrate_at_n'] = self.model.recall_at_n_update_op
fetches['mrr_at_n'] = self.model.mrr_update_op
#fetches['ndcg_at_n'] = self.model.ndcg_at_n_mean_update_op
fetches['predicted_item_ids'] = self.model.predicted_item_ids
feed_dict = {
self.model.articles_pop: self.clicked_items_state.get_articles_pop(),
self.model.pop_recent_items_buffer: self.clicked_items_state.get_recent_clicks_buffer(),
self.model.articles_pop_recently_clicked: self.clicked_items_state.get_articles_pop_from_recent_clicks_buffer()
}
return tf.train.SessionRunArgs(fetches=fetches,
feed_dict=feed_dict)
def evaluate_and_update_streaming_metrics_last(self, clf, users_ids, clicked_items, next_item_labels, eval_negative_items):
clf_metrics = clf.evaluate(users_ids, clicked_items, next_item_labels, topk=self.eval_metrics_top_n,
eval_negative_items=eval_negative_items)
self.eval_streaming_metrics_last = merge_two_dicts(self.eval_streaming_metrics_last, clf_metrics)
def evaluate_metrics_by_session_pos(self, predictions, labels):
recall_by_session_pos, recall_total_by_session_pos = self.metrics_by_session_pos.recall_at_n_by_session_pos(predictions, labels, self.metrics_top_n)
recall_by_session_pos_dict = dict([("recall_by_session_pos_{0:02d}".format(key), recall_by_session_pos[key]) for key in recall_by_session_pos])
sessions_length_dict = dict([("sessions_length_count_{0:02d}".format(key), recall_total_by_session_pos[key]) for key in recall_total_by_session_pos])
self.eval_streaming_metrics_last = merge_two_dicts(merge_two_dicts(self.eval_streaming_metrics_last, recall_by_session_pos_dict), sessions_length_dict)
#Runs after every batch
def after_run(self, run_context, run_values):
clicked_items = run_values.results['clicked_items']
next_item_labels = run_values.results['next_item_labels']
last_item_label = run_values.results['last_item_label']
users_ids = run_values.results['user_id']
sessions_ids = run_values.results['session_id']
if self.mode == tf.estimator.ModeKeys.EVAL:
self.eval_streaming_metrics_last = {}
self.eval_streaming_metrics_last['hitrate_at_1'] = run_values.results['hitrate_at_1']
self.eval_streaming_metrics_last['hitrate_at_n'] = run_values.results['hitrate_at_n']
self.eval_streaming_metrics_last['mrr_at_n'] = run_values.results['mrr_at_n']
#self.eval_streaming_metrics_last['ndcg_at_n'] = run_values.results['ndcg_at_n']
predicted_item_ids = run_values.results['predicted_item_ids']
#tf.logging.info('predicted_item_ids: {}'.format(predicted_item_ids))
if self.eval_metrics_by_session_position:
self.evaluate_metrics_by_session_pos(predicted_item_ids, next_item_labels)
eval_batch_negative_items = run_values.results['eval_batch_negative_items']
if self.sessions_negative_items_log is not None:
#Accumulating session negative items, to allow evaluation comparison
# with benchmarks outside the framework (e.g. Matrix Factorization)
for session_id, neg_items in zip(sessions_ids,
eval_batch_negative_items):
self.sessions_negative_items_log.append({'session_id': str(session_id), #Convert numeric session_id to str because large ints are not serializable
'negative_items': neg_items})
batch_stats = {'eval_sampled_negative_items': eval_batch_negative_items.shape[1],
'batch_items_count': run_values.results['batch_items_count'],
'batch_unique_items_count': run_values.results['batch_unique_items_count'],
'batch_sessions_count': len(sessions_ids)
#'recent_items_buffer_filled': np.count_nonzero(clicked_items_state.get_recent_clicks_buffer()),
}
self.stats_logs.append(batch_stats)
tf.logging.info('batch_stats: {}'.format(batch_stats))
#Computing metrics for this neural model
model_metrics_values = compute_metrics(predicted_item_ids, next_item_labels,
self.streaming_metrics,
metrics_suffix='main')
self.eval_streaming_metrics_last = merge_two_dicts(self.eval_streaming_metrics_last,
model_metrics_values)
#Computing metrics for Benchmark recommenders
for clf in self.bench_classifiers:
tf.logging.info('Evaluating benchmark: {}'.format(clf.get_description()))
self.evaluate_and_update_streaming_metrics_last(clf, users_ids,
clicked_items, next_item_labels, eval_batch_negative_items)
tf.logging.info('Finished benchmarks evaluation')
#Training benchmark classifier
for clf in self.bench_classifiers:
#Because GCom session_ids are not timestamps, artificial session_ids can be generated
# by concatenating session_start with hashed session ids, making it straightforward to sort them by time
#TODO: In the next generation of Gcom dataset, make this transformation before saving to TFRecord and remove from here
'''
sessions_ids_hashed = list([int('{}{}'.format(session_start, hash_str_to_int(session_id, 3))) \
for session_start, session_id in zip(run_values.results['session_start'],
run_values.results['session_id'])])
clf.train(users_ids, sessions_ids_hashed, clicked_items, next_item_labels)
'''
clf.train(users_ids, sessions_ids, clicked_items, next_item_labels)
#Concatenating all clicked items in the batch (including last label)
batch_clicked_items = np.concatenate([clicked_items,last_item_label], axis=1)
#Updating items state
self.clicked_items_state.update_items_state(batch_clicked_items)
self.clicked_items_state.update_items_coocurrences(batch_clicked_items)
def end(self, session=None):
if self.mode == tf.estimator.ModeKeys.EVAL:
avg_neg_items = np.mean([x['eval_sampled_negative_items'] for x in self.stats_logs])
self.eval_streaming_metrics_last['avg_eval_sampled_neg_items'] = avg_neg_items
clicks_count = np.sum([x['batch_items_count'] for x in self.stats_logs])
self.eval_streaming_metrics_last['clicks_count'] = clicks_count
sessions_count = np.sum([x['batch_sessions_count'] for x in self.stats_logs])
self.eval_streaming_metrics_last['sessions_count'] = sessions_count
self.eval_sessions_metrics_log.append(self.eval_streaming_metrics_last)
eval_metrics_str = '\n'.join(["'{}':\t{:.4f}".format(metric, value) for metric, value in sorted(self.eval_streaming_metrics_last.items())])
tf.logging.info("Evaluation metrics: [{}]".format(eval_metrics_str))
tf.logging.info("Restoring items state checkpoint from train")
#Restoring the original state of items popularity and recency state from train loop
self.clicked_items_state.restore_state_checkpoint()
@staticmethod
def create_eval_metrics(top_n):
eval_metrics = [metric(topn=top_n) for metric in [HitRate, MRR]]
return eval_metrics
``` |
{
"source": "jiaruixu/Detectron",
"score": 3
} |
#### File: cityscapesscripts/preparation/createTrainIdInstanceImgs.py
```python
from __future__ import print_function
import os, glob, sys
# cityscapes imports
sys.path.append( os.path.normpath( os.path.join( os.path.dirname( __file__ ) , '..' , 'helpers' ) ) )
from csHelpers import printError
from json2instanceImg import json2instanceImg
# The main method
def main():
# Where to look for Cityscapes
if 'CITYSCAPES_DATASET' in os.environ:
cityscapesPath = os.environ['CITYSCAPES_DATASET']
else:
cityscapesPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','..')
# how to search for all ground truth
searchFine = os.path.join( cityscapesPath , "gtFine" , "*" , "*" , "*_gt*_polygons.json" )
searchCoarse = os.path.join( cityscapesPath , "gtCoarse" , "*" , "*" , "*_gt*_polygons.json" )
# search files
filesFine = glob.glob( searchFine )
filesFine.sort()
filesCoarse = glob.glob( searchCoarse )
filesCoarse.sort()
# concatenate fine and coarse
files = filesFine + filesCoarse
# files = filesFine # use this line if fine is enough for now.
# quit if we did not find anything
if not files:
printError( "Did not find any files. Please consult the README." )
# a bit verbose
print("Processing {} annotation files".format(len(files)))
# iterate through files
progress = 0
print("Progress: {:>3} %".format( progress * 100 / len(files) ), end=' ')
for f in files:
# create the output filename
dst = f.replace( "_polygons.json" , "_instanceTrainIds.png" )
# do the conversion
try:
json2instanceImg( f , dst , "trainIds" )
except:
print("Failed to convert: {}".format(f))
raise
# status
progress += 1
print("\rProgress: {:>3} %".format( progress * 100 / len(files) ), end=' ')
sys.stdout.flush()
# call the main
if __name__ == "__main__":
main()
``` |
{
"source": "JiarunLiu/Co-correcting",
"score": 2
} |
#### File: JiarunLiu/Co-correcting/BasicTrainer.py
```python
import os
import copy
import json
import datetime
import numpy as np
from os.path import join
import torch
import torchvision
from dataset.cifar import CIFAR10, CIFAR100
from dataset.mnist import MNIST
from dataset.ISIC import ISIC
from dataset.clothing1m import Clothing1M
from dataset.PatchCamelyon import PatchCamelyon
from models.densenet import densenet121, densenet161, densenet169, densenet201
from models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152
from models.preact_resnet import PreActResNet18, PreActResNet34, PreActResNet50, PreActResNet101, PreActResNet152
from models.coteaching_model import MLPNet, CNN_small, CNN
class BasicTrainer(object):
def __init__(self, args):
self._get_args(args)
if self.args.random_seed is not None:
torch.manual_seed(self.args.random_seed)
def _save_meta(self):
# save meta data
print(vars(self.args))
nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
with open(join(self.args.dir, "settings-{}.json".format(nowTime)), 'w') as f:
json.dump(vars(self.args), f, indent=4, sort_keys=True)
def _get_args(self, args):
self.args = args
# addition args
self.args.checkpoint_dir = join(self.args.dir, "checkpoint.pth.tar")
self.args.modelbest_dir = join(self.args.dir, "model_best.pth.tar")
self.args.record_dir = join(self.args.dir, 'record.json')
self.args.y_file = join(self.args.dir, "y.npy")
self.best_prec1 = 0
def _get_model(self, backbone):
if backbone == 'resnet18':
model = resnet18(pretrained=True, num_classes=self.args.classnum).to(self.args.device)
elif backbone == 'resnet34':
model = resnet34(pretrained=True, num_classes=self.args.classnum).to(self.args.device)
elif backbone == 'resnet50':
model = resnet50(pretrained=True, num_classes=self.args.classnum).to(self.args.device)
elif backbone == 'resnet101':
model = resnet101(pretrained=True, num_classes=self.args.classnum).to(self.args.device)
elif backbone == 'resnet152':
model = resnet152(pretrained=True, num_classes=self.args.classnum).to(self.args.device)
elif backbone == 'preact_resnet18':
model = PreActResNet18(num_classes=self.args.classnum, input_size=self.args.image_size,
input_dim=self.args.input_dim).to(self.args.device)
elif backbone == 'preact_resnet34':
model = PreActResNet34(num_classes=self.args.classnum, input_size=self.args.image_size,
input_dim=self.args.input_dim).to(self.args.device)
elif backbone == 'preact_resnet50':
model = PreActResNet50(num_classes=self.args.classnum, input_size=self.args.image_size,
input_dim=self.args.input_dim).to(self.args.device)
elif backbone == 'preact_resnet101':
model = PreActResNet101(num_classes=self.args.classnum, input_size=self.args.image_size,
input_dim=self.args.input_dim).to(self.args.device)
elif backbone == 'preact_resnet152':
model = PreActResNet152(num_classes=self.args.classnum, input_size=self.args.image_size,
input_dim=self.args.input_dim).to(self.args.device)
elif backbone == 'densenet121':
model = densenet121(num_classes=self.args.classnum, pretrained=True).to(self.args.device)
elif backbone == 'densenet161':
model = densenet161(num_classes=self.args.classnum, pretrained=True).to(self.args.device)
elif backbone == 'densenet169':
model = densenet169(num_classes=self.args.classnum, pretrained=True).to(self.args.device)
elif backbone == 'densenet201':
model = densenet201(num_classes=self.args.classnum, pretrained=True).to(self.args.device)
elif backbone == 'mlp':
model = MLPNet().to(self.args.device)
elif backbone == 'cnn_small' or backbone == "CNN_SMALL":
model = CNN_small(self.args.classnum).to(self.args.device)
elif backbone == "cnn" or backbone == "CNN":
model = CNN(n_outputs=self.args.classnum, input_channel=self.args.input_dim, linear_num=self.args.linear_num).to(self.args.device)
else:
print("No matched backbone. Using ResNet50...")
model = resnet50(pretrained=True, num_classes=self.args.classnum,
input_size=self.args.image_size).to(self.args.device)
return model
def _get_optim(self, parm, optim="SGD", scheduler=None, lr=None):
if optim == "SGD" or optim == "sgd":
optimizer = torch.optim.SGD(parm, lr=lr if lr else self.args.lr, momentum=self.args.momentum, weight_decay=self.args.weight_decay)
elif optim == "adam" or optim == "Adam" or optim == "ADAM":
optimizer = torch.optim.Adam(parm, lr=lr if lr else self.args.lr)
elif optim == "adamw" or optim == "AdamW":
optimizer = torch.optim.AdamW(parm, lr=lr if lr else self.args.lr)
elif optim == "RMSprop" or optim == "rmsprop":
optimizer = torch.optim.RMSprop(parm, lr=lr if lr else self.args.lr, momentum=self.args.momentum, weight_decay=self.args.weight_decay)
elif optim == "Adadelta":
optimizer = torch.optim.Adadelta(parm, lr=lr if lr else self.args.lr)
elif optim == "Adagrad":
optimizer = torch.optim.Adagrad(parm, lr=lr if lr else self.args.lr)
else:
NotImplementedError("No Such Optimizer Implemented: {}".format(optim))
return optimizer
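# Minimal usage sketch (illustrative; the optimizer name is assumed to come from the
# command-line settings):
#   optimizer = self._get_optim(model.parameters(), optim="SGD", lr=self.args.lr)
# Note that the `scheduler` argument is accepted but not used by this helper.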
def _get_dataset_isic(self):
transform = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(p=0.5),
torchvision.transforms.RandomVerticalFlip(p=0.5),
torchvision.transforms.RandomRotation(degrees=[-180, 180]),
torchvision.transforms.Resize(self.args.image_size),
torchvision.transforms.ToTensor(),
])
transform1 = torchvision.transforms.Compose([
torchvision.transforms.Resize(self.args.image_size),
torchvision.transforms.ToTensor(),
])
trainset = ISIC(root=self.args.root,
train=0,
transform=transform,
noise_type=self.args.noise_type,
noise_rate=self.args.noise,
device=self.args.data_device,
redux=self.args.train_redux,
image_size=self.args.image_size)
testset = ISIC(root=self.args.root,
train=1,
transform=transform1,
noise_type='clean',
noise_rate=self.args.noise,
device=self.args.data_device,
redux=self.args.test_redux,
image_size=self.args.image_size)
valset = ISIC(root=self.args.root,
train=2,
transform=transform1,
noise_type='clean',
noise_rate=self.args.noise,
device=self.args.data_device,
redux=self.args.val_redux,
image_size=self.args.image_size)
return trainset, testset, valset
def _get_dataset_pcam(self):
transform = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(p=0.5),
torchvision.transforms.RandomVerticalFlip(p=0.5),
torchvision.transforms.RandomRotation(degrees=[-90, 90]),
torchvision.transforms.ColorJitter(0.2, 0.75, 0.25, 0.04),
torchvision.transforms.Resize(self.args.image_size),
torchvision.transforms.ToTensor(),
])
transform1 = torchvision.transforms.Compose([
torchvision.transforms.Resize(self.args.image_size),
torchvision.transforms.ToTensor(),
])
trainset = PatchCamelyon(root=self.args.root,
train=0,
transform=transform,
noise_type=self.args.noise_type,
noise_rate=self.args.noise,
redux=self.args.train_redux,
random_ind_redux=self.args.random_ind_redux)
testset = PatchCamelyon(root=self.args.root,
train=1,
transform=transform1,
noise_type='clean',
noise_rate=0,
redux=self.args.test_redux,
random_ind_redux = self.args.random_ind_redux)
valset = PatchCamelyon(root=self.args.root,
train=2,
transform=transform1,
noise_type='clean',
noise_rate=0,
redux=self.args.val_redux,
random_ind_redux=self.args.random_ind_redux)
return trainset, testset, valset
def _get_dataset_mnist(self):
transform1 = torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.ColorJitter(0.2, 0.75, 0.25, 0.04),
torchvision.transforms.ToTensor(),
])
transform = torchvision.transforms.ToTensor()
trainset = MNIST(root=self.args.root,
download=True,
train=0,
transform=transform1,
noise_type=self.args.noise_type,
noise_rate=self.args.noise,
redux=self.args.train_redux,
)
testset = MNIST(root=self.args.root,
download=True,
train=1,
transform=transform,
noise_type='clean',
noise_rate=0,
redux=self.args.test_redux,
full_test=self.args.full_test,
)
valset = MNIST(root=self.args.root,
download=True,
train=2,
transform=transform,
noise_type='clean',
noise_rate=0,
redux=self.args.val_redux,
)
return trainset, testset, valset
def _load_data(self):
if self.args.dataset == 'isic':
trainset, testset, valset = self._get_dataset_isic()
elif self.args.dataset == 'mnist':
trainset, testset, valset = self._get_dataset_mnist()
elif self.args.dataset == 'pcam':
trainset, testset, valset = self._get_dataset_pcam()
else:
NotImplementedError("Dataset [{}] Was Not Been Implemented".format(self.args.dataset))
trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.args.batch_size,
shuffle=True, num_workers=self.args.workers,
pin_memory=(self.args.data_device == 1))
testloader = torch.utils.data.DataLoader(testset, batch_size=self.args.batch_size,
shuffle=False, num_workers=self.args.workers,
pin_memory=(self.args.data_device == 1))
valloader = torch.utils.data.DataLoader(valset, batch_size=self.args.batch_size,
shuffle=False, num_workers=self.args.workers,
pin_memory=(self.args.data_device == 1))
self.train_batch_num = len(trainloader)
self.test_batch_num = len(testloader)
self.val_batch_num = len(valloader)
self.train_data_num = len(trainset)
self.test_data_num = len(testset)
self.val_data_num = len(valset)
self.noise_or_not = trainset.noise_or_not
self.clean_labels = trainset.labels
print("Train num: {}\tTest num: {}\tVal num: {}".format(len(trainset), len(testset), len(valset)))
return trainloader, testloader, valloader
```
#### File: Co-correcting/dataset/ISIC.py
```python
import torch.utils.data
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
import os
import os.path
from os.path import join
import json
from dataset.utils import noisify
class ISIC(torch.utils.data.Dataset):
'''
Notes:
train/val/test split: first 60% / 10% / 30% of the archive
original image size (h x w): 767 x 1022
'''
def __init__(self,
root,
train=0,
transform=None,
target_transform=None,
noise_type='clean',
noise_rate=0.00,
device=1,
redux=None,
image_size=None
):
base_folder = root
self.image_folder = join(base_folder, 'Images')
self.data_list_f = join(base_folder, "data_list2.json")
self.label_folder = join(base_folder, 'Descriptions')
self.labelOrder = ['benign', 'malignant']
self.root = root
self.transform = transform
self.target_transform = target_transform
self.train = train # 0: train split, 1: test split, 2: val split
self.device = device # 0: load images from disk on demand; 1: preload images into RAM
self.noise_type = noise_type
self.random_state = 0
with open(self.data_list_f, 'r') as data_f:
data_dict = json.load(data_f)
if self.train == 0:
self.data_list = data_dict['train']
elif self.train == 1:
self.data_list = data_dict['test']
else:
self.data_list = data_dict['val']
if redux:
self.data_list = self.data_list[:redux]
if image_size == None:
self.imageTransform = transforms.Compose([
transforms.Resize((720, 720), interpolation=Image.NEAREST)
])
else:
self.imageTransform = transforms.Compose([
transforms.Resize((image_size,image_size), interpolation=Image.NEAREST)
])
print("Loading data from {}".format(self.label_folder))
# now load the picked numpy arrays
self.data = []
self.labels = []
for f in self.data_list:
file = join(self.label_folder, f)
ff = open(file)
entry = json.load(ff)
try:
flabel = entry['meta']['clinical']['benign_malignant']
if not flabel in self.labelOrder:
raise Exception
label_ = self.labelOrder.index(flabel)
except:
label_ = 0 # fall back to the first class (benign) when the label is missing or unrecognized
data_ = join(self.image_folder, f + '.jpeg')
#print(data_)
assert os.path.isfile(data_)
if self.device == 1:
data_ = self.img_loader(data_)
self.data.append(data_)
self.labels.append(label_)
if self.device == 1:
# stack the preloaded images into a single (N, H, W, C) array
self.data = np.stack(self.data)
# noisy labels
self.labels = np.asarray(self.labels)
if noise_type == 'clean':
self.noise_or_not = np.ones([len(self.labels)], dtype=bool)
else:
self.noisy_labels, self.actual_noise_rate = noisify(dataset="ISIC",
nb_classes=2,
train_labels=np.expand_dims(self.labels, 1),
noise_type=noise_type,
noise_rate=noise_rate,
random_state=self.random_state)
self.noisy_labels = self.noisy_labels.squeeze()
self.noise_or_not = self.noisy_labels == self.labels
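# noise_or_not marks which samples still carry their original label after noise injection:
# True where noisy_labels == labels (and everywhere when noise_type == 'clean'), which lets
# the trainer measure how many corrupted labels are later recovered.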
def img_loader(self, img_path):
return np.asarray(self.imageTransform(Image.open(img_path))).astype(np.uint8)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target,index) where target is index of the target class.
"""
img = self.img_loader(self.data[index]) if self.device == 0 else self.data[index]
target = self.labels[index] if self.noise_type == 'clean' else self.noisy_labels[index]
# doing this so that it is consistent with all other datasets to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, index
def __len__(self):
return len(self.data_list)
```
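A minimal usage sketch for the dataset class above (the root path is hypothetical; it assumes this repo's module layout and a downloaded ISIC archive containing the `Images`/`Descriptions` folders and `data_list2.json`):
```python
import torch
import torchvision.transforms as transforms
from dataset.ISIC import ISIC

# train split (train=0) with 20% symmetric label noise, images loaded from disk on demand
train_set = ISIC(root="/path/to/ISIC",            # hypothetical path
                 train=0,
                 transform=transforms.ToTensor(),
                 noise_type='sn', noise_rate=0.2,
                 device=0, image_size=224)
loader = torch.utils.data.DataLoader(train_set, batch_size=8, shuffle=True)
img, target, index = next(iter(loader))           # __getitem__ returns (image, label, index)
```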
#### File: Co-correcting/utils/label_checker.py
```python
import json
import numpy as np
from os.path import join
def check_label_acc(A, B, onehotA=False, onehotB=False):
"""
get correct label percent in all labels
:param A: label A
:param B: label B
:param onehotA: bool, is label A in onehot?
:param onehotB: bool, is label B in onehot?
:return: matched percent in total labels
"""
A = np.argmax(A, axis=1) if onehotA else A
B = np.argmax(B, axis=1) if onehotB else B
try:
assert A.shape == B.shape
except:
redux = min(A.shape[0], B.shape[0])
A = A[:redux]
B = B[:redux]
t = np.sum(A == B)
accu = t / len(A)
return accu
def check_label_noisy2true(new_label, clean_label, noise_or_not, onehotA=False, onehotB=False):
new_label = np.argmax(new_label, axis=1) if onehotA else new_label
clean_label = np.argmax(clean_label, axis=1) if onehotB else clean_label
try:
assert new_label.shape == clean_label.shape
except:
redux = min(new_label.shape[0], clean_label.shape[0])
new_label = new_label[:redux]
clean_label = clean_label[:redux]
assert new_label.shape == noise_or_not.shape
assert new_label.shape == clean_label.shape
n2t_num = np.sum((new_label == clean_label).astype(np.int32) * (~noise_or_not).astype(np.int32))
n2t = n2t_num / clean_label.shape[0]
return n2t
def check_label_true2noise(new_label, clean_label, noise_or_not, onehotA=False, onehotB=False):
new_label = np.argmax(new_label, axis=1) if onehotA else new_label
clean_label = np.argmax(clean_label, axis=1) if onehotB else clean_label
try:
assert new_label.shape == clean_label.shape
except:
redux = min(new_label.shape[0], clean_label.shape[0])
new_label = new_label[:redux]
clean_label = clean_label[:redux]
assert new_label.shape == noise_or_not.shape
assert new_label.shape == clean_label.shape
t2n_num = np.sum((new_label != clean_label).astype(np.int32) * noise_or_not.astype(np.int32))
t2n = t2n_num / clean_label.shape[0]
return t2n
def check_label(new_label, clean_label, noise_or_not, onehotA=False, onehotB=False):
acc = check_label_acc(new_label, clean_label, onehotA, onehotB)
n2t = check_label_noisy2true(new_label, clean_label, noise_or_not, onehotA, onehotB)
t2n = check_label_true2noise(new_label, clean_label, noise_or_not, onehotA, onehotB)
return acc, n2t, t2n
```
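A small synthetic example of the three statistics above; note that, as written, `n2t` and `t2n` are both normalized by the total number of samples (a sketch, not from the repo):
```python
import numpy as np
from utils.label_checker import check_label   # module layout as in this repo

clean = np.array([0, 1, 1, 0, 1])
noise_or_not = np.array([True, True, False, True, False])  # True = original label was clean
new = np.array([0, 1, 1, 1, 1])                            # labels after correction
acc, n2t, t2n = check_label(new, clean, noise_or_not)
print(acc, n2t, t2n)  # 0.8, 0.4 (both noisy samples fixed), 0.2 (one clean label flipped)
```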
#### File: Co-correcting/utils/settings.py
```python
import sys
import argparse
def get_args():
if sys.platform == 'darwin':
clothing1m_root = "/home/fgldlb/Documents/dataset/Clothing-1M"
isic_root = '/Users/jiarunliu/Documents/BUCT/Label_517/dataset/ISIC-Archive-Downloader/Data_sample_balanced'
mnist_root = '/Users/jiarunliu/Documents/BUCT/Label_517/dataset/MNIST'
cifar10_root = '/Users/jiarunliu/Documents/BUCT/Label_517/dataset/cifar/cifar10'
cifar100_root = '/Users/jiarunliu/Documents/BUCT/Label_517/dataset/cifar/cifar100'
pcam_root = "/Users/jiarunliu/Documents/BUCT/Label_517/dataset/PatchCamelyon"
batch_size = 8
device = 'cpu'
data_device = 0
noise_type = 'sn'
stage1 = 1
stage2 = 3
elif sys.platform == 'linux':
clothing1m_root = "/home/fgldlb/Documents/dataset/Clothing-1M"
isic_root = '/home/fgldlb/Documents/ISIC-Archive-Downloader/NewData'
pcam_root = "/home/fgldlb/Documents/dataset/PatchCamelyon"
mnist_root = './data/mnist'
cifar10_root = './data/cifar10'
cifar100_root = './data/cifar100'
batch_size = 32
device = 'cuda:0'
data_device = 1
noise_type = 'sn'
stage1 = 70
stage2 = 200
else:
clothing1m_root = "/home/fgldlb/Documents/dataset/Clothing-1M"
isic_root = None
mnist_root = './data/mnist'
cifar10_root = '/data/cifar10'
cifar100_root = '/data/cifar100'
pcam_root = None
batch_size = 16
device = 'cpu'
data_device = 0
noise_type = 'clean'
stage1 = 70
stage2 = 200
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# normal parameters
parser.add_argument('-b', '--batch-size', default=batch_size, type=int,
metavar='N', help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float,
metavar='H-P', help='initial learning rate')
parser.add_argument('--lr2', '--learning-rate2', default=1e-5, type=float,
metavar='H-P', help='initial learning rate of stage3')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-3, type=float,
metavar='W', help='weight decay (default: 1e-3)')
parser.add_argument('--backbone', dest="backbone", default="resnet50", type=str,
help="backbone for PENCIL training")
parser.add_argument('--optim', dest="optim", default="SGD", type=str,
choices=['SGD', 'Adam', 'AdamW', 'RMSprop', 'Adadelta', 'Adagrad', 'mix'],
help="Optimizer for PENCIL training")
parser.add_argument('--scheduler', dest='scheduler', default=None, type=str, choices=['cyclic', None, "SWA"],
help="Optimizer for PENCIL training")
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
# Co-teaching parameters
parser.add_argument('--forget-rate', '--fr', '--forget_rate', default=0.2, type=float,
metavar='H-P', help='Forget rate. Suggest same with noisy density.')
parser.add_argument('--num-gradual', '--ng', '--num_gradual', default=10, type=int,
metavar='H-P', help='how many epochs for linear drop rate, can be 5, 10, 15. '
'This parameter is equal to Tk for R(T) in Co-teaching paper.')
parser.add_argument('--exponent', default=1, type=float,
metavar='H-P', help='exponent of the forget rate, can be 0.5, 1, 2. '
'This parameter is equal to c in Tc for R(T) in Co-teaching paper.')
parser.add_argument('--loss-type', dest="loss_type", default="coteaching_plus", type=str,
choices=['coteaching_plus', 'coteaching'],
help="loss type: [coteaching_plus, coteaching]")
parser.add_argument('--warmup', '--wm', '--warm-up', default=0, type=float,
metavar='H-P', help='Warm-up epochs, default 0.')
parser.add_argument('--linear-num', '--linear_num', default=256, type=int,
metavar='H-P', help='number of units in the linear feature layer')
# PENCIL parameters
parser.add_argument('--alpha', default=0.4, type=float,
metavar='H-P', help='the coefficient of Compatibility Loss')
parser.add_argument('--beta', default=0.1, type=float,
metavar='H-P', help='the coefficient of Entropy Loss')
parser.add_argument('--lambda1', default=200, type=int,
metavar='H-P', help='the value of lambda, ')
parser.add_argument('--K', default=10.0, type=float, )
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--epochs', default=320, type=int, metavar='H-P',
help='number of total epochs to run')
parser.add_argument('--stage1', default=stage1, type=int,
metavar='H-P', help='number of epochs until stage1')
parser.add_argument('--stage2', default=stage2, type=int,
metavar='H-P', help='number of epochs until stage2')
# Nosie settings
parser.add_argument('--noise', default=0.20, type=float,
help='noise density of data label')
parser.add_argument('--noise_type', default=noise_type, choices=['clean', 'sn', 'pairflip'],type=str,
help='noise type of data label')
# Data settings
parser.add_argument("--dataset", dest="dataset", default='mnist', type=str,
choices=['mnist', 'cifar10', 'cifar100', 'cifar2', 'isic', 'clothing1m', 'pcam'],
help="model input image size")
parser.add_argument("--image_size", dest="image_size", default=224, type=int,
help="model input image size")
parser.add_argument('--classnum', default=2, type=int,
metavar='H-P', help='number of train dataset classes')
parser.add_argument('--device', dest='device', default=device, type=str,
help='select gpu')
parser.add_argument('--data_device', dest="data_device", default=data_device, type=int,
help="Dataset loading device, 0 for hardware 1 for RAM. Default choice is 1. "
"Please ensure your computer have enough capacity!")
parser.add_argument('--dataRoot',dest='root',default=isic_root,
type=str,metavar='PATH',help='where is the dataset')
parser.add_argument('--datanum', default=15000, type=int,
metavar='H-P', help='number of train dataset samples')
parser.add_argument('--train-redux', dest="train_redux", default=None, type=int,
help='train data number, default None')
parser.add_argument('--test-redux', dest="test_redux", default=None, type=int,
help='test data number, default None')
parser.add_argument('--val-redux', dest="val_redux", default=None, type=int,
help='validate data number, default None')
parser.add_argument('--full-test', dest="full_test", default=False, type=bool,
help='use full test set data, default False')
parser.add_argument('--random-ind-redux', dest="random_ind_redux", default=False, type=bool,
help='use full test set data, default False')
# Curriculum settings
parser.add_argument("--curriculum", dest="curriculum", default=1, type=int,
help="curriculum in label updating")
parser.add_argument("--cluster-mode", dest="cluster_mode", default='dual', type=str, choices=['dual', 'single', 'dual_PCA'],
help="curriculum in label updating")
parser.add_argument("--dim-reduce", dest="dim_reduce", default=256, type=int,
help="Curriculum features dim reduce by PCA")
parser.add_argument("--mix-grad", dest="mix_grad", default=1, type=int,
help="mix gradient of two-stream arch, 1=True")
parser.add_argument("--discard", dest="discard", default=0, type=int,
help="only update discard sample's label, 1=True")
parser.add_argument("--gamma", dest="gamma", default=0.6, type=int,
help="forget rate schelduler param")
parser.add_argument("--finetune-schedule", '-fs', dest="finetune_schedule", default=0, type=int,
help="forget rate schelduler param")
# trainer settings
parser.add_argument('--dir', dest='dir', default="experiment/test-debug", type=str,
metavar='PATH', help='save dir')
parser.add_argument('--random-seed', dest='random_seed', default=None, type=int,
metavar='N', help='pytorch random seed, default None.')
args = parser.parse_args()
# Setting for different dataset
if args.dataset == "isic":
print("Training on ISIC")
args.backbone = 'resnet50'
args.image_size = 224
args.classnum = 2
args.input_dim = 3
elif args.dataset == 'mnist':
print("Training on mnist")
args.backbone = 'cnn'
if args.root == isic_root:
args.root = mnist_root
args.batch_size = 128
args.image_size = 28
args.classnum = 10
args.input_dim = 1
args.linear_num = 144
args.datanum = 60000
args.lr = 0.001
args.lr2 = 0.0001
elif args.dataset == 'pcam':
if args.root == isic_root:
args.root = pcam_root
args.backbone = 'densenet169'
args.batch_size = 128
args.image_size = 96
args.dim_reduce = 128
args.classnum = 2
args.input_dim = 3
args.stage1 = 70
args.stage2 = 200
args.epochs = 320
args.datanum = 262144
args.train_redux = 26214
args.test_redux = 3276
args.val_redux = 3276
args.random_ind_redux = False
else:
print("Use default setting")
return args
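# Example (illustrative, not from the repository): calling get_args() with no CLI
# flags uses all defaults; the 'mnist' branch above then overrides several fields:
#
#     args = get_args()
#     print(args.dataset, args.backbone, args.batch_size, args.classnum)
#     # -> mnist cnn 128 10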
``` |
{
"source": "JiarunLiu/EvidentialMix",
"score": 2
} |
#### File: JiarunLiu/EvidentialMix/edl_losses.py
```python
import torch
import torch.nn.functional as F
import numpy as np
import pdb
def one_hot_embedding(labels, num_classes=10):
y = torch.eye(num_classes)
neg = labels < 0 # negative labels
labels[neg] = 0 # placeholder label to class-0
y = y[labels] # create one hot embedding
y[neg, 0] = 0 # remove placeholder label
return y
def relu_evidence(y):
return F.relu(y)
def exp_evidence(y):
return torch.exp(torch.clamp(y, -10, 10))
def softplus_evidence(y):
return F.softplus(y)
def edl_mse_loss(output, target, device='cuda'):
evidence = F.relu(output)
alpha = evidence + 1
target = target.to(device)
alpha = alpha.to(device)
S = torch.sum(alpha, dim=1, keepdim=True)
err = torch.sum(
(target - (alpha / S)) ** 2, dim=1, keepdim=True)
var = torch.sum(
alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True)
return err + var
def edl_mae_loss(output, target, device='cuda'):
evidence = F.relu(output)
alpha = evidence + 1
target = target.to(device)
alpha = alpha.to(device)
S = torch.sum(alpha, dim=1, keepdim=True)
err = torch.sum(
torch.abs(target - (alpha / S)), dim=1, keepdim=True)
var = torch.sum(
alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True)
return err + var
def edl_soft_mse_loss(output, target, device='cuda'):
alpha = F.softmax(output, dim=1)
target = target.to(device)
alpha = alpha.to(device)
S = torch.sum(alpha, dim=1, keepdim=True)
err = torch.sum(
(target - (alpha / S)) ** 2, dim=1, keepdim=True)
var = torch.sum(
alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True)
return err + var
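# Illustrative sanity check (not part of the original file): EDL losses take raw
# network outputs and one-hot targets, and return a per-sample loss column.
if __name__ == "__main__":
    logits = torch.randn(4, 10)                                   # fake network outputs
    targets = one_hot_embedding(torch.randint(0, 10, (4,)), 10)   # one-hot labels
    print(edl_mse_loss(logits, targets, device='cpu').shape)      # torch.Size([4, 1])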
``` |
{
"source": "JiarunLiu/mixmo-pytorch",
"score": 2
} |
#### File: mixmo/networks/resnet.py
```python
import torch
import torch.nn as nn
from torch.nn import functional as F
from mixmo.augmentations import mixing_blocks
from mixmo.utils import torchutils
from mixmo.utils.logger import get_logger
LOGGER = get_logger(__name__, level="DEBUG")
BATCHNORM_MOMENTUM_PREACT = 0.1
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, inplanes, planes, stride=1, **kwargs):
super(PreActBlock, self).__init__()
final_planes = planes * self.expansion
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(inplanes, momentum=BATCHNORM_MOMENTUM_PREACT)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BATCHNORM_MOMENTUM_PREACT)
if stride != 1 or inplanes != final_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(inplanes, final_planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
"""
Pre-activated ResNet network
"""
def __init__(self, config_network, config_args):
nn.Module.__init__(self)
self.config_network = config_network
self.config_args = config_args
self._define_config()
self._init_first_layer()
self._init_core_network()
self._init_final_classifier()
self._init_weights_resnet()
LOGGER.warning("Features dimension: {features_dim}".format(features_dim=self.features_dim))
def _define_config(self):
"""
Initialize network parameters from specified config
"""
# network config
self.num_classes = self.config_args["data"]["num_classes"]
self.depth = self.config_network["depth"]
self._init_block(widen_factor=self.config_network["widen_factor"])
def _init_block(self, widen_factor):
"""
Build list of residual blocks for networks on the CIFAR datasets
Network type specifies number of layers for CIFAR network
"""
blocks = {
18: PreActBlock,
}
layers = {
18: [2, 2, 2, 2],
}
assert self.depth in layers, 'invalid depth for PreActResNet (only depth 18 is defined in this file)'
self._layers = layers[self.depth]
self._block = blocks[self.depth]
assert widen_factor in [1., 2., 3.]
self._nChannels = [
64,
64 * widen_factor, 128 * widen_factor,
256 * widen_factor, 512 * widen_factor
]
def _init_first_layer(self):
assert self.config_args["num_members"] == 1
self.conv1 = self._make_conv1(nb_input_channel=3)
def _init_core_network(self, max_layer=4):
"""
Build the core of the Residual network (residual blocks)
"""
self.inplanes = self._nChannels[0]
self.layer1 = self._make_layer(self._block, planes=self._nChannels[1],
blocks=self._layers[0], stride=1)
self.layer2 = self._make_layer(self._block, planes=self._nChannels[2],
blocks=self._layers[1], stride=2)
self.layer3 = self._make_layer(self._block, planes=self._nChannels[3],
blocks=self._layers[2], stride=2)
if max_layer == 4:
self.layer4 = self._make_layer(self._block, self._nChannels[4], blocks=self._layers[3], stride=2)
self.features_dim = self._nChannels[-1] * self._block.expansion
def _make_conv1(self, nb_input_channel):
conv1 = nn.Conv2d(
nb_input_channel, self._nChannels[0], kernel_size=3, stride=2, padding=1, bias=False
)
return conv1
def _make_layer(
self,
block,
planes,
blocks,
stride=1,
):
"""
Build a layer of successive (residual) blocks
"""
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(
inplanes=self.inplanes,
planes=planes,
stride=stride,
downsample=downsample)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,))
return nn.Sequential(*layers)
def _init_final_classifier(self):
"""
Build linear classification head
"""
self.fc = nn.Linear(self.features_dim, self.num_classes)
dense_gaussian = True
def _init_weights_resnet(self):
"""
Apply specified random initializations to all modules of the network
"""
for m in self.modules():
torchutils.weights_init_hetruncatednormal(m, dense_gaussian=self.dense_gaussian)
def forward(self, x):
if isinstance(x, dict):
metadata = x["metadata"] or {}
pixels = x["pixels"]
else:
metadata = {"mode": "inference"}
pixels = x
merged_representation = self._forward_first_layer(pixels, metadata)
extracted_features = self._forward_core_network(merged_representation)
dict_output = self._forward_final_classifier(extracted_features)
return dict_output
def _forward_first_layer(self, pixels, metadata=None):
return self.conv1(pixels)
def _forward_core_network(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x_avg = F.avg_pool2d(x, 4)
return x_avg.view(x_avg.size(0), -1)
def _forward_final_classifier(self, extracted_features):
x = self.fc(extracted_features)
dict_output = {"logits": x, "logits_0": x}
return dict_output
class PreActResNetMixMo(PreActResNet):
"""
Multi-Input Multi-Output ResNet network
"""
def _init_first_layer(self):
"""
Initialize the M input heads/encoders
"""
list_conv1 = []
for _ in range(0, self.config_args["num_members"]):
list_conv1.append(self._make_conv1(nb_input_channel=3))
self.list_conv1 = nn.ModuleList(list_conv1)
def _init_final_classifier(self):
"""
Initialize the M output heads/classifiers
"""
list_fc = []
for _ in range(0, self.config_args["num_members"]):
fc = nn.Linear(self.features_dim, self.num_classes)
list_fc.append(fc)
self.list_fc = nn.ModuleList(list_fc)
def _forward_first_layer(self, pixels, metadata):
metadata = metadata or {}
list_lfeats = []
# Embed the M inputs into the shared space
for num_member in range(0, self.config_args["num_members"]):
if pixels.size(1) == 3:
pixels_member = pixels
else:
pixels_member = pixels[:, 3*num_member:3*(num_member + 1)]
list_lfeats.append(self.list_conv1[num_member](pixels_member))
# Mix the M inputs in the shared space
merged_representation = mixing_blocks.mix_manifolds(list_lfeats, metadata=metadata)
return merged_representation
def _forward_final_classifier(self, extracted_features):
dict_output = {}
# compute individual logits
for num_member in range(0, self.config_args["num_members"]):
logits_n = self.list_fc[num_member](extracted_features)
dict_output["logits_" + str(num_member)] = logits_n
# compute ensemble logits by averaging
_list_logits = [
dict_output["logits_" + str(num_member)]
for num_member in range(0, self.config_args["num_members"])
]
dict_output["logits"] = torch.stack(_list_logits, dim=0).mean(dim=0)
return dict_output
resnet_network_factory = {
# For TinyImageNet
"resnet": PreActResNet,
"resnetmixmo": PreActResNetMixMo,
}
```
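A hedged construction sketch for the factory above; the two config dictionaries are assumptions inferred from `_define_config`, and running it requires the `mixmo` package (for the weight-init and manifold-mixing utilities):
```python
import torch
from mixmo.networks.resnet import resnet_network_factory

config_network = {"depth": 18, "widen_factor": 1}                 # assumed minimal config
config_args = {"data": {"num_classes": 200}, "num_members": 1}    # single-member variant
net = resnet_network_factory["resnet"](config_network, config_args)

out = net(torch.randn(2, 3, 64, 64))      # TinyImageNet-sized input
print(out["logits"].shape)                # torch.Size([2, 200])
```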
#### File: mixmo/utils/visualize.py
```python
import matplotlib.pyplot as plt
import torchvision
import numpy as np
__all__ = ['make_image', 'show_batch', 'write_calibration']
def write_calibration(
avg_confs_in_bins,
acc_in_bin_list,
prop_bin,
min_bin,
max_bin,
min_pred=0,
write_file=None,
suffix=""
):
"""
Utility function to show calibration through a histogram of classifier confidences
"""
fig, ax1 = plt.subplots()
ax1.plot([min_pred, 1], [min_pred, 1], "k:", label="Perfectly calibrated")
ax1.plot(avg_confs_in_bins,
acc_in_bin_list,
"s-",
label="%s" % ("Discriminator Calibration"))
if write_file:
suffix = write_file.split("/")[-1].split(".")[0].split("_")[0] + "_" + suffix
ax1.set_xlabel(f"Mean predicted value {suffix}")
ax1.set_ylabel("Accuracy")
ymin = min(acc_in_bin_list + [min_pred])
ax1.set_ylim([ymin, 1.0])
ax1.legend(loc="lower right")
ax2 = ax1.twinx()
ax2.hlines(prop_bin, min_bin, max_bin, label="%s" % ("Proportion in each bin"), color="r")
ax2.set_ylabel("Proportion")
ax2.legend(loc="upper center")
if not write_file:
plt.tight_layout()
plt.show()
else:
fig.savefig(
write_file
)
def make_image(img, mean=None, std=None, normalize=True):
"""
Transform a CIFAR numpy image into a pytorch image (need to swap dimensions)
"""
if mean is None and std is None:
from mixmo.augmentations.standard_augmentations import cifar_mean, cifar_std
mean = cifar_mean
std = cifar_std
npimg = img.numpy().copy()
if normalize:
for i in range(0, 3):
npimg[i] = npimg[i] * std[i] + mean[i] # unnormalize
return np.transpose(npimg, (1, 2, 0))
def show_batch(images, normalize=True):
"""
Plot images in a batch of images
"""
images = make_image(torchvision.utils.make_grid(images), normalize=normalize)
plt.imshow(images)
plt.show()
return images
```
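A short hedged example of `show_batch` on random data; `normalize=False` avoids the CIFAR mean/std import, and `plt.show()` opens a matplotlib window:
```python
import torch
from mixmo.utils.visualize import show_batch

grid = show_batch(torch.rand(16, 3, 32, 32), normalize=False)   # 16 random CIFAR-sized images
print(grid.shape)                                               # (H, W, 3) numpy grid
```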
#### File: mixmo-pytorch/scripts/templateutils_mixmo.py
```python
import os
import copy
import argparse
import datetime
from shutil import rmtree
from mixmo.utils import (misc, logger)
LOGGER = logger.get_logger(__name__, level="DEBUG")
DICT_NETWORK = {
# Tiny ImageNet with PreActResNet-18-w
"res18": {
"classifier": "resnet",
"depth": 18,
"widen_factor": 1,
"num_members": 1,
},
"res18-2": {
"classifier": "resnetmixmo",
"depth": 18,
"widen_factor": 1,
"num_members": 2,
},
"res182": {
"classifier": "resnet",
"depth": 18,
"widen_factor": 2,
"num_members": 1,
},
"res182-2": {
"classifier": "resnetmixmo",
"depth": 18,
"widen_factor": 2,
"num_members": 2,
},
"res183": {
"classifier": "resnet",
"depth": 18,
"widen_factor": 3,
"num_members": 1,
},
"res183-2": {
"classifier": "resnetmixmo",
"depth": 18,
"widen_factor": 3,
"num_members": 2,
},
# CIFAR with WideResNet-28-10
"wrn2810": {
"classifier": "wideresnet",
"depth": 28,
"widen_factor": 10,
"num_members": 1,
},
"wrn2810-2": {
"classifier": "wideresnetmixmo",
"depth": 28,
"widen_factor": 10,
"num_members": 2,
},
"wrn2810-3": {
"classifier": "wideresnetmixmo",
"depth": 28,
"widen_factor": 10,
"num_members": 3,
},
}
DICT_DATASET_CONFIG = {
"cifar10": {
"shared_config": {
"num_classes": 10,
"dataset_name": "cifar",
},
"templates": [
{
"networktype":
"wrn2810",
"trainingfiltering": [
{
"mixmoparams": ["1net"],
"dataaugparams": ["standard", "msdamixup", "msdacutmix"],
"scheduling": ["bar1"],
},
]
},
{
"networktype":
"wrn2810-2",
"trainingfiltering": [
{
"mixmoparams": ["mimo", "linearmixmo", "cutmixmo-p5"],
"dataaugparams": ["standard"],
"scheduling": ["bar4"],
},
{
"mixmoparams": ["linearmixmo", "cutmixmo-p5"],
"dataaugparams": ["msdacutmix"],
"scheduling": ["bar4"],
},
]
},
],
},
"cifar100": {
"shared_config": {
"num_classes": 100,
"dataset_name": "cifar",
},
"templates": [
{
"networktype":
"wrn2810",
"trainingfiltering": [
{
"mixmoparams": ["1net"],
"dataaugparams": ["standard", "msdamixup", "msdacutmix"],
"scheduling": ["bar1"],
},
]
},
{
"networktype":
"wrn2810-2",
"trainingfiltering": [
{
"mixmoparams": ["mimo", "linearmixmo", "cutmixmo-p5"],
"dataaugparams": ["standard"],
"scheduling": ["bar4"],
},
{
"mixmoparams": ["linearmixmo", "cutmixmo-p5"],
"dataaugparams": ["msdacutmix"],
"scheduling": ["bar4"],
},
]
},
],
},
"tinyimagenet": {
"shared_config": {
"num_classes": 200,
"dataset_name": "tinyimagenet",
},
"templates": [
{
"networktype":
"res18",
"trainingfiltering": [
{
"mixmoparams": ["1net"],
"dataaugparams": ["standard", "msdamixup", "msdacutmix"],
"scheduling": ["bar1"],
},
]
},
{
"networktype":
"res182",
"trainingfiltering": [
{
"mixmoparams": ["1net"],
"dataaugparams": ["standard"],
"scheduling": ["bar1"],
},
]
},
{
"networktype":
"res183",
"trainingfiltering": [
{
"mixmoparams": ["1net"],
"dataaugparams": ["standard"],
"scheduling": ["bar1"],
},
]
},
{
"networktype":
"res182-2",
"trainingfiltering": [
{
"mixmoparams": ["linearmixmo", "cutmixmo-p5"],
"dataaugparams": ["standard"],
"scheduling": ["bar4"],
},
]
},
{
"networktype":
"res183-2",
"trainingfiltering": [
{
"mixmoparams": ["linearmixmo", "cutmixmo-p5"],
"dataaugparams": ["standard"],
"scheduling": ["bar4"],
},
]
}
],
},
}
DICT_CONFIG = {
"scheduling": {
"tinyimagenet": {
"_default": {
"nb_epochs": 1200,
"batch_size": 100,
# regularization
"weight_decay_sgd": 1e-4,
"l2_reg": 0,
# lr
"milestone1": 600,
"milestone2": 900,
"milestone3": -1,
},
# tinyimagenet
"bar1": {
"batch_repetitions": 1,
"warmup_period": 1 * 1000,
"lrinit": 0.2 / 1,
},
"bar2": {
"batch_repetitions": 2,
"warmup_period": 2 * 1000,
"lrinit": 0.2 / 2,
},
"bar4": {
"batch_repetitions": 4,
"warmup_period": 4 * 1000,
"lrinit": 0.2 / 4,
},
},
"cifar": {
"_default": {
"nb_epochs": 300,
"batch_size": 64,
# lr
"weight_decay_sgd": 0,
"l2_reg": 0.0003,
# lrsche
"milestone1": 101,
"milestone2": 201,
"milestone3": 226,
},
"bar1": {
"warmup_period": 782 * 1,
"batch_repetitions": 1,
"lrinit": 0.1 / (2 * 1),
},
"bar2": {
"warmup_period": 782 * 2,
"batch_repetitions": 2,
"lrinit": 0.1 / (2 * 2),
},
"bar4": {
"warmup_period": 782 * 4,
"batch_repetitions": 4,
"lrinit": 0.1 / (2 * 4),
},
}
},
"mixmoparams": {
"_default": {
"mixmo_mix_method_name": "null",
"mixmo_mix_prob": 1,
"mixmo_alpha": 2,
"mixmo_weight_root": 3
},
"1net": {},
"mimo": {
"mixmo_mix_method_name": "mixup",
"mixmo_alpha": 0,
"mixmo_weight_root": 1,
},
"linearmixmo": {
"mixmo_mix_method_name": "mixup",
},
"cutmixmo-p5": {
"mixmo_mix_method_name": "cutmix",
"mixmo_mix_prob": 0.5
},
"cutmixmo-p5-a4": {
"mixmo_mix_method_name": "cutmix",
"mixmo_mix_prob": 0.5,
"mixmo_alpha": 4,
},
"cutmixmo-p5-r1": {
"mixmo_mix_method_name": "cutmix",
"mixmo_mix_prob": 0.5,
"mixmo_weight_root": 1,
},
"cutmixmo-p2": {
"mixmo_mix_method_name": "cutmix",
"mixmo_mix_prob": 0.2
},
"cowmixmo-p5": {
"mixmo_mix_method_name": "cow",
"mixmo_mix_prob": 0.5
},
},
"dataaugparams": {
"_default": {
"msda_mix_method": "null",
"da_method": "null",
},
"standard": {},
"daaugmix": {
"da_method": "augmix"
},
"msdamixup": {
"msda_mix_method": "mixup",
},
"msdacutmix": {
"msda_mix_method": "cutmix",
},
}
}
def use_template(
template_path,
output_path,
params,
):
"""
Open a template file and fill it with the vars in params
to write the result in output_path
"""
with open(template_path, 'r') as f_template:
template = f_template.read()
content = template % params
with open(output_path, 'w') as f_out:
f_out.write(content)
def create_templates(template_path, config_dir, dataset):
if os.path.exists(config_dir):
LOGGER.debug("Folder templates already exists")
rmtree(config_dir)
os.mkdir(config_dir)
template_output_path = os.path.join(
config_dir,
"exp_{dataset}_{networktype}_{mixmoparams}_{dataaugparams}_{scheduling}.yaml"
)
for dict_template in DICT_DATASET_CONFIG[dataset]["templates"]:
params = copy.deepcopy(DICT_DATASET_CONFIG[dataset]["shared_config"])
params.update(DICT_NETWORK[dict_template["networktype"]])
save_params = copy.deepcopy(params)
for trainingfiltering in dict_template["trainingfiltering"]:
for imixmo in trainingfiltering["mixmoparams"]:
for idataaug in trainingfiltering["dataaugparams"]:
for ische in trainingfiltering["scheduling"]:
misc.clean_update(
params, DICT_CONFIG["mixmoparams"]["_default"]
)
misc.update(params, DICT_CONFIG["mixmoparams"][imixmo], method="dirty")
misc.clean_update(
params, DICT_CONFIG["dataaugparams"]["_default"]
)
misc.update(params, DICT_CONFIG["dataaugparams"][idataaug], method="dirty")
misc.clean_update(
params, DICT_CONFIG["scheduling"][params['dataset_name']]["_default"]
)
misc.clean_update(
params, DICT_CONFIG["scheduling"][params['dataset_name']][ische]
)
# templating
output_path = template_output_path.format(**{
"dataset": dataset,
"networktype": dict_template["networktype"],
"scheduling": ische,
"mixmoparams": imixmo,
"dataaugparams": idataaug
})
if os.path.exists(output_path):
raise ValueError(output_path)
use_template(
template_path=template_path,
output_path=output_path,
params=params,
)
params = copy.deepcopy(save_params)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--template_path",
"-t",
type=str,
default="scripts/exp_mixmo_template.yaml",
help="Path to config template"
)
parser.add_argument(
"--config_dir", "-c", type=str, default="config/", help="Folder to save these new configs"
)
parser.add_argument(
"--dataset",
default="cifar100",
help="dataset name",
)
args = parser.parse_args()
misc.print_args(args)
return args
if __name__ == "__main__":
args = parse_args()
create_templates(
template_path=args.template_path,
config_dir=args.config_dir,
dataset=args.dataset,
)
``` |
{
"source": "JiarunLiu/ViT-CIFAR",
"score": 3
} |
#### File: JiarunLiu/ViT-CIFAR/layers.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchsummary
class TransformerEncoder(nn.Module):
def __init__(self, feats:int, mlp_hidden:int, head:int=8, dropout:float=0.):
super(TransformerEncoder, self).__init__()
self.la1 = nn.LayerNorm(feats)
self.msa = MultiHeadSelfAttention(feats, head=head, dropout=dropout)
self.la2 = nn.LayerNorm(feats)
self.mlp = nn.Sequential(
nn.Linear(feats, mlp_hidden),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(mlp_hidden, feats),
nn.GELU(),
nn.Dropout(dropout),
)
def forward(self, x):
out = self.msa(self.la1(x)) + x
out = self.mlp(self.la2(out)) + out
return out
class MultiHeadSelfAttention(nn.Module):
def __init__(self, feats:int, head:int=8, dropout:float=0.):
super(MultiHeadSelfAttention, self).__init__()
self.head = head
self.feats = feats
self.sqrt_d = self.feats**0.5
self.q = nn.Linear(feats, feats)
self.k = nn.Linear(feats, feats)
self.v = nn.Linear(feats, feats)
self.o = nn.Linear(feats, feats)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
b, n, f = x.size()
q = self.q(x).view(b, n, self.head, self.feats//self.head).transpose(1,2)
k = self.k(x).view(b, n, self.head, self.feats//self.head).transpose(1,2)
v = self.v(x).view(b, n, self.head, self.feats//self.head).transpose(1,2)
score = F.softmax(torch.einsum("bhif, bhjf->bhij", q, k)/self.sqrt_d, dim=-1) #(b,h,n,n)
attn = torch.einsum("bhij, bhjf->bihf", score, v) #(b,n,h,f//h)
o = self.dropout(self.o(attn.flatten(2)))
return o
class MultiHeadDepthwiseSelfAttention(nn.Module):
def __init__(self, feats:int, head:int=8, dropout:float=0):
super(MultiHeadDepthwiseSelfAttention, self).__init__()
...
def forward(self, x):
...
if __name__=="__main__":
b,n,f = 4, 16, 128
x = torch.randn(b,n,f)
# net = MultiHeadSelfAttention(f)
net = TransformerEncoder(f)
torchsummary.summary(net, (n,f))
# out = net(x)
# print(out.shape)
```
#### File: JiarunLiu/ViT-CIFAR/noisy_dataset.py
```python
from __future__ import print_function
import os
import os.path
import hashlib
import errno
import torch
import numpy as np
from numpy.testing import assert_array_almost_equal
def check_integrity(fpath, md5):
if not os.path.isfile(fpath):
return False
md5o = hashlib.md5()
with open(fpath, 'rb') as f:
# read in 1MB chunks
for chunk in iter(lambda: f.read(1024 * 1024), b''):
md5o.update(chunk)
md5c = md5o.hexdigest()
if md5c != md5:
return False
return True
def download_url(url, root, filename, md5):
from six.moves import urllib
root = os.path.expanduser(root)
fpath = os.path.join(root, filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
# downloads file
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath)
except:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath)
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
# basic function
def multiclass_noisify(y, P, random_state=0):
""" Flip classes according to transition probability matrix T.
It expects a number between 0 and the number of classes - 1.
"""
print(np.max(y), P.shape[0])
assert P.shape[0] == P.shape[1]
assert np.max(y) < P.shape[0]
# row stochastic matrix
assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))
assert (P >= 0.0).all()
m = y.shape[0]
print(m)
new_y = y.copy()
flipper = np.random.RandomState(random_state)
for idx in np.arange(m):
i = y[idx]
# draw a vector with only an 1
flipped = flipper.multinomial(1, P[i, :][0], 1)[0]
new_y[idx] = np.where(flipped == 1)[0]
return new_y
# noisify_pairflip call the function "multiclass_noisify"
def noisify_pairflip(y_train, noise, random_state=None, nb_classes=10):
"""mistakes:
flip in the pair
"""
P = np.eye(nb_classes)
n = noise
if n > 0.0:
# 0 -> 1
P[0, 0], P[0, 1] = 1. - n, n
for i in range(1, nb_classes-1):
P[i, i], P[i, i + 1] = 1. - n, n
P[nb_classes-1, nb_classes-1], P[nb_classes-1, 0] = 1. - n, n
y_train_noisy = multiclass_noisify(y_train, P=P,
random_state=random_state)
actual_noise = (y_train_noisy != y_train).mean()
assert actual_noise > 0.0
print('Actual noise %.2f' % actual_noise)
y_train = y_train_noisy
print(P)
return y_train, actual_noise
def noisify_multiclass_symmetric(y_train, noise, random_state=None, nb_classes=10):
"""mistakes:
flip in the symmetric way
"""
P = np.ones((nb_classes, nb_classes))
n = noise
P = (n / (nb_classes - 1)) * P
if n > 0.0:
# 0 -> 1
P[0, 0] = 1. - n
for i in range(1, nb_classes-1):
P[i, i] = 1. - n
P[nb_classes-1, nb_classes-1] = 1. - n
y_train_noisy = multiclass_noisify(y_train, P=P,
random_state=random_state)
actual_noise = (y_train_noisy != y_train).mean()
assert actual_noise > 0.0
print('Actual noise %.2f' % actual_noise)
y_train = y_train_noisy
print(P)
return y_train, actual_noise
def noisify(dataset='mnist', nb_classes=10, train_labels=None, noise_type=None, noise_rate=0, random_state=0):
if noise_type == 'pairflip':
train_noisy_labels, actual_noise_rate = noisify_pairflip(train_labels, noise_rate, random_state=0, nb_classes=nb_classes)
if noise_type == 'symmetric' or noise_type == 'sn':
train_noisy_labels, actual_noise_rate = noisify_multiclass_symmetric(train_labels, noise_rate, random_state=0, nb_classes=nb_classes)
return train_noisy_labels, actual_noise_rate
class DatasetWrapper(torch.utils.data.Dataset):
"""Noise Dataset Wrapper"""
def __init__(self, dataset, noise_type='clean', noise_rate=0,
yfile=None, weights_file=None, noise_train=False,
only_labeled=False, num_cls=10):
"""
Args:
dataset: the dataset to wrap, it should be an classification dataset
noise_type: how to add noise to the labels: [clean/symmetric ('sn')/pairflip]
noise_rate: noise ratio of adding noise
yfile: The directory for the "y.npy" file. Once yfile assigned, we
will load yfile as labels and the given noise option will be
neglect. The weight of each sample will set to an binary
value according to the matching result of origin labels.
weights_file: The weights for each samples, it should be an .npy
file of shape [len(dataset)] with either binary value or
probability value between [0,1]. "Specifically, all of the
unlabeled data should have zero-weight." The loaded weights
will multiply with the exists noise_or_not. So, it's ok to
give an weights for labeled data (noisy or clean).
"""
self.dataset = dataset
self.noise_type = noise_type
self.noise_rate = noise_rate
self.num_cls = num_cls
if yfile is not None:
yy = np.load(yfile)
assert len(yy) == len(dataset)
self.labels_to_use = yy
# give zero-weights for incorrect sample
self.weights = (self.labels_to_use == np.asarray(dataset.targets))
self.noise_rate = 1 - (np.sum(self.weights) / len(self.weights))
self.noise_type = "preload"
elif noise_type == "clean":
self.weights = np.ones(len(dataset))
self.labels_to_use = dataset.targets
else:
# noisify labels
train_clean_labels = np.expand_dims(np.asarray(dataset.targets), 1)
train_noisy_labels, _ = noisify(train_labels=train_clean_labels,
nb_classes=self.num_cls,
noise_type=noise_type,
noise_rate=noise_rate)
self.labels_to_use = train_noisy_labels.flatten()
assert len(self.labels_to_use) == len(dataset.targets)
self.weights = (np.transpose(self.labels_to_use) ==
np.transpose(train_clean_labels)).squeeze()
if noise_train:
self.weights = np.ones(len(dataset))
if weights_file is not None:
# weights_file can be weights.npy or labeled.npy
assert self.noise_type in ['preload', 'clean']
self.useit = np.load(weights_file)
assert len(self.useit) == len(dataset)
if self.useit.dtype == bool:
self.useit = self.useit.astype(float)
self.weights = self.weights * self.useit
if only_labeled:
print("Removing unlabeled data for training efficiency...")
origin_targets = np.asarray(dataset.targets)
origin_data = dataset.data
new_targets = origin_targets[self.weights != 0]
new_data = origin_data[self.weights != 0]
dataset.targets = new_targets
dataset.data = new_data
self.labels_to_use = np.asarray(self.labels_to_use)
self.labels_to_use = self.labels_to_use[self.weights != 0]
if weights_file is not None:
self.useit = self.useit[self.weights != 0]
self.weights = self.weights[self.weights != 0]
print("Removed {} data with 0 weights!!!".format(
len(origin_targets)-len(new_targets)))
def save_noise_labels(self, dir):
np.save(dir, np.asarray(self.labels_to_use))
def __getitem__(self, index):
# self.weights can act as per-sample weights, so a semi-supervised
# dataset (zero weight = unlabeled) can also be loaded here.
img, target_gt = self.dataset[index]
target_use = self.labels_to_use[index]
weights = self.weights[index]
# return img, target_use, target_gt, weights
return img, target_use
def __len__(self):
return len(self.dataset)
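# Illustrative check of the symmetric ("sn") noise model defined above: each label
# is flipped to one of the other classes with total probability `noise_rate`.
if __name__ == "__main__":
    labels = np.random.randint(0, 10, size=(1000, 1))
    noisy, actual_rate = noisify(nb_classes=10, train_labels=labels,
                                 noise_type='sn', noise_rate=0.2)
    print("requested 0.20, observed %.3f" % actual_rate)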
``` |
{
"source": "jiarunw/Emonie",
"score": 3
} |
#### File: Emonie/source/kerouz_CNN.py
```python
from keras.layers import *
from keras.models import Sequential
from keras.preprocessing import sequence
def model_training(vocab_size, weight_matrix, l3, X_train, Y_train, embed_dim=50, epochs=5, ):
# embedding_layer = Embedding(vocab_size, embed_dim, weights=[weight_matrix], input_length=l3, trainable=False )
# sequence_input = Input(shape=(l3,), dtype='int32')
# embedded_sequences = embedding_layer(sequence_input)
# x = Conv1D(128, 25, activation='relu', padding='same')(embedded_sequences)
# x = MaxPooling1D(25, padding='same')(x)
# x = Conv1D(128, 2, activation='relu', padding='same')(x)
# x = MaxPooling1D(2, padding='same')(x)
# x = Conv1D(128, 1, activation='relu', padding='same')(x)
# x = MaxPooling1D(35, padding='same')(x) # global max pooling
# x = Flatten()(x)
# x = Dense(128, activation='relu')(x)
# preds = Dense(20, activation='softmax')(x)
# model = model(sequence_input, preds)
# model.compile(loss='categorical_crossentropy',
# optimizer='rmsprop',
# metrics=['acc'])
# # happy learning!
# model.fit(X_train, Y_train, epochs=epochs, batch_size=128, shuffle=True, validation_split=0.15)
model = Sequential()
#model.add(InputLayer(shape=(l3,), dtype='int32', name='x_input'))
model.add(Embedding(vocab_size, embed_dim, weights=[weight_matrix], input_length=l3, trainable=False ))
model.add(Conv1D(128, 5, activation='relu', padding='same'))
model.add(MaxPooling1D(5, padding='same'))
model.add(Conv1D(128, 5, activation='relu', padding='same'))
model.add(MaxPooling1D(5, padding='same'))
model.add(Conv1D(128, 5, activation='relu', padding='same'))
model.add(MaxPooling1D(35, padding='same'))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(20, activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='rmsprop', metrics=['accuracy'])
model.summary()
model.fit(X_train, Y_train, epochs=epochs, batch_size=128, shuffle=True, validation_split=0.15)
model.evaluate(X_train, Y_train)
return model
def toknz(pred_corpus, l1, tokenizer):
seqed_corpus = tokenizer.texts_to_sequences(pred_corpus)  # serialize the corpus: map every word in each sentence to its integer index
X_out = sequence.pad_sequences(seqed_corpus, maxlen=l1, padding='post')  # pad/truncate so every sentence becomes a sequence of length 'maxlen' (here l1)
return X_out, tokenizer
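# Hedged usage sketch (corpus, labels, and GloVe weight_matrix are placeholders;
# weight_matrix must have shape (vocab_size, embed_dim) and l3 must equal the
# padded sequence length produced by toknz):
#
#     from keras.preprocessing.text import Tokenizer
#     tokenizer = Tokenizer()
#     tokenizer.fit_on_texts(train_corpus)
#     X_train, tokenizer = toknz(train_corpus, 35, tokenizer)
#     model = model_training(vocab_size, weight_matrix, 35, X_train, Y_train)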
``` |
{
"source": "jiaruonan/transferlearning",
"score": 3
} |
#### File: base/loss/mutual_info.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class Mine_estimator(nn.Module):
def __init__(self, input_dim=2048, hidden_dim=512):
super(Mine_estimator, self).__init__()
self.mine_model = Mine(input_dim, hidden_dim)
def forward(self, X, Y):
Y_shuffle = Y[torch.randperm(len(Y))]  # permute Y to approximate the product of marginals
loss_joint = self.mine_model(X, Y)
loss_marginal = self.mine_model(X, Y_shuffle)
ret = torch.mean(loss_joint) - \
torch.log(torch.mean(torch.exp(loss_marginal)))
loss = -ret
return loss
class Mine(nn.Module):
def __init__(self, input_dim=2048, hidden_dim=512):
super(Mine, self).__init__()
self.fc1_x = nn.Linear(input_dim, hidden_dim)
self.fc1_y = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, 1)
def forward(self, x, y):
h1 = F.leaky_relu(self.fc1_x(x)+self.fc1_y(y))
h2 = self.fc2(h1)
return h2
```
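A hedged single-pass sketch of the MINE estimator above (the import path is an assumption based on the file location; a meaningful mutual-information estimate would require training `Mine` over many batches, so this only illustrates shapes and sign):
```python
import torch
from base.loss.mutual_info import Mine_estimator   # path assumed from this file's location

estimator = Mine_estimator(input_dim=128, hidden_dim=64)
X = torch.randn(32, 128)
Y = torch.randn(32, 128)
loss = estimator(X, Y)        # scalar tensor: negative of the current MI lower bound
print(loss.item())
```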
#### File: base/loss/pair_dist.py
```python
import torch
import numpy as np
def pairwise_dist(X, Y):
n, d = X.shape
m, _ = Y.shape
assert d == Y.shape[1]
a = X.unsqueeze(1).expand(n, m, d)
b = Y.unsqueeze(0).expand(n, m, d)
return torch.pow(a - b, 2).sum(2)
def pairwise_dist_np(X, Y):
n, d = X.shape
m, _ = Y.shape
assert d == Y.shape[1]
a = np.expand_dims(X, 1)
b = np.expand_dims(Y, 0)
a = np.tile(a, (1, m, 1))
b = np.tile(b, (n, 1, 1))
return np.power(a - b, 2).sum(2)
def pa(X, Y):
XY = np.dot(X, Y.T)
XX = np.sum(np.square(X), axis=1)
XX = np.transpose([XX])
YY = np.sum(np.square(Y), axis=1)
dist = XX + YY - 2 * XY
return dist
if __name__ == '__main__':
import sys
args = sys.argv
data = args[0]
print(data)
# a = torch.arange(1, 7).view(2, 3)
# b = torch.arange(12, 21).view(3, 3)
# print(pairwise_dist(a, b))
# a = np.arange(1, 7).reshape((2, 3))
# b = np.arange(12, 21).reshape((3, 3))
# print(pa(a, b))
```
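The torch and numpy helpers above compute the same squared-Euclidean distance matrix; a quick hedged check (import path assumed from the file location):
```python
import numpy as np
import torch
from base.loss.pair_dist import pairwise_dist, pairwise_dist_np

X, Y = np.random.randn(4, 3), np.random.randn(5, 3)
d_np = pairwise_dist_np(X, Y)                                        # (4, 5)
d_torch = pairwise_dist(torch.from_numpy(X), torch.from_numpy(Y))
print(np.allclose(d_np, d_torch.numpy()))                            # True
```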
#### File: adarnn/dataset/data_process.py
```python
import os
import dataset.data_act as data_act
import pandas as pd
import dataset.data_weather as data_weather
import datetime
from base.loss_transfer import TransferLoss
import torch
import math
from dataset import data_process
def load_act_data(data_folder, batch_size=64, domain="1_20"):
x_train, y_train, x_test, y_test = data_act.load_data(data_folder, domain)
x_train, x_test = x_train.reshape(
(-1, x_train.shape[2], 1, x_train.shape[1])), x_test.reshape((-1, x_train.shape[2], 1, x_train.shape[1]))
transform = None
train_set = data_act.data_loader(x_train, y_train, transform)
test_set = data_act.data_loader(x_test, y_test, transform)
train_loader = data_act.DataLoader(
train_set, batch_size=batch_size, shuffle=True, drop_last=True)
test_loader = data_act.DataLoader(
test_set, batch_size=batch_size, shuffle=False)
return train_loader, train_loader, test_loader
def load_weather_data(file_path, batch_size=6, station='Changping'):
data_file = os.path.join(file_path, "PRSA_Data_1.pkl")
mean_train, std_train = data_weather.get_weather_data_statistic(data_file, station=station, start_time='2013-3-1 0:0',
end_time='2016-10-30 23:0')
train_loader = data_weather.get_weather_data(data_file, station=station, start_time='2013-3-6 0:0',
end_time='2015-5-31 23:0', batch_size=batch_size, mean=mean_train, std=std_train)
valid_train_loader = data_weather.get_weather_data(data_file, station=station, start_time='2015-6-2 0:0',
end_time='2016-6-30 23:0', batch_size=batch_size, mean=mean_train, std=std_train)
valid_vld_loader = data_weather.get_weather_data(data_file, station=station, start_time='2016-7-2 0:0',
end_time='2016-10-30 23:0', batch_size=batch_size, mean=mean_train, std=std_train)
test_loader = data_weather.get_weather_data(data_file, station=station, start_time='2016-11-2 0:0',
end_time='2017-2-28 23:0', batch_size=batch_size, mean=mean_train, std=std_train)
return train_loader, valid_train_loader, valid_vld_loader, test_loader
def get_split_time(num_domain=2, mode='pre_process', data_file = None, station = None, dis_type = 'coral'):
spilt_time = {
'2': [('2013-3-6 0:0', '2015-5-31 23:0'), ('2015-6-2 0:0', '2016-6-30 23:0')]
}
if mode == 'pre_process':
return spilt_time[str(num_domain)]
if mode == 'tdc':
return TDC(num_domain, data_file, station, dis_type = dis_type)
else:
print("error in mode")
def TDC(num_domain, data_file, station, dis_type = 'coral'):
start_time = datetime.datetime.strptime(
'2013-03-01 00:00:00', '%Y-%m-%d %H:%M:%S')
end_time = datetime.datetime.strptime(
'2016-06-30 23:00:00', '%Y-%m-%d %H:%M:%S')
num_day = (end_time - start_time).days
split_N = 10
data=pd.read_pickle(data_file)[station]
feat =data[0][0:num_day]
feat=torch.tensor(feat, dtype=torch.float32)
feat_shape_1 = feat.shape[1]
feat =feat.reshape(-1, feat.shape[2])
feat = feat.cuda()
# num_day_new = feat.shape[0]
selected = [0, 10]
candidate = [1, 2, 3, 4, 5, 6, 7, 8, 9]
start = 0
if num_domain in [2, 3, 5, 7, 10]:
while len(selected) -2 < num_domain -1:
distance_list = []
for can in candidate:
selected.append(can)
selected.sort()
dis_temp = 0
for i in range(1, len(selected)-1):
for j in range(i, len(selected)-1):
index_part1_start = start + math.floor(selected[i-1] / split_N * num_day) * feat_shape_1
index_part1_end = start + math.floor(selected[i] / split_N * num_day) * feat_shape_1
feat_part1 = feat[index_part1_start: index_part1_end]
index_part2_start = start + math.floor(selected[j] / split_N * num_day) * feat_shape_1
index_part2_end = start + math.floor(selected[j+1] / split_N * num_day) * feat_shape_1
feat_part2 = feat[index_part2_start:index_part2_end]
criterion_transder = TransferLoss(loss_type= dis_type, input_dim=feat_part1.shape[1])
dis_temp += criterion_transder.compute(feat_part1, feat_part2)
distance_list.append(dis_temp)
selected.remove(can)
can_index = distance_list.index(max(distance_list))
selected.append(candidate[can_index])
candidate.remove(candidate[can_index])
selected.sort()
res = []
for i in range(1,len(selected)):
if i == 1:
sel_start_time = start_time + datetime.timedelta(days = int(num_day / split_N * selected[i - 1]), hours = 0)
else:
sel_start_time = start_time + datetime.timedelta(days = int(num_day / split_N * selected[i - 1])+1, hours = 0)
sel_end_time = start_time + datetime.timedelta(days = int(num_day / split_N * selected[i]), hours =23)
sel_start_time = datetime.datetime.strftime(sel_start_time,'%Y-%m-%d %H:%M')
sel_end_time = datetime.datetime.strftime(sel_end_time,'%Y-%m-%d %H:%M')
res.append((sel_start_time, sel_end_time))
return res
else:
print("error in number of domain")
def load_weather_data_multi_domain(file_path, batch_size=6, station='Changping', number_domain=2, mode='pre_process', dis_type ='coral'):
# mode: 'tdc', 'pre_process'
data_file = os.path.join(file_path, "PRSA_Data_1.pkl")
mean_train, std_train = data_weather.get_weather_data_statistic(data_file, station=station, start_time='2013-3-1 0:0',
end_time='2016-10-30 23:0')
split_time_list = get_split_time(number_domain, mode=mode, data_file =data_file,station=station, dis_type = dis_type)
train_list = []
for i in range(len(split_time_list)):
time_temp = split_time_list[i]
train_loader = data_weather.get_weather_data(data_file, station=station, start_time=time_temp[0],
end_time=time_temp[1], batch_size=batch_size, mean=mean_train, std=std_train)
train_list.append(train_loader)
valid_vld_loader = data_weather.get_weather_data(data_file, station=station, start_time='2016-7-2 0:0',
end_time='2016-10-30 23:0', batch_size=batch_size, mean=mean_train, std=std_train)
test_loader = data_weather.get_weather_data(data_file, station=station, start_time='2016-11-2 0:0',
end_time='2017-2-28 23:0', batch_size=batch_size, mean=mean_train, std=std_train, shuffle=False)
return train_list, valid_vld_loader, test_loader
```
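A hedged call sketch for the multi-domain weather loader above (the data directory is hypothetical and must contain `PRSA_Data_1.pkl`; `mode='tdc'` additionally needs a CUDA device because `TDC` moves the features to the GPU):
```python
from dataset.data_process import load_weather_data_multi_domain

train_list, valid_loader, test_loader = load_weather_data_multi_domain(
    file_path="/path/to/air-quality-data",   # hypothetical
    batch_size=6,
    station="Changping",
    number_domain=2,
    mode="tdc",          # 'pre_process' uses the fixed split, 'tdc' searches for one
    dis_type="coral",
)
print(len(train_list))   # one DataLoader per temporal sub-domain
```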
#### File: adarnn/tst/multiHeadAttention.py
```python
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tst.utils import generate_local_map_mask
class MultiHeadAttention(nn.Module):
"""Multi Head Attention block from Attention is All You Need.
Given 3 inputs of shape (batch_size, K, d_model), that will be used
to compute query, keys and values, we output a self attention
tensor of shape (batch_size, K, d_model).
Parameters
----------
d_model:
Dimension of the input vector.
q:
Dimension of all query matrix.
v:
Dimension of all value matrix.
h:
Number of heads.
attention_size:
Number of backward elements to apply attention.
Deactivated if ``None``. Default is ``None``.
"""
def __init__(self,
d_model: int,
q: int,
v: int,
h: int,
attention_size: int = None):
"""Initialize the Multi Head Block."""
super().__init__()
self._h = h
self._attention_size = attention_size
# Query, keys and value matrices
self._W_q = nn.Linear(d_model, q*self._h)
self._W_k = nn.Linear(d_model, q*self._h)
self._W_v = nn.Linear(d_model, v*self._h)
# Output linear function
self._W_o = nn.Linear(self._h*v, d_model)
# Score placeholder
self._scores = None
def forward(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: Optional[str] = None) -> torch.Tensor:
"""Propagate forward the input through the MHB.
We compute for each head the queries, keys and values matrices,
followed by the Scaled Dot-Product. The result is concatenated
and returned with shape (batch_size, K, d_model).
Parameters
----------
query:
Input tensor with shape (batch_size, K, d_model) used to compute queries.
key:
Input tensor with shape (batch_size, K, d_model) used to compute keys.
value:
Input tensor with shape (batch_size, K, d_model) used to compute values.
mask:
Mask to apply on scores before computing attention.
One of ``'subsequent'``, None. Default is None.
Returns
-------
Self attention tensor with shape (batch_size, K, d_model).
"""
K = query.shape[1]
# Compute Q, K and V, concatenate heads on batch dimension
queries = torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0)
keys = torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0)
values = torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0)
# Scaled Dot Product
self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(K)
# Compute local map mask
if self._attention_size is not None:
attention_mask = generate_local_map_mask(K, self._attention_size, mask_future=False, device=self._scores.device)
self._scores = self._scores.masked_fill(attention_mask, float('-inf'))
# Compute future mask
if mask == "subsequent":
future_mask = torch.triu(torch.ones((K, K)), diagonal=1).bool()
future_mask = future_mask.to(self._scores.device)
self._scores = self._scores.masked_fill(future_mask, float('-inf'))
# Apply softmax
self._scores = F.softmax(self._scores, dim=-1)
attention = torch.bmm(self._scores, values)
# Concatenate the heads
attention_heads = torch.cat(attention.chunk(self._h, dim=0), dim=-1)
# Apply linear transformation W^O
self_attention = self._W_o(attention_heads)
return self_attention
@property
def attention_map(self) -> torch.Tensor:
"""Attention map after a forward propagation,
variable `score` in the original paper.
"""
if self._scores is None:
raise RuntimeError(
"Evaluate the model once to generate attention map")
return self._scores
class MultiHeadAttentionChunk(MultiHeadAttention):
"""Multi Head Attention block with chunk.
Given 3 inputs of shape (batch_size, K, d_model), that will be used
to compute query, keys and values, we output a self attention
tensor of shape (batch_size, K, d_model).
Queries, keys and values are divided in chunks of constant size.
Parameters
----------
d_model:
Dimension of the input vector.
q:
Dimension of all query matrix.
v:
Dimension of all value matrix.
h:
Number of heads.
attention_size:
Number of backward elements to apply attention.
Deactivated if ``None``. Default is ``None``.
chunk_size:
Size of chunks to apply attention on. Last one may be smaller (see :class:`torch.Tensor.chunk`).
Default is 168.
"""
def __init__(self,
d_model: int,
q: int,
v: int,
h: int,
attention_size: int = None,
chunk_size: Optional[int] = 168,
**kwargs):
"""Initialize the Multi Head Block."""
super().__init__(d_model, q, v, h, attention_size, **kwargs)
self._chunk_size = chunk_size
# Score mask for decoder
self._future_mask = nn.Parameter(torch.triu(torch.ones((self._chunk_size, self._chunk_size)), diagonal=1).bool(),
requires_grad=False)
if self._attention_size is not None:
self._attention_mask = nn.Parameter(generate_local_map_mask(self._chunk_size, self._attention_size),
requires_grad=False)
def forward(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: Optional[str] = None) -> torch.Tensor:
"""Propagate forward the input through the MHB.
We compute for each head the queries, keys and values matrices,
followed by the Scaled Dot-Product. The result is concatenated
and returned with shape (batch_size, K, d_model).
Parameters
----------
query:
Input tensor with shape (batch_size, K, d_model) used to compute queries.
key:
Input tensor with shape (batch_size, K, d_model) used to compute keys.
value:
Input tensor with shape (batch_size, K, d_model) used to compute values.
mask:
Mask to apply on scores before computing attention.
One of ``'subsequent'``, None. Default is None.
Returns
-------
Self attention tensor with shape (batch_size, K, d_model).
"""
K = query.shape[1]
n_chunk = K // self._chunk_size
# Compute Q, K and V, concatenate heads on batch dimension
queries = torch.cat(torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0).chunk(n_chunk, dim=1), dim=0)
keys = torch.cat(torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0).chunk(n_chunk, dim=1), dim=0)
values = torch.cat(torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0).chunk(n_chunk, dim=1), dim=0)
# Scaled Dot Product
self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(self._chunk_size)
# Compute local map mask
if self._attention_size is not None:
self._scores = self._scores.masked_fill(self._attention_mask, float('-inf'))
# Compute future mask
if mask == "subsequent":
self._scores = self._scores.masked_fill(self._future_mask, float('-inf'))
# Apply softmax
self._scores = F.softmax(self._scores, dim=-1)
attention = torch.bmm(self._scores, values)
# Concatenate the heads
attention_heads = torch.cat(torch.cat(attention.chunk(
n_chunk, dim=0), dim=1).chunk(self._h, dim=0), dim=-1)
# Apply linear transformation W^O
self_attention = self._W_o(attention_heads)
return self_attention
class MultiHeadAttentionWindow(MultiHeadAttention):
"""Multi Head Attention block with moving window.
Given 3 inputs of shape (batch_size, K, d_model), that will be used
to compute query, keys and values, we output a self attention
tensor of shape (batch_size, K, d_model).
Queries, keys and values are divided in chunks using a moving window.
Parameters
----------
d_model:
Dimension of the input vector.
q:
Dimension of all query matrix.
v:
Dimension of all value matrix.
h:
Number of heads.
attention_size:
Number of backward elements to apply attention.
Deactivated if ``None``. Default is ``None``.
window_size:
Size of the window used to extract chunks.
Default is 168
padding:
Padding around each window. Padding will be applied to input sequence.
Default is 168 // 4 = 42.
"""
def __init__(self,
d_model: int,
q: int,
v: int,
h: int,
attention_size: int = None,
window_size: Optional[int] = 168,
padding: Optional[int] = 168 // 4,
**kwargs):
"""Initialize the Multi Head Block."""
super().__init__(d_model, q, v, h, attention_size, **kwargs)
self._window_size = window_size
self._padding = padding
self._q = q
self._v = v
# Step size for the moving window
self._step = self._window_size - 2 * self._padding
# Score mask for decoder
self._future_mask = nn.Parameter(torch.triu(torch.ones((self._window_size, self._window_size)), diagonal=1).bool(),
requires_grad=False)
if self._attention_size is not None:
self._attention_mask = nn.Parameter(generate_local_map_mask(self._window_size, self._attention_size),
requires_grad=False)
def forward(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: Optional[str] = None) -> torch.Tensor:
"""Propagate forward the input through the MHB.
We compute for each head the queries, keys and values matrices,
followed by the Scaled Dot-Product. The result is concatenated
and returned with shape (batch_size, K, d_model).
Parameters
----------
query:
Input tensor with shape (batch_size, K, d_model) used to compute queries.
key:
Input tensor with shape (batch_size, K, d_model) used to compute keys.
value:
Input tensor with shape (batch_size, K, d_model) used to compute values.
mask:
Mask to apply on scores before computing attention.
One of ``'subsequent'``, None. Default is None.
Returns
-------
Self attention tensor with shape (batch_size, K, d_model).
"""
batch_size = query.shape[0]
# Apply padding to input sequence
query = F.pad(query.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)
key = F.pad(key.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)
value = F.pad(value.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)
# Compute Q, K and V, concatenate heads on batch dimension
queries = torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0)
keys = torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0)
values = torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0)
# Divide Q, K and V using a moving window
queries = queries.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)
keys = keys.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)
values = values.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._v, self._window_size)).transpose(1, 2)
# Scaled Dot Product
self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(self._window_size)
# Compute local map mask
if self._attention_size is not None:
self._scores = self._scores.masked_fill(self._attention_mask, float('-inf'))
# Compute future mask
if mask == "subsequent":
self._scores = self._scores.masked_fill(self._future_mask, float('-inf'))
# Apply softmax
self._scores = F.softmax(self._scores, dim=-1)
attention = torch.bmm(self._scores, values)
# Fold chunks back
attention = attention.reshape((batch_size*self._h, -1, self._window_size, self._v))
attention = attention[:, :, self._padding:-self._padding, :]
attention = attention.reshape((batch_size*self._h, -1, self._v))
        # Concatenate the heads
attention_heads = torch.cat(attention.chunk(self._h, dim=0), dim=-1)
# Apply linear transformation W^O
self_attention = self._W_o(attention_heads)
return self_attention
```
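A small, self-contained PyTorch sketch of just the pad / unfold / fold-back bookkeeping used in `MultiHeadAttentionWindow.forward` above (it does not instantiate the module itself, since its `MultiHeadAttention` base class and projection weights are defined elsewhere in the file; the sequence length is chosen to be a multiple of the window step):
```python
import torch
import torch.nn.functional as F

batch_size, K, d = 2, 336, 8           # K must be a multiple of the step below
window_size, padding = 168, 42
step = window_size - 2 * padding       # 84, as computed in __init__ above

x = torch.randn(batch_size, K, d)
# Replicate-pad along the time axis, exactly as done at the top of forward().
x_pad = F.pad(x.transpose(1, 2), (padding, padding), 'replicate').transpose(1, 2)
# Slice overlapping windows -> (batch_size * n_windows, window_size, d).
windows = (x_pad.unfold(dimension=1, size=window_size, step=step)
           .reshape(-1, d, window_size).transpose(1, 2))
n_windows = windows.shape[0] // batch_size
# Fold back: drop the padded border of every window and concatenate the centres.
folded = windows.reshape(batch_size, n_windows, window_size, d)
folded = folded[:, :, padding:-padding, :].reshape(batch_size, -1, d)
assert folded.shape == (batch_size, K, d)
assert torch.allclose(folded, x)       # the window centres tile the sequence exactly
```
The same bookkeeping explains why `forward` can slice `[:, :, self._padding:-self._padding, :]` after attention and still recover a tensor of the original length K.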
#### File: CSG/a-domainbed/main.py
```python
import warnings
import sys
import torch as tc
sys.path.append("..")
from utils.utils_main import main_stem, get_parser, is_ood, process_continue_run
from utils.preprocess import data_loader
from utils.utils import boolstr, ZipLongest
from DomainBed.domainbed import datasets
from DomainBed.domainbed.lib import misc
from DomainBed.domainbed.lib.fast_data_loader import InfiniteDataLoader, FastDataLoader
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# tc.autograd.set_detect_anomaly(True)
class MergeIters:
def __init__(self, *itrs):
self.itrs = itrs
self.zipped = ZipLongest(*itrs)
self.len = len(self.zipped)
def __iter__(self):
for vals in self.zipped:
yield tuple(tc.cat([val[i] for val in vals]) for i in range(len(vals[0])))
def __len__(self): return self.len
if __name__ == "__main__":
parser = get_parser()
parser.add_argument("--data_root", type = str, default = "./DomainBed/domainbed/data/")
parser.add_argument('--dataset', type = str, default = "PACS")
parser.add_argument("--testdoms", type = int, nargs = '+', default = [0])
parser.add_argument("--n_bat_test", type = int, default = None)
parser.add_argument("--traindoms", type = int, nargs = '+', default = None) # default: 'other' if `excl_test` else 'all'
parser.add_argument("--excl_test", type = boolstr, default = True) # only active when `traindoms` is None (by default)
parser.add_argument("--uda_frac", type = float, default = 1.)
parser.add_argument("--data_aug", type = boolstr, default = True)
parser.add_argument("--dim_s", type = int, default = 512)
parser.add_argument("--dim_v", type = int, default = 128)
parser.add_argument("--dim_btnk", type = int, default = 1024) # for discr_model
parser.add_argument("--dims_bb2bn", type = int, nargs = '*') # for discr_model
parser.add_argument("--dims_bn2s", type = int, nargs = '*') # for discr_model
parser.add_argument("--dims_s2y", type = int, nargs = '*') # for discr_model
parser.add_argument("--dims_bn2v", type = int, nargs = '*') # for discr_model
parser.add_argument("--vbranch", type = boolstr, default = False) # for discr_model
parser.add_argument("--dim_feat", type = int, default = 256) # for gen_model
parser.set_defaults(discrstru = "DBresnet50", genstru = "DCGANpretr",
n_bat = 32, n_epk = 40, eval_interval = 1,
optim = "Adam", lr = 5e-5, wl2 = 5e-4,
                        # momentum = .9, nesterov = True, lr_expo = .75, lr_wdatum = 6.25e-6, # only when "optim" is "SGD"
sig_s = 3e+1, sig_v = 3e+1, corr_sv = .7, tgt_mvn_prior = True, src_mvn_prior = True,
pstd_x = 1e-1, qstd_s = -1., qstd_v = -1.,
wgen = 1e-7, wsup = 0., wlogpi = 1.,
wda = .25,
domdisc_dimh = 1024, # for {dann, cdan, mdd} only
cdan_rand = False, # for cdan only
ker_alphas = [.5, 1., 2.], # for dan only
mdd_margin = 4. # for mdd only
)
ag = parser.parse_args()
if ag.wlogpi is None: ag.wlogpi = ag.wgen
if ag.n_bat_test is None: ag.n_bat_test = ag.n_bat
ag, ckpt = process_continue_run(ag)
IS_OOD = is_ood(ag.mode)
ag.data_dir = ag.data_root
ag.test_envs = ag.testdoms
ag.holdout_fraction = 1. - ag.tr_val_split
ag.uda_holdout_fraction = ag.uda_frac
ag.trial_seed = 0.
hparams = {'batch_size': ag.n_bat, 'class_balanced': False, 'data_augmentation': ag.data_aug}
# BEGIN: from 'domainbed.scripts.train.py'
if ag.dataset in vars(datasets):
dataset = vars(datasets)[ag.dataset](ag.data_dir,
ag.test_envs, hparams)
else:
raise NotImplementedError
    # (custom plugin)
if ag.traindoms is None:
ag.traindoms = list(i for i in range(len(dataset)) if not ag.excl_test or i not in ag.test_envs)
ag.traindom = ag.traindoms # for printing info in `main_stem`
# (end)
# Split each env into an 'in-split' and an 'out-split'. We'll train on
# each in-split except the test envs, and evaluate on all splits.
# To allow unsupervised domain adaptation experiments, we split each test
# env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used
# by collect_results.py to compute classification accuracies. The
    # 'out-split' is used by the Oracle model selection method. The unlabeled
# samples in 'uda-split' are passed to the algorithm at training time if
# args.task == "domain_adaptation". If we are interested in comparing
# domain generalization and domain adaptation results, then domain
# generalization algorithms should create the same 'uda-splits', which will
    # be discarded at training.
in_splits = []
out_splits = []
uda_splits = []
for env_i, env in enumerate(dataset):
uda = []
out, in_ = misc.split_dataset(env,
int(len(env)*ag.holdout_fraction),
misc.seed_hash(ag.trial_seed, env_i))
if env_i in ag.test_envs:
uda, in_ = misc.split_dataset(in_,
int(len(in_)*ag.uda_holdout_fraction),
misc.seed_hash(ag.trial_seed, env_i))
if hparams['class_balanced']:
in_weights = misc.make_weights_for_balanced_classes(in_)
out_weights = misc.make_weights_for_balanced_classes(out)
if uda is not None:
uda_weights = misc.make_weights_for_balanced_classes(uda)
else:
in_weights, out_weights, uda_weights = None, None, None
in_splits.append((in_, in_weights))
out_splits.append((out, out_weights))
if len(uda):
uda_splits.append((uda, uda_weights))
    # Now `in_splits` and `out_splits` contain the train/validation splits for all envs, and `uda_splits` contains the part of `in_splits` reserved for uda on the test envs only.
if len(uda_splits) == 0: # args.task == "domain_adaptation" and len(uda_splits) == 0:
raise ValueError("Not enough unlabeled samples for domain adaptation.")
train_loaders = [FastDataLoader( # InfiniteDataLoader(
dataset=env,
# weights=env_weights,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(in_splits)
if i in ag.traindoms]
val_loaders = [FastDataLoader( # InfiniteDataLoader(
dataset=env,
# weights=env_weights,
batch_size=ag.n_bat_test, # hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(out_splits)
if i in ag.traindoms]
uda_loaders = [FastDataLoader( # InfiniteDataLoader(
dataset=env,
# weights=env_weights,
batch_size = hparams['batch_size'] * len(train_loaders), # =hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(uda_splits)
# if i in args.test_envs
]
# eval_loaders = [FastDataLoader(
# dataset=env,
# batch_size=64,
# num_workers=dataset.N_WORKERS)
# for env, _ in (in_splits + out_splits + uda_splits)]
# eval_weights = [None for _, weights in (in_splits + out_splits + uda_splits)]
# eval_loader_names = ['env{}_in'.format(i)
# for i in range(len(in_splits))]
# eval_loader_names += ['env{}_out'.format(i)
# for i in range(len(out_splits))]
# eval_loader_names += ['env{}_uda'.format(i)
# for i in range(len(uda_splits))]
# END
archtype = "cnn"
shape_x = dataset.input_shape
dim_y = dataset.num_classes
tr_src_loader = MergeIters(*train_loaders)
val_src_loader = MergeIters(*val_loaders)
ls_ts_tgt_loader = uda_loaders
if not IS_OOD:
ls_tr_tgt_loader = uda_loaders
if IS_OOD:
main_stem( ag, ckpt, archtype, shape_x, dim_y,
tr_src_loader, val_src_loader, ls_ts_tgt_loader )
else:
for testdom, tr_tgt_loader, ts_tgt_loader in zip(
ag.testdoms, ls_tr_tgt_loader, ls_ts_tgt_loader):
main_stem( ag, ckpt, archtype, shape_x, dim_y,
tr_src_loader, val_src_loader, None,
tr_tgt_loader, ts_tgt_loader, testdom )
```
#### File: CSG/distr/tools.py
```python
import math
import torch as tc
from .utils import edic
from .base import Distr
__author__ = "<NAME>"
__version__ = "1.0.1"
__email__ = "<EMAIL>"
def elbo(p_joint: Distr, q_cond: Distr, obs: edic, n_mc: int=10, repar: bool=True) -> tc.Tensor: # [shape_bat] -> [shape_bat]
if hasattr(q_cond, "entropy"):
return q_cond.expect(lambda dc: p_joint.logp(dc,dc), obs, n_mc, repar) + q_cond.entropy(obs)
else:
return q_cond.expect(lambda dc: p_joint.logp(dc,dc) - q_cond.logp(dc,dc), obs, n_mc, repar)
def elbo_z2xy(p_zx: Distr, p_y1z: Distr, q_z1x: Distr, obs_xy: edic, n_mc: int=0, repar: bool=True) -> tc.Tensor:
""" For supervised VAE with structure x <- z -> y.
Observations are supervised (x,y) pairs.
For unsupervised observations of x data, use `elbo(p_zx, q_z1x, obs_x)` as VAE z -> x. """
if n_mc == 0:
q_y1x_logpval = q_z1x.expect(lambda dc: p_y1z.logp(dc,dc), obs_xy, 0, repar) #, reducefn=tc.logsumexp)
if hasattr(q_z1x, "entropy"): # No difference for Gaussian
expc_val = q_z1x.expect(lambda dc: p_zx.logp(dc,dc), obs_xy, 0, repar) + q_z1x.entropy(obs_xy)
else:
expc_val = q_z1x.expect(lambda dc: p_zx.logp(dc,dc) - q_z1x.logp(dc,dc), obs_xy, 0, repar)
return q_y1x_logpval + expc_val
else:
q_y1x_pval = q_z1x.expect(lambda dc: p_y1z.logp(dc,dc).exp(), obs_xy, n_mc, repar)
expc_val = q_z1x.expect(lambda dc: p_y1z.logp(dc,dc).exp() * (p_zx.logp(dc,dc) - q_z1x.logp(dc,dc)),
obs_xy, n_mc, repar)
return q_y1x_pval.log() + expc_val / q_y1x_pval
# q_y1x_logpval = q_z1x.expect(lambda dc: p_y1z.logp(dc,dc), obs_xy, n_mc, repar,
# reducefn=tc.logsumexp) - math.log(n_mc)
# expc_logval = q_z1x.expect(lambda dc: p_y1z.logp(dc,dc) + (p_zx.logp(dc,dc) - q_z1x.logp(dc,dc)).log(),
# obs_xy, n_mc, repar, reducefn=tc.logsumexp) - math.log(n_mc)
# return q_y1x_logpval + (expc_logval - q_y1x_logpval).exp()
def elbo_z2xy_twist(pt_zx: Distr, p_y1z: Distr, p_z: Distr, pt_z: Distr, qt_z1x: Distr, obs_xy: edic, n_mc: int=0, repar: bool=True) -> tc.Tensor:
vwei_p_y1z_logp = lambda dc: p_z.logp(dc,dc) - pt_z.logp(dc,dc) + p_y1z.logp(dc,dc) # z, y:
if n_mc == 0:
r_y1x_logpval = qt_z1x.expect(vwei_p_y1z_logp, obs_xy, 0, repar) #, reducefn=tc.logsumexp)
if hasattr(qt_z1x, "entropy"): # No difference for Gaussian
expc_val = qt_z1x.expect(lambda dc: pt_zx.logp(dc,dc), obs_xy, 0, repar) + qt_z1x.entropy(obs_xy)
else:
expc_val = qt_z1x.expect(lambda dc: pt_zx.logp(dc,dc) - qt_z1x.logp(dc,dc), obs_xy, 0, repar)
return r_y1x_logpval + expc_val
else:
r_y1x_pval = qt_z1x.expect(lambda dc: vwei_p_y1z_logp(dc).exp(), obs_xy, n_mc, repar)
expc_val = qt_z1x.expect( lambda dc: # z, x, y:
vwei_p_y1z_logp(dc).exp() * (pt_zx.logp(dc,dc) - qt_z1x.logp(dc,dc)),
obs_xy, n_mc, repar)
return r_y1x_pval.log() + expc_val / r_y1x_pval
# r_y1x_logpval = qt_z1x.expect(vwei_p_y1z_logp, obs_xy, n_mc, repar,
# reducefn=tc.logsumexp) - math.log(n_mc) # z, y:
# expc_logval = qt_z1x.expect(lambda dc: # z, x, y:
# vwei_p_y1z_logp(dc) + (pt_zx.logp(dc,dc) - qt_z1x.logp(dc,dc)).log(),
# obs_xy, n_mc, repar, reducefn=tc.logsumexp) - math.log(n_mc)
# return r_y1x_logpval + (expc_logval - r_y1x_logpval).exp()
def elbo_zy2x(p_zyx: Distr, q_y1x: Distr, q_z1xy: Distr, obs_x: edic, n_mc: int=0, repar: bool=True) -> tc.Tensor:
""" For supervised VAE with structure z -> x <- y (Kingma's semi-supervised VAE, M2). (z,y) correlation also allowed.
Observations are unsupervised x data.
For supervised observations of (x,y) pairs, use `elbo(p_zyx, q_z1xy, obs_xy)` as VAE z -> (x,y). """
if hasattr(q_y1x, "entropy"):
return q_y1x.expect(lambda dc: elbo(p_zyx, q_z1xy, dc, n_mc, repar),
obs_x, n_mc, repar) + q_y1x.entropy(obs_x)
else:
return q_y1x.expect(lambda dc: elbo(p_zyx, q_z1xy, dc, n_mc, repar) - q_y1x.logp(dc,dc),
obs_x, n_mc, repar)
```
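For reference, the two branches of the `elbo` helper above compute the same standard quantity; stated here as a well-known identity rather than anything specific to this file:

$$\mathrm{ELBO}(x) = \mathbb{E}_{q(z\mid x)}\bigl[\log p(z,x) - \log q(z\mid x)\bigr] = \mathbb{E}_{q(z\mid x)}\bigl[\log p(z,x)\bigr] + \mathrm{H}\bigl[q(z\mid x)\bigr],$$

with the entropy form taken whenever the variational distribution exposes a closed-form `entropy`, and the expectation estimated by `expect` with `n_mc` Monte-Carlo samples otherwise.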
#### File: CSG/test/distr_test.py
```python
import sys
import torch as tc
sys.path.append('..')
import distr as ds
from distr.utils import expand_front, swap_dim_ranges
'''Test cases of the 'distr' package.
'''
__author__ = "<NAME>"
__email__ = "<EMAIL>"
shape_x = (1,2)
shape_bat = (3,4)
device = tc.device("cuda:0" if tc.cuda.is_available() else "cpu")
ds.Distr.default_device = device
def test_fun(title, p_z, p_x1z):
print(title)
print("p_z:", p_z.names, p_z.parents)
print("p_x1z:", p_x1z.names, p_x1z.parents)
p_zx = p_z * p_x1z
print("p_zx:", p_zx.names, p_zx.parents)
smp_z = p_z.draw(shape_bat)
print("sample shape z:", smp_z['z'].shape)
smp_x1z = p_x1z.draw((), smp_z)
print("sample shape x:", smp_x1z['x'].shape)
print("logp match:", tc.allclose(
p_z.logp(smp_z) + p_x1z.logp(smp_x1z, smp_z),
p_zx.logp(smp_z|smp_x1z) ))
smp_zx = p_zx.draw(shape_bat)
print("sample shape z:", smp_zx['z'].shape)
print("sample shape x:", smp_zx['x'].shape)
print("logp match:", tc.allclose(
p_z.logp(smp_zx) + p_x1z.logp(smp_zx, smp_zx),
p_zx.logp(smp_zx) ))
print("logp_cartes shape:", p_x1z.logp_cartes(smp_x1z, smp_z).shape)
print()
ds.Distr.clear()
# Normal
ndim_x = len(shape_x)
test_fun("Normal:",
p_z = ds.Normal('z', 0., 1.),
p_x1z = ds.Normal('x', shape = shape_x, mean =
lambda z: swap_dim_ranges( expand_front(z, shape_x), (0, ndim_x), (ndim_x, ndim_x+z.ndim) ),
std = 1.
))
# MVNormal
test_fun("MVNormal:",
p_z = ds.MVNormal('z', 0., 1.),
p_x1z = ds.MVNormal('x', shape = shape_x, mean =
lambda z: swap_dim_ranges( expand_front(z, shape_x).squeeze(-1), (0, ndim_x), (ndim_x, ndim_x+z.ndim-1) ),
cov = 1.
))
# Catg
ncat_z = 3
ncat_x = 4
w_z = tc.rand(ncat_z)
w_z = w_z / w_z.sum()
w_x = tc.rand((ncat_z,) + shape_x + (ncat_x,), device=device)
w_x = w_x / w_x.sum(dim=-1, keepdim=True)
test_fun("Catg:",
p_z = ds.Catg('z', probs = w_z),
p_x1z = ds.Catg('x', shape = shape_x, probs =
lambda z: w_x.index_select(dim=0, index=z.flatten()).reshape(z.shape + w_x.shape[1:])
))
# Bern
w_x = tc.rand(shape_x, device=device)
w_x = tc.stack([1-w_x, w_x], dim=0)
test_fun("Bern:",
p_z = ds.Bern('z', probs = tc.rand(())),
p_x1z = ds.Bern('x', shape = shape_x, probs =
lambda z: w_x.index_select(dim=0, index=z.flatten()).reshape(z.shape + w_x.shape[1:])
))
```
#### File: utils/preprocess/data_loader.py
```python
from torchvision import datasets, transforms
import torch
from torch.utils import data
import numpy as np
from torchvision import transforms
import os
from PIL import Image
from torch.utils.data.sampler import SubsetRandomSampler
from skimage import io
class PlaceCrop(object):
def __init__(self, size, start_x, start_y):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.start_x = start_x
self.start_y = start_y
def __call__(self, img):
th, tw = self.size
return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))
class ResizeImage():
def __init__(self, size):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
th, tw = self.size
return img.resize((th, tw))
class myDataset(data.Dataset):
def __init__(self, root, transform=None, train=True):
self.train = train
class_dirs = [os.path.join(root, i) for i in os.listdir(root)]
imgs = []
for i in class_dirs:
imgs += [os.path.join(i, img) for img in os.listdir(i)]
np.random.shuffle(imgs)
        imgs_num = len(imgs)
        # train/val split of the shuffled images: train takes the first 30%
        if self.train:
            self.imgs = imgs[:int(0.3 * imgs_num)]
        else:
            self.imgs = imgs[int(0.3 * imgs_num):]
if transform:
self.transforms = transforms.Compose(
[transforms.Resize([256, 256]),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()])
else:
start_center = (256 - 224 - 1) / 2
self.transforms = transforms.Compose(
[transforms.Resize([224, 224]),
PlaceCrop(224, start_center, start_center),
transforms.ToTensor()])
def __getitem__(self, index):
img_path = self.imgs[index]
label = int(img_path.strip().split('/')[10])
print(img_path, label)
#data = Image.open(img_path)
data = io.imread(img_path)
data = Image.fromarray(data)
if data.getbands()[0] == 'L':
data = data.convert('RGB')
data = self.transforms(data)
return data, label
def __len__(self):
return len(self.imgs)
def load_training(root_path, domain, batch_size, kwargs, train_val_split=.5, rand_split=True):
kwargs_fin = dict(shuffle=True, drop_last=True)
kwargs_fin.update(kwargs)
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transform = transforms.Compose(
[ResizeImage(256),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
data = datasets.ImageFolder(root=os.path.join(
root_path, domain), transform=transform)
if train_val_split <= 0:
train_loader = torch.utils.data.DataLoader(
data, batch_size=batch_size, **kwargs_fin)
return train_loader
else:
train_loader, val_loader = load_train_valid_split(
data, batch_size, kwargs_fin, val_ratio=1.-train_val_split, rand_split=rand_split)
return train_loader, val_loader
def load_testing(root_path, domain, batch_size, kwargs):
kwargs_fin = dict(shuffle=False, drop_last=False)
kwargs_fin.update(kwargs)
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
start_center = (256 - 224 - 1) / 2
transform = transforms.Compose(
[ResizeImage(256),
PlaceCrop(224, start_center, start_center),
transforms.ToTensor(),
normalize])
dataset = datasets.ImageFolder(root=os.path.join(
root_path, domain), transform=transform)
test_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, **kwargs_fin)
return test_loader
def load_train_valid_split(dataset, batch_size, kwargs, val_ratio=0.4, rand_split=True):
dataset_size = len(dataset)
indices = list(range(dataset_size))
if rand_split: np.random.shuffle(indices)
len_val = int(np.floor(val_ratio * dataset_size))
train_indices, val_indices = indices[len_val:], indices[:len_val]
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
__ = kwargs.pop('shuffle', None)
__ = kwargs.pop('drop_last', None)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
sampler=train_sampler, **kwargs, drop_last=True)
validation_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
sampler=valid_sampler, **kwargs, drop_last=True)
return train_loader, validation_loader
def load_data(root_path, source_dir, target_dir, batch_size):
kwargs = {'num_workers': 4, 'pin_memory': True}
source_loader = load_training(
root_path, source_dir, batch_size, kwargs)
target_loader = load_training(
root_path, target_dir, batch_size, kwargs)
test_loader = load_testing(
root_path, target_dir, batch_size, kwargs)
return source_loader, target_loader, test_loader
def load_all_test(root_path, dataset, batch_size, train, kwargs):
ls = []
domains = {'Office-31': ['amazon', 'dslr', 'webcam'],
'Office-Home': ['Art', 'Clipart', 'Product', 'RealWorld']}
for dom in domains[dataset]:
if train:
loader = load_training(root_path, dom, batch_size, kwargs, train_val_split=-1)
else:
loader = load_testing(root_path, dom, batch_size, kwargs)
ls.append(loader)
return ls
```
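A minimal sketch of the index-based split performed by `load_train_valid_split`, run on a toy `TensorDataset` so no image folders are required (it assumes the function above is in scope, e.g. the snippet lives in the same module):
```python
import torch
from torch.utils.data import TensorDataset

# 100 toy samples; with the default val_ratio=0.4 and batch_size=10,
# SubsetRandomSampler receives 60 train / 40 val indices, giving 6 and 4
# batches respectively (both loaders use drop_last=True).
toy = TensorDataset(torch.arange(100).float().unsqueeze(1),
                    torch.zeros(100, dtype=torch.long))
train_loader, val_loader = load_train_valid_split(toy, batch_size=10, kwargs={})
print(len(train_loader), len(val_loader))  # 6 4
```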
#### File: deep/DAAN/data_loader.py
```python
from torchvision import datasets, transforms
import torch
import numpy as np
from torchvision import transforms
import os
from PIL import Image, ImageOps
class ResizeImage():
def __init__(self, size):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
th, tw = self.size
return img.resize((th, tw))
class PlaceCrop(object):
def __init__(self, size, start_x, start_y):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.start_x = start_x
self.start_y = start_y
def __call__(self, img):
th, tw = self.size
return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))
def load_training(root_path, dir, batch_size, kwargs):
transform = transforms.Compose(
[transforms.Resize([256, 256]),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()])
data = datasets.ImageFolder(root=root_path + dir, transform=transform)
train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True, **kwargs)
return train_loader
def load_testing(root_path, dir, batch_size, kwargs):
start_center = (256 - 224 - 1) / 2
transform = transforms.Compose(
[transforms.Resize([224, 224]),
PlaceCrop(224, start_center, start_center),
transforms.ToTensor()])
data = datasets.ImageFolder(root=root_path + dir, transform=transform)
test_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False, drop_last=False, **kwargs)
return test_loader
```
#### File: deep/DeepMEDA/deep_meda.py
```python
import torch
import torch.nn as nn
import ResNet
import mmd
import dynamic_factor
class DeepMEDA(nn.Module):
def __init__(self, num_classes=31, bottle_neck=True):
super(DeepMEDA, self).__init__()
self.feature_layers = ResNet.resnet50(True)
self.mmd_loss = mmd.MMD_loss()
self.bottle_neck = bottle_neck
if bottle_neck:
self.bottle = nn.Linear(2048, 256)
self.cls_fc = nn.Linear(256, num_classes)
else:
self.cls_fc = nn.Linear(2048, num_classes)
def forward(self, source, target, s_label):
source = self.feature_layers(source)
if self.bottle_neck:
source = self.bottle(source)
s_pred = self.cls_fc(source)
target = self.feature_layers(target)
if self.bottle_neck:
target = self.bottle(target)
t_label = self.cls_fc(target)
loss_c = self.mmd_loss.conditional(source, target, s_label, torch.nn.functional.softmax(t_label, dim=1))
loss_m = self.mmd_loss.marginal(source, target)
mu = dynamic_factor.estimate_mu(source.detach().cpu().numpy(), s_label.detach().cpu().numpy(), target.detach().cpu().numpy(), torch.max(t_label, 1)[1].detach().cpu().numpy())
return s_pred, loss_c, loss_m, mu
def predict(self, x):
x = self.feature_layers(x)
if self.bottle_neck:
x = self.bottle(x)
return self.cls_fc(x)
```
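The forward pass above returns the source predictions together with the conditional and marginal MMD terms and the dynamic factor `mu`, but not a combined loss. Below is a hedged sketch of how such pieces are typically merged in a MEDA-style dynamic-distribution-adaptation objective; the trade-off weight `lamb` and all numbers are illustrative and not taken from this repository's training script:
```python
import torch

# Toy scalars standing in for (loss_c, loss_m, mu) as returned by DeepMEDA.forward()
# and for the source cross-entropy; `lamb` is a hypothetical trade-off weight.
cls_loss = torch.tensor(1.2)
loss_c, loss_m, mu, lamb = torch.tensor(0.8), torch.tensor(0.5), 0.3, 0.5
transfer_loss = (1 - mu) * loss_m + mu * loss_c   # dynamic marginal/conditional mix
total_loss = cls_loss + lamb * transfer_loss
print(float(total_loss))
```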
#### File: alg/algs/MLDG.py
```python
import torch
import copy
import torch.nn.functional as F
from alg.opt import *
import torch.autograd as autograd
from datautil.util import random_pairs_of_minibatches_by_domainperm
from alg.algs.ERM import ERM
class MLDG(ERM):
def __init__(self, args):
super(MLDG, self).__init__(args)
self.args = args
def update(self, minibatches, opt, sch):
"""
For computational efficiency, we do not compute second derivatives.
"""
num_mb = len(minibatches)
objective = 0
opt.zero_grad()
for p in self.network.parameters():
if p.grad is None:
p.grad = torch.zeros_like(p)
for (xi, yi), (xj, yj) in random_pairs_of_minibatches_by_domainperm(minibatches):
xi, yi, xj, yj = xi.cuda().float(), yi.cuda(
).long(), xj.cuda().float(), yj.cuda().long()
inner_net = copy.deepcopy(self.network)
inner_opt = get_optimizer(inner_net, self.args, True)
inner_sch = get_scheduler(inner_opt, self.args)
inner_obj = F.cross_entropy(inner_net(xi), yi)
inner_opt.zero_grad()
inner_obj.backward()
inner_opt.step()
if inner_sch:
inner_sch.step()
for p_tgt, p_src in zip(self.network.parameters(),
inner_net.parameters()):
if p_src.grad is not None:
p_tgt.grad.data.add_(p_src.grad.data / num_mb)
objective += inner_obj.item()
loss_inner_j = F.cross_entropy(inner_net(xj), yj)
grad_inner_j = autograd.grad(loss_inner_j, inner_net.parameters(),
allow_unused=True)
objective += (self.args.mldg_beta * loss_inner_j).item()
for p, g_j in zip(self.network.parameters(), grad_inner_j):
if g_j is not None:
p.grad.data.add_(
self.args.mldg_beta * g_j.data / num_mb)
objective /= len(minibatches)
opt.step()
if sch:
sch.step()
return {'total': objective}
```
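The update above is the first-order variant of the MLDG meta-objective. Writing the meta-train loss as F and the held-out meta-test loss as G (a standard statement of MLDG, not quoted from this repository), the objective being approximated is

$$\min_\theta \; F(\theta) + \beta \, G\bigl(\theta - \alpha \nabla_\theta F(\theta)\bigr),$$

and the loop accumulates $\nabla_\theta F(\theta) + \beta\,\nabla_{\theta'} G(\theta')$ evaluated at the inner-updated weights $\theta' = \theta - \alpha \nabla_\theta F(\theta)$, i.e. gradients are not propagated back through the inner step — which is what the docstring's remark about skipping second derivatives refers to.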
#### File: feature_extractor/for_image_data/models.py
```python
import torch
import torch.nn as nn
import backbone
class Network(nn.Module):
def __init__(self, base_net='alexnet', n_class=31):
super(Network, self).__init__()
self.n_class = n_class
self.base_network = backbone.network_dict[base_net]()
self.classifier_layer = nn.Linear(
self.base_network.output_num(), n_class)
self.classifier_layer.weight.data.normal_(0, 0.005)
self.classifier_layer.bias.data.fill_(0.1)
def forward(self, x):
features = self.base_network(x)
clf = self.classifier_layer(features)
return clf
def get_features(self, x):
features = self.base_network(x)
return features
```
#### File: traditional/BDA/BDA.py
```python
import numpy as np
import scipy.io
import scipy.linalg
import sklearn.metrics
import sklearn.neighbors
from sklearn import metrics
from sklearn import svm
def kernel(ker, X1, X2, gamma):
K = None
if not ker or ker == 'primal':
K = X1
elif ker == 'linear':
if X2 is not None:
K = sklearn.metrics.pairwise.linear_kernel(
np.asarray(X1).T, np.asarray(X2).T)
else:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T)
elif ker == 'rbf':
if X2 is not None:
K = sklearn.metrics.pairwise.rbf_kernel(
np.asarray(X1).T, np.asarray(X2).T, gamma)
else:
K = sklearn.metrics.pairwise.rbf_kernel(
np.asarray(X1).T, None, gamma)
return K
def proxy_a_distance(source_X, target_X):
"""
Compute the Proxy-A-Distance of a source/target representation
"""
nb_source = np.shape(source_X)[0]
nb_target = np.shape(target_X)[0]
train_X = np.vstack((source_X, target_X))
train_Y = np.hstack((np.zeros(nb_source, dtype=int),
np.ones(nb_target, dtype=int)))
clf = svm.LinearSVC(random_state=0)
clf.fit(train_X, train_Y)
y_pred = clf.predict(train_X)
error = metrics.mean_absolute_error(train_Y, y_pred)
dist = 2 * (1 - 2 * error)
return dist
def estimate_mu(_X1, _Y1, _X2, _Y2):
adist_m = proxy_a_distance(_X1, _X2)
C = len(np.unique(_Y1))
epsilon = 1e-3
list_adist_c = []
for i in range(1, C + 1):
ind_i, ind_j = np.where(_Y1 == i), np.where(_Y2 == i)
Xsi = _X1[ind_i[0], :]
Xtj = _X2[ind_j[0], :]
adist_i = proxy_a_distance(Xsi, Xtj)
list_adist_c.append(adist_i)
adist_c = sum(list_adist_c) / C
mu = adist_c / (adist_c + adist_m)
if mu > 1:
mu = 1
if mu < epsilon:
mu = 0
return mu
class BDA:
def __init__(self, kernel_type='primal', dim=30, lamb=1, mu=0.5, gamma=1, T=10, mode='BDA', estimate_mu=False):
'''
Init func
:param kernel_type: kernel, values: 'primal' | 'linear' | 'rbf'
:param dim: dimension after transfer
:param lamb: lambda value in equation
        :param mu: balance factor mu. Default is 0.5; if `estimate_mu` is True, it is estimated automatically using the A-distance
:param gamma: kernel bandwidth for rbf kernel
:param T: iteration number
:param mode: 'BDA' | 'WBDA'
        :param estimate_mu: True | False, if you want to automatically estimate mu instead of manually setting it
'''
self.kernel_type = kernel_type
self.dim = dim
self.lamb = lamb
self.mu = mu
self.gamma = gamma
self.T = T
self.mode = mode
self.estimate_mu = estimate_mu
def fit_predict(self, Xs, Ys, Xt, Yt):
'''
Transform and Predict using 1NN as JDA paper did
:param Xs: ns * n_feature, source feature
:param Ys: ns * 1, source label
:param Xt: nt * n_feature, target feature
:param Yt: nt * 1, target label
:return: acc, y_pred, list_acc
'''
list_acc = []
X = np.hstack((Xs.T, Xt.T))
X /= np.linalg.norm(X, axis=0)
m, n = X.shape
ns, nt = len(Xs), len(Xt)
e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
C = len(np.unique(Ys))
H = np.eye(n) - 1 / n * np.ones((n, n))
mu = self.mu
M = 0
Y_tar_pseudo = None
Xs_new = None
for t in range(self.T):
N = 0
M0 = e * e.T * C
if Y_tar_pseudo is not None and len(Y_tar_pseudo) == nt:
for c in range(1, C + 1):
e = np.zeros((n, 1))
Ns = len(Ys[np.where(Ys == c)])
Nt = len(Y_tar_pseudo[np.where(Y_tar_pseudo == c)])
if self.mode == 'WBDA':
Ps = Ns / len(Ys)
Pt = Nt / len(Y_tar_pseudo)
alpha = Pt / Ps
mu = 1
else:
alpha = 1
tt = Ys == c
e[np.where(tt == True)] = 1 / Ns
yy = Y_tar_pseudo == c
ind = np.where(yy == True)
inds = [item + ns for item in ind]
e[tuple(inds)] = -alpha / Nt
e[np.isinf(e)] = 0
N = N + np.dot(e, e.T)
# In BDA, mu can be set or automatically estimated using A-distance
# In WBDA, we find that setting mu=1 is enough
if self.estimate_mu and self.mode == 'BDA':
if Xs_new is not None:
mu = estimate_mu(Xs_new, Ys, Xt_new, Y_tar_pseudo)
else:
mu = 0
M = (1 - mu) * M0 + mu * N
M /= np.linalg.norm(M, 'fro')
K = kernel(self.kernel_type, X, None, gamma=self.gamma)
n_eye = m if self.kernel_type == 'primal' else n
a, b = K @ M @ K.T + self.lamb * np.eye(n_eye), K @ H @ K.T
w, V = scipy.linalg.eig(a, b)
ind = np.argsort(w)
A = V[:, ind[:self.dim]]
Z = A.T @ K
Z /= np.linalg.norm(Z, axis=0)
Xs_new, Xt_new = Z[:, :ns].T, Z[:, ns:].T
clf = sklearn.neighbors.KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs_new, Ys.ravel())
Y_tar_pseudo = clf.predict(Xt_new)
acc = sklearn.metrics.accuracy_score(Yt, Y_tar_pseudo)
list_acc.append(acc)
print('{} iteration [{}/{}]: Acc: {:.4f}'.format(self.mode, t + 1, self.T, acc))
return acc, Y_tar_pseudo, list_acc
if __name__ == '__main__':
domains = ['caltech.mat', 'amazon.mat', 'webcam.mat', 'dslr.mat']
i, j = 0, 1 # Caltech -> Amazon
src, tar = domains[i], domains[j]
src_domain, tar_domain = scipy.io.loadmat(src), scipy.io.loadmat(tar)
Xs, Ys, Xt, Yt = src_domain['feas'], src_domain['label'], tar_domain['feas'], tar_domain['label']
bda = BDA(kernel_type='primal', dim=30, lamb=1, mu=0.5,
mode='BDA', gamma=1, estimate_mu=False)
acc, ypre, list_acc = bda.fit_predict(Xs, Ys, Xt, Yt)
print(acc)
```
#### File: code/traditional/KMM.py
```python
import numpy as np
import sklearn.metrics
from cvxopt import matrix, solvers
import os
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--norm', action='store_true')
args = parser.parse_args()
def kernel(ker, X1, X2, gamma):
K = None
if ker == 'linear':
if X2 is not None:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1), np.asarray(X2))
else:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1))
elif ker == 'rbf':
if X2 is not None:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1), np.asarray(X2), gamma)
else:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1), None, gamma)
return K
class KMM:
def __init__(self, kernel_type='linear', gamma=1.0, B=1.0, eps=None):
'''
Initialization function
:param kernel_type: 'linear' | 'rbf'
:param gamma: kernel bandwidth for rbf kernel
:param B: bound for beta
:param eps: bound for sigma_beta
'''
self.kernel_type = kernel_type
self.gamma = gamma
self.B = B
self.eps = eps
def fit(self, Xs, Xt):
'''
Fit source and target using KMM (compute the coefficients)
:param Xs: ns * dim
:param Xt: nt * dim
:return: Coefficients (Pt / Ps) value vector (Beta in the paper)
'''
ns = Xs.shape[0]
nt = Xt.shape[0]
        if self.eps is None:
self.eps = self.B / np.sqrt(ns)
K = kernel(self.kernel_type, Xs, None, self.gamma)
kappa = np.sum(kernel(self.kernel_type, Xs, Xt, self.gamma) * float(ns) / float(nt), axis=1)
K = matrix(K.astype(np.double))
kappa = matrix(kappa.astype(np.double))
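        # The QP below encodes the standard KMM problem:
        #   minimize 0.5 * beta^T K beta - kappa^T beta
        #   subject to 0 <= beta_i <= B and |sum_i beta_i - ns| <= ns * eps.
        # The four stacked blocks of G (and matching entries of h) express, in order:
        # sum(beta) <= ns*(1+eps), -sum(beta) <= ns*(eps-1), beta <= B, -beta <= 0.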
G = matrix(np.r_[np.ones((1, ns)), -np.ones((1, ns)), np.eye(ns), -np.eye(ns)])
h = matrix(np.r_[ns * (1 + self.eps), ns * (self.eps - 1), self.B * np.ones((ns,)), np.zeros((ns,))])
sol = solvers.qp(K, -kappa, G, h)
beta = np.array(sol['x'])
return beta
def load_data(folder, domain):
from scipy import io
data = io.loadmat(os.path.join(folder, domain + '_fc6.mat'))
return data['fts'], data['labels']
def knn_classify(Xs, Ys, Xt, Yt, k=1, norm=False):
model = KNeighborsClassifier(n_neighbors=k)
Ys = Ys.ravel()
Yt = Yt.ravel()
if norm:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
Xs = scaler.fit_transform(Xs)
Xt = scaler.fit_transform(Xt)
model.fit(Xs, Ys)
Yt_pred = model.predict(Xt)
acc = accuracy_score(Yt, Yt_pred)
print(f'Accuracy using kNN: {acc * 100:.2f}%')
if __name__ == "__main__":
# download the dataset here: https://www.jianguoyun.com/p/DcNAUg0QmN7PCBiF9asD (Password: <PASSWORD>)
folder = '/home/jindwang/mine/office31'
src_domain = 'amazon'
tar_domain = 'webcam'
Xs, Ys = load_data(folder, src_domain)
Xt, Yt = load_data(folder, tar_domain)
print('Source:', src_domain, Xs.shape, Ys.shape)
print('Target:', tar_domain, Xt.shape, Yt.shape)
kmm = KMM(kernel_type='rbf', B=10)
beta = kmm.fit(Xs, Xt)
print(beta)
print(beta.shape)
Xs_new = beta * Xs
knn_classify(Xs_new, Ys, Xt, Yt, k=1, norm=args.norm)
```
#### File: traditional/pyEasyTL/EasyTL.py
```python
import numpy as np
from intra_alignment import CORAL_map, GFK_map, PCA_map
# from label_prop import label_prop
from label_prop_v2 import label_prop
def get_cosine_dist(A, B):
B = np.reshape(B, (1, -1))
if A.shape[1] == 1:
A = np.hstack((A, np.zeros((A.shape[0], 1))))
B = np.hstack((B, np.zeros((B.shape[0], 1))))
aa = np.sum(np.multiply(A, A), axis=1).reshape(-1, 1)
bb = np.sum(np.multiply(B, B), axis=1).reshape(-1, 1)
ab = A @ B.T
# to avoid NaN for zero norm
aa[aa==0] = 1
bb[bb==0] = 1
D = np.real(np.ones((A.shape[0], B.shape[0])) - np.multiply((1/np.sqrt(np.kron(aa, bb.T))), ab))
return D
def get_ma_dist(A, B):
Y = A.copy()
X = B.copy()
S = np.cov(X.T)
try:
SI = np.linalg.inv(S)
except:
print("Singular Matrix: using np.linalg.pinv")
SI = np.linalg.pinv(S)
mu = np.mean(X, axis=0)
diff = Y - mu
Dct_c = np.diag(diff @ SI @ diff.T)
return Dct_c
def get_class_center(Xs,Ys,Xt,dist):
source_class_center = np.array([])
Dct = np.array([])
for i in np.unique(Ys):
sel_mask = Ys == i
X_i = Xs[sel_mask.flatten()]
mean_i = np.mean(X_i, axis=0)
if len(source_class_center) == 0:
source_class_center = mean_i.reshape(-1, 1)
else:
source_class_center = np.hstack((source_class_center, mean_i.reshape(-1, 1)))
if dist == "ma":
Dct_c = get_ma_dist(Xt, X_i)
elif dist == "euclidean":
Dct_c = np.sqrt(np.nansum((mean_i - Xt)**2, axis=1))
elif dist == "sqeuc":
Dct_c = np.nansum((mean_i - Xt)**2, axis=1)
elif dist == "cosine":
Dct_c = get_cosine_dist(Xt, mean_i)
elif dist == "rbf":
Dct_c = np.nansum((mean_i - Xt)**2, axis=1)
            Dct_c = np.exp(-Dct_c / 1.0)  # RBF similarity with unit bandwidth
if len(Dct) == 0:
Dct = Dct_c.reshape(-1, 1)
else:
Dct = np.hstack((Dct, Dct_c.reshape(-1, 1)))
return source_class_center, Dct
def EasyTL(Xs,Ys,Xt,Yt,intra_align="coral",dist="euclidean",lp="linear"):
# Inputs:
# Xs : source data, ns * m
# Ys : source label, ns * 1
# Xt : target data, nt * m
# Yt : target label, nt * 1
# The following inputs are not necessary
# intra_align : intra-domain alignment: coral(default)|gfk|pca|raw
# dist : distance: Euclidean(default)|ma(Mahalanobis)|cosine|rbf
# lp : linear(default)|binary
# Outputs:
# acc : final accuracy
# y_pred : predictions for target domain
# Reference:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
# Easy Transfer Learning By Exploiting Intra-domain Structures.
# IEEE International Conference on Multimedia & Expo (ICME) 2019.
C = len(np.unique(Ys))
if C > np.max(Ys):
Ys += 1
Yt += 1
m = len(Yt)
if intra_align == "raw":
print('EasyTL using raw feature...')
elif intra_align == "pca":
print('EasyTL using PCA...')
print('Not implemented yet, using raw feature')
#Xs, Xt = PCA_map(Xs, Xt)
elif intra_align == "gfk":
print('EasyTL using GFK...')
print('Not implemented yet, using raw feature')
#Xs, Xt = GFK_map(Xs, Xt)
elif intra_align == "coral":
print('EasyTL using CORAL...')
Xs = CORAL_map(Xs, Xt)
_, Dct = get_class_center(Xs,Ys,Xt,dist)
print('Start intra-domain programming...')
Mcj = label_prop(C,m,Dct,lp)
y_pred = np.argmax(Mcj, axis=1) + 1
    acc = np.mean(y_pred == Yt.flatten())
return acc, y_pred
``` |
{
"source": "jiasenlu/alfred",
"score": 2
} |
#### File: gen/utils/replay_json.py
```python
import json
def replay_json(env, json_file):
# load json data
with open(json_file) as f:
traj_data = json.load(f)
# setup
scene_num = traj_data['scene']['scene_num']
object_poses = traj_data['scene']['object_poses']
dirty_and_empty = traj_data['scene']['dirty_and_empty']
object_toggles = traj_data['scene']['object_toggles']
scene_name = 'FloorPlan%d' % scene_num
env.reset(scene_name)
env.restore_scene(object_poses, object_toggles, dirty_and_empty)
# initialize
event = env.step(dict(traj_data['scene']['init_action']))
# print("Task: %s" % (traj_data['template']['task_desc']))
steps_taken = 0
for ll_action in traj_data['plan']['low_actions']:
hl_action_idx, traj_api_cmd, traj_discrete_action = \
ll_action['high_idx'], ll_action['api_action'], ll_action['discrete_action']
# print templated low-level instructions & discrete action
# print("HL Templ: %s, LL Cmd: %s" % (traj_data['template']['high_descs'][hl_action_idx],
# traj_discrete_action['action']))
# Use the va_interact that modelers will have to use at inference time.
action_name, action_args = traj_discrete_action['action'], traj_discrete_action['args']
# three ways to specify object of interest mask
# 1. create a rectangular mask from bbox
# mask = env.bbox_to_mask(action_args['bbox']) if 'bbox' in action_args else None # some commands don't require any arguments
# 2. create a point mask from bbox
# mask = env.point_to_mask(action_args['point']) if 'point' in action_args else None
# 3. use full pixel-wise segmentation mask
compressed_mask = action_args['mask'] if 'mask' in action_args else None
if compressed_mask is not None:
mask = env.decompress_mask(compressed_mask)
else:
mask = None
success, event, target_instance_id, err, _ = env.va_interact(action_name, interact_mask=mask)
if not success:
raise RuntimeError(err)
steps_taken += 1
return steps_taken
``` |
{
"source": "jiasenwu/gan",
"score": 2
} |
#### File: examples/mnist_estimator/train_experiment_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
from PIL import Image as image_lib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_gan as tfgan
from tensorflow_gan.examples.mnist import data_provider
from tensorflow_gan.examples.mnist import networks
from tensorflow_gan.examples.mnist import util
HParams = collections.namedtuple('HParams', [
'generator_lr', 'discriminator_lr', 'joint_train', 'batch_size',
'noise_dims', 'model_dir', 'num_train_steps', 'num_eval_steps',
'num_reader_parallel_calls', 'use_dummy_data'
])
def input_fn(mode, params):
"""Input function for GANEstimator."""
if 'batch_size' not in params:
raise ValueError('batch_size must be in params')
if 'noise_dims' not in params:
raise ValueError('noise_dims must be in params')
bs = params['batch_size']
nd = params['noise_dims']
split = 'train' if mode == tf.estimator.ModeKeys.TRAIN else 'test'
shuffle = (mode == tf.estimator.ModeKeys.TRAIN)
just_noise = (mode == tf.estimator.ModeKeys.PREDICT)
noise_ds = (tf.data.Dataset.from_tensors(0).repeat()
.map(lambda _: tf.random.normal([bs, nd])))
if just_noise:
return noise_ds
if params['use_dummy_data']:
img = np.zeros((bs, 28, 28, 1), dtype=np.float32)
images_ds = tf.data.Dataset.from_tensors(img).repeat()
else:
images_ds = (data_provider.provide_dataset(
split, bs, params['num_reader_parallel_calls'],
shuffle).map(lambda x: x['images'])) # Just take the images.
return tf.data.Dataset.zip((noise_ds, images_ds))
def unconditional_generator(noise, mode):
"""MNIST generator with extra argument for tf.Estimator's `mode`."""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
return networks.unconditional_generator(noise, is_training=is_training)
def get_metrics(gan_model):
"""Return metrics for MNIST experiment."""
real_mnist_score = util.mnist_score(gan_model.real_data)
generated_mnist_score = util.mnist_score(gan_model.generated_data)
frechet_distance = util.mnist_frechet_distance(
gan_model.real_data, gan_model.generated_data)
return {
'real_mnist_score': tf.compat.v1.metrics.mean(real_mnist_score),
'mnist_score': tf.compat.v1.metrics.mean(generated_mnist_score),
'frechet_distance': tf.compat.v1.metrics.mean(frechet_distance),
}
def make_estimator(hparams):
return tfgan.estimator.GANEstimator(
model_dir=hparams.model_dir,
generator_fn=unconditional_generator,
discriminator_fn=networks.unconditional_discriminator,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
params=hparams._asdict(),
generator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.generator_lr, 0.5),
discriminator_optimizer=tf.compat.v1.train.AdamOptimizer(
hparams.discriminator_lr, 0.5),
add_summaries=tfgan.estimator.SummaryType.IMAGES,
get_eval_metric_ops_fn=get_metrics)
def write_predictions_to_disk(predictions, out_dir, current_step):
"""Write some inference from the final model to disk."""
grid_shape = (predictions.shape[0] // 10, 10)
tiled_image = tfgan.eval.python_image_grid(predictions, grid_shape=grid_shape)
eval_dir = os.path.join(out_dir, 'outputs')
if not tf.io.gfile.exists(eval_dir):
tf.io.gfile.makedirs(eval_dir)
fn = os.path.join(eval_dir, 'unconditional_gan_%ssteps.png' % current_step)
with tf.io.gfile.GFile(fn, 'w') as f:
    # Convert tiled_image from float32 in [-1, 1] to uint8 in [0, 255].
img_np = np.squeeze((255 / 2.0) * (tiled_image + 1.0), axis=2)
pil_image = image_lib.fromarray(img_np.astype(np.uint8))
pil_image.convert('RGB').save(f, 'PNG')
tf.compat.v1.logging.info('Wrote output to: %s', fn)
def train(hparams):
"""Trains an MNIST GAN.
Args:
hparams: An HParams instance containing the hyperparameters for training.
"""
estimator = make_estimator(hparams)
train_spec = tf.estimator.TrainSpec(
input_fn=input_fn, max_steps=hparams.num_train_steps)
eval_spec = tf.estimator.EvalSpec(
name='default', input_fn=input_fn, steps=hparams.num_eval_steps)
# Run training and evaluation for some steps.
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Generate predictions and write them to disk.
yields_prediction = estimator.predict(input_fn)
predictions = np.array([next(yields_prediction) for _ in xrange(100)])
write_predictions_to_disk(predictions, hparams.model_dir,
hparams.num_train_steps)
```
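A hedged example of wiring the pieces above together; the hyperparameter values and `model_dir` below are illustrative only, and `use_dummy_data=True` keeps the input pipeline synthetic:
```python
# Illustrative smoke-test configuration; field values are placeholders, not tuned.
hparams = HParams(
    generator_lr=1e-3, discriminator_lr=2e-4, joint_train=False,
    batch_size=16, noise_dims=64, model_dir='/tmp/mnist_estimator',
    num_train_steps=100, num_eval_steps=10,
    num_reader_parallel_calls=4, use_dummy_data=True)
train(hparams)
```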
#### File: examples/self_attention_estimator/ops_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_gan.examples.self_attention_estimator import ops
class OpsTest(tf.test.TestCase):
def test_snconv2d_shapes(self):
"""Tests the spectrally normalized 2d conv function.
This is a minimal test to make sure that shapes are OK.
The image shape should match after snconv is applied.
"""
if tf.executing_eagerly():
# `compute_spectral_norm` doesn't work when executing eagerly.
return
image = tf.random.normal([10, 32, 32, 3])
snconv_image = ops.snconv2d(image, 3, k_h=3, k_w=3, d_h=1, d_w=1)
self.assertEqual([10, 32, 32, 3], snconv_image.shape.as_list())
def test_snlinear_shapes(self):
"""Tests the spectrally normalized linear layer.
This is a minimal test to make sure that shapes are OK.
The vector shape should match after snlinear.
"""
if tf.executing_eagerly():
# `compute_spectral_norm` doesn't work when executing eagerly.
return
vector = tf.random.normal([10, 32])
snconv_vector = ops.snlinear(vector, 32)
self.assertEqual([10, 32], snconv_vector.shape.as_list())
def test_sn_embedding_shapes(self):
"""Tests the spectrally normalized embedding layer.
When label = 10, embedding_size = 128, the
output shape should be [10, 128]
"""
if tf.executing_eagerly():
# `compute_spectral_norm` doesn't work when executing eagerly.
return
label = tf.ones([10,], dtype=tf.int32)
vector = ops.sn_embedding(label, number_classes=1000, embedding_size=128)
self.assertEqual([10, 128], vector.shape.as_list())
def test_conditional_batch_norm_shapes(self):
"""Tests the conditional batch norm layer.
This is a minimal test to make sure that shapes are OK.
"""
c_bn = ops.ConditionalBatchNorm(num_categories=1000)
label = tf.ones([10,], dtype=tf.int32)
image = tf.random.normal([10, 32, 32, 3])
bn_image = c_bn(image, label)
self.assertEqual([10, 32, 32, 3], bn_image.shape.as_list())
def test_batch_norm_shapes(self):
"""Tests the batch norm layer.
This is a minimal test to make sure that shapes are OK.
"""
bn = ops.BatchNorm()
image = tf.random.normal([10, 32, 32, 3])
bn_image = bn(image)
self.assertEqual([10, 32, 32, 3], bn_image.shape.as_list())
def test_sn_conv1x1_shapes(self):
"""Tests that downsampling has the desired effect on shape."""
if tf.executing_eagerly():
# `compute_spectral_norm` doesn't work when executing eagerly.
return
image = tf.random.normal([10, 32, 32, 3])
big_image = ops.sn_conv1x1(image, 7, name='test_conv')
self.assertEqual([10, 32, 32, 7], big_image.shape.as_list())
def test_sn_non_local_block_sim_shapes(self):
"""Tests that downsampling has the desired effect on shape."""
if tf.executing_eagerly():
# `compute_spectral_norm` doesn't work when executing eagerly.
return
image = tf.random.normal([10, 8, 8, 64])
big_image = ops.sn_non_local_block_sim(image, name='test_sa')
self.assertEqual([10, 8, 8, 64], big_image.shape.as_list())
if __name__ == '__main__':
tf.test.main()
```
#### File: examples/stargan_estimator/train_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import PIL
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_gan as tfgan
from tensorflow_gan.examples.stargan import network
from tensorflow_gan.examples.stargan_estimator import data_provider
HParams = collections.namedtuple('HParams', [
'batch_size', 'patch_size', 'output_dir', 'generator_lr',
'discriminator_lr', 'max_number_of_steps', 'steps_per_eval', 'adam_beta1',
'adam_beta2', 'gen_disc_step_ratio', 'master', 'ps_tasks', 'task'
])
def _get_optimizer(gen_lr, dis_lr, beta1, beta2):
"""Returns generator optimizer and discriminator optimizer.
Args:
gen_lr: A scalar float `Tensor` or a Python number. The Generator learning
rate.
dis_lr: A scalar float `Tensor` or a Python number. The Discriminator
learning rate.
beta1: A scalar float `Tensor` or a Python number. The beta1 parameter to
the `AdamOptimizer`.
beta2: A scalar float `Tensor` or a Python number. The beta2 parameter to
the `AdamOptimizer`.
Returns:
A tuple of generator optimizer and discriminator optimizer.
"""
gen_opt = tf.compat.v1.train.AdamOptimizer(
gen_lr, beta1=beta1, beta2=beta2, use_locking=True)
dis_opt = tf.compat.v1.train.AdamOptimizer(
dis_lr, beta1=beta1, beta2=beta2, use_locking=True)
return gen_opt, dis_opt
def _define_train_step(gen_disc_step_ratio):
"""Get the training step for generator and discriminator for each GAN step.
Args:
gen_disc_step_ratio: A python number. The ratio of generator to
discriminator training steps.
Returns:
GANTrainSteps namedtuple representing the training step configuration.
"""
if gen_disc_step_ratio <= 1:
discriminator_step = int(1 / gen_disc_step_ratio)
return tfgan.GANTrainSteps(1, discriminator_step)
else:
generator_step = int(gen_disc_step_ratio)
return tfgan.GANTrainSteps(generator_step, 1)
def _get_summary_image(estimator, test_images_np):
"""Returns a numpy image of the generate on the test images."""
num_domains = len(test_images_np)
img_rows = []
for img_np in test_images_np:
def test_input_fn():
dataset_imgs = [img_np] * num_domains # pylint:disable=cell-var-from-loop
dataset_lbls = [tf.one_hot([d], num_domains) for d in xrange(num_domains)]
# Make into a dataset.
dataset_imgs = np.stack(dataset_imgs)
dataset_imgs = np.expand_dims(dataset_imgs, 1)
dataset_lbls = tf.stack(dataset_lbls)
unused_tensor = tf.zeros(num_domains)
return tf.data.Dataset.from_tensor_slices(((dataset_imgs, dataset_lbls),
unused_tensor))
prediction_iterable = estimator.predict(test_input_fn)
predictions = [next(prediction_iterable) for _ in xrange(num_domains)]
transform_row = np.concatenate([img_np] + predictions, 1)
img_rows.append(transform_row)
all_rows = np.concatenate(img_rows, 0)
  # Normalize [-1, 1] to [0, 1].
normalized_summary = (all_rows + 1.0) / 2.0
return normalized_summary
def train(hparams, override_generator_fn=None, override_discriminator_fn=None):
"""Trains a StarGAN.
Args:
hparams: An HParams instance containing the hyperparameters for training.
override_generator_fn: A generator function that overrides the default one.
override_discriminator_fn: A discriminator function that overrides the
default one.
"""
# Create directories if not exist.
if not tf.io.gfile.exists(hparams.output_dir):
tf.io.gfile.makedirs(hparams.output_dir)
# Make sure steps integers are consistent.
if hparams.max_number_of_steps % hparams.steps_per_eval != 0:
raise ValueError('`max_number_of_steps` must be divisible by '
'`steps_per_eval`.')
# Create optimizers.
gen_opt, dis_opt = _get_optimizer(hparams.generator_lr,
hparams.discriminator_lr,
hparams.adam_beta1, hparams.adam_beta2)
# Create estimator.
stargan_estimator = tfgan.estimator.StarGANEstimator(
generator_fn=override_generator_fn or network.generator,
discriminator_fn=override_discriminator_fn or network.discriminator,
loss_fn=tfgan.stargan_loss,
generator_optimizer=gen_opt,
discriminator_optimizer=dis_opt,
get_hooks_fn=tfgan.get_sequential_train_hooks(
_define_train_step(hparams.gen_disc_step_ratio)),
add_summaries=tfgan.estimator.SummaryType.IMAGES)
# Get input function for training and test images.
train_input_fn = lambda: data_provider.provide_data( # pylint:disable=g-long-lambda
'train', hparams.batch_size, hparams.patch_size)
test_images_np = data_provider.provide_celeba_test_set(hparams.patch_size)
filename_str = os.path.join(hparams.output_dir, 'summary_image_%i.png')
# Periodically train and write prediction output to disk.
cur_step = 0
while cur_step < hparams.max_number_of_steps:
cur_step += hparams.steps_per_eval
stargan_estimator.train(train_input_fn, steps=cur_step)
summary_img = _get_summary_image(stargan_estimator, test_images_np)
with tf.io.gfile.GFile(filename_str % cur_step, 'w') as f:
PIL.Image.fromarray((255 * summary_img).astype(np.uint8)).save(f, 'PNG')
```
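As a quick illustration of the mapping implemented by `_define_train_step` above (with the functions in scope; the ratios are chosen purely for demonstration): a ratio below one buys extra discriminator steps, a ratio above one buys extra generator steps.
```python
print(_define_train_step(0.2))  # -> 1 generator step, 5 discriminator steps per GAN step
print(_define_train_step(3.0))  # -> 3 generator steps, 1 discriminator step
```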
#### File: python/estimator/tpu_gan_estimator_test_lib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import shutil
import tempfile
from absl import flags
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
import tensorflow_gan as tfgan
# Private functions to test.
from tensorflow_gan.python.estimator.tpu_gan_estimator import get_eval_estimator_spec
from tensorflow_gan.python.estimator.tpu_gan_estimator import get_predict_estimator_spec
from tensorflow_gan.python.estimator.tpu_gan_estimator import get_train_estimator_spec
from tensorflow_gan.python.estimator.tpu_gan_estimator import LossFns
from tensorflow_gan.python.estimator.tpu_gan_estimator import Optimizers
flags.DEFINE_bool('use_tpu', False, 'Whether to run test on TPU or not.')
TpuRunConfig = tf.compat.v1.estimator.tpu.RunConfig
CrossShardOptimizer = tf.compat.v1.tpu.CrossShardOptimizer
TPUEstimatorSpec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec
class TestOptimizerWrapper(tf.compat.v1.train.Optimizer):
"""An optimizer wrapper that is designed to share a real optimizer.
The idea is that multiple instances of this class can share the real optimizer
and this class will keep track of which steps executed on the real optimizer
were executed by which instance of the wrapper class. This is useful for
testing that the order of generator and discriminator steps is as desired.
This optimizer also has an assertion that two consecutive substeps do not
generate the same loss. This is meant for the toy case where every substep
uses the same input data. If the assertion fails it implies that the weights
for the second step were read before the updates from the first step were
applied (or the training has converged, which is unlikely in a test scenario).
"""
def __init__(self, opt, name):
super(TestOptimizerWrapper, self).__init__(use_locking=False, name=name)
self._opt = opt
self._first_call = True
self._name = name
def compute_gradients(self, loss, var_list, *args, **kwargs):
# Ensure that we don't get the same loss twice in a row. If we get this it
# implies that the previous weight updates have not been applied before the
# loss was computed.
if self._first_call:
self._create_non_slot_variable(
initial_value=0.0, name='last_loss', colocate_with=var_list[0])
graph = None if tf.executing_eagerly() else var_list[0].graph
last_loss = self._get_non_slot_variable('last_loss', graph=graph)
if self._first_call:
assert_op = tf.no_op()
else:
substep_counter = self._opt._get_non_slot_variable( # pylint:disable=protected-access
'substep_counter', graph=graph)
assert_op = tf.Assert(
tf.not_equal(loss, last_loss), [
self._name, 'encountered repeated loss at substep',
substep_counter, 'current loss:', loss, 'previous loss:',
last_loss
])
with tf.control_dependencies([assert_op]):
assign_op = last_loss.assign(loss, use_locking=True)
self._first_call = False
with tf.control_dependencies([assign_op]):
return self._opt.compute_gradients(loss, var_list, *args, **kwargs)
# Wraps the apply_gradients method of the shared 'real' optimizer, but also
# updates the internal substep_counter and substep_mask variables to indicate
# the that the substep was executed on this optimizer. Tests that want to read
# these variables should access them via Estimator.get_variable_value(), since
# Estimator.train creates its own tf.Graph, so reading the variables from the
# optimizer instance would give errors about using a variable in a different
# Graph than where it was created.
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
colocate_with = grads_and_vars[0][1]
# Shared with other wrappers of self._opt.
self._opt._create_non_slot_variable( # pylint:disable=protected-access
initial_value=0,
name='substep_counter',
colocate_with=colocate_with)
# Not shared
self._create_non_slot_variable(
initial_value=0,
name='substep_mask',
colocate_with=colocate_with)
update_op = self._opt.apply_gradients(
grads_and_vars, global_step=global_step)
graph = None if tf.executing_eagerly() else colocate_with.graph
with tf.control_dependencies([update_op]):
return self._track_calls(graph)
def _track_calls(self, graph):
substep_counter = self._opt._get_non_slot_variable( # pylint:disable=protected-access
'substep_counter', graph=graph)
substep_mask = self._get_non_slot_variable('substep_mask', graph=graph)
current_substep_mask = tf.bitwise.left_shift(1, substep_counter)
updated_substep_mask = tf.bitwise.bitwise_or(current_substep_mask,
substep_mask)
assign_op = tf.compat.v1.assign(
substep_mask, updated_substep_mask, use_locking=True)
with tf.control_dependencies([assign_op]):
inc_op = tf.compat.v1.assign_add(substep_counter, 1, use_locking=True)
return inc_op
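  # Note (added for clarity; not part of the original test): `substep_mask`
  # records which of the shared substeps this particular wrapper executed.
  # For example, if the shared `substep_counter` equals 2 when this wrapper
  # runs, then current_substep_mask = 1 << 2 = 0b100, which is OR'd into this
  # wrapper's mask before the counter is incremented to 3.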
def generator_fn(noise, mode):
del mode
return tf.compat.v1.layers.dense(
noise, tf.compat.dimension_value(noise.shape[1]))
def discriminator_fn(data, unused_conditioning, mode):
del unused_conditioning, mode
return tf.compat.v1.layers.dense(data, 1)
def get_dummy_gan_model(generated_data=None):
"""Returns a GANModel tuple for testing."""
if generated_data is None:
generated_data = tf.ones([3, 4])
# TODO(joelshor): Find a better way of creating a variable scope.
with tf.compat.v1.variable_scope(
'generator', reuse=tf.compat.v1.AUTO_REUSE) as gen_scope:
gen_var = tf.compat.v1.get_variable('dummy_var', initializer=0.0)
with tf.compat.v1.variable_scope(
'discriminator', reuse=tf.compat.v1.AUTO_REUSE) as dis_scope:
dis_var = tf.compat.v1.get_variable('dummy_var', initializer=0.0)
return tfgan.GANModel(
generator_inputs=tf.zeros(shape=()),
generated_data=generated_data,
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
real_data=tf.zeros([3, 4]),
discriminator_real_outputs=tf.ones([1, 2, 3]) * dis_var,
discriminator_gen_outputs=tf.ones([1, 2, 3]) * gen_var * dis_var,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
def prepare_arguments_for_metric_fn(generator_inputs, generated_data, real_data,
discriminator_real_outputs,
discriminator_gen_outputs):
del generator_inputs, discriminator_real_outputs, discriminator_gen_outputs
return {
'my_real_data': real_data,
'my_generated_data': generated_data,
}
def get_metrics_custom_args(my_real_data, my_generated_data):
return {
'mse_custom_metric':
tf.compat.v1.metrics.mean_squared_error(my_real_data,
my_generated_data)
}
def get_metrics(generator_inputs, generated_data, real_data,
discriminator_real_outputs, discriminator_gen_outputs):
del generator_inputs, discriminator_real_outputs, discriminator_gen_outputs
return {
'mse_custom_metric':
tf.compat.v1.metrics.mean_squared_error(real_data, generated_data)
}
class GetTPUEstimatorSpecTest(tf.test.TestCase, parameterized.TestCase):
"""Tests that the EstimatorSpec is constructed appropriately."""
@classmethod
def setUpClass(cls):
super(GetTPUEstimatorSpecTest, cls).setUpClass()
cls._generator_optimizer = CrossShardOptimizer(
tf.compat.v1.train.GradientDescentOptimizer(1.0))
cls._discriminator_optimizer = CrossShardOptimizer(
tf.compat.v1.train.GradientDescentOptimizer(1.0))
cls._optimizers = Optimizers(cls._generator_optimizer,
cls._discriminator_optimizer)
cls._loss_fns = LossFns(tfgan.losses.wasserstein_generator_loss,
tfgan.losses.wasserstein_discriminator_loss)
@parameterized.named_parameters(
('joint_train', True),
('train_sequential', False),
)
def test_get_train_estimator_spec(self, joint_train):
with tf.Graph().as_default():
if joint_train:
gan_model_fns = [get_dummy_gan_model]
else:
gan_model_fns = [get_dummy_gan_model, get_dummy_gan_model]
spec = get_train_estimator_spec(
gan_model_fns,
self._loss_fns,
{}, # gan_loss_kwargs
self._optimizers,
joint_train=joint_train,
is_on_tpu=flags.FLAGS.use_tpu,
gan_train_steps=tfgan.GANTrainSteps(1, 1),
add_summaries=not flags.FLAGS.use_tpu)
self.assertIsInstance(spec, TPUEstimatorSpec)
self.assertEqual(tf.estimator.ModeKeys.TRAIN, spec.mode)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.train_op)
self.assertIsNotNone(spec.training_hooks)
def test_get_eval_estimator_spec(self):
with tf.Graph().as_default():
generated_data = tf.ones([3, 4])
gan_model_fns = [functools.partial(get_dummy_gan_model, generated_data)]
spec = get_eval_estimator_spec(
gan_model_fns,
self._loss_fns,
gan_loss_kwargs={},
prepare_arguments_for_eval_metric_fn=None,
get_eval_metric_ops_fn=get_metrics,
add_summaries=not flags.FLAGS.use_tpu)
self.assertIsInstance(spec, TPUEstimatorSpec)
self.assertEqual(tf.estimator.ModeKeys.EVAL, spec.mode)
self.assertEqual(generated_data, spec.predictions)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.eval_metrics)
def test_get_eval_estimator_spec_custom_metric_args(self):
with tf.Graph().as_default():
generated_data = tf.ones([3, 4])
gan_model_fns = [functools.partial(get_dummy_gan_model, generated_data)]
spec = get_eval_estimator_spec(
gan_model_fns,
self._loss_fns,
gan_loss_kwargs={},
prepare_arguments_for_eval_metric_fn=prepare_arguments_for_metric_fn,
get_eval_metric_ops_fn=get_metrics_custom_args,
add_summaries=not flags.FLAGS.use_tpu)
self.assertIsInstance(spec, TPUEstimatorSpec)
self.assertEqual(tf.estimator.ModeKeys.EVAL, spec.mode)
self.assertEqual(generated_data, spec.predictions)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.eval_metrics)
def test_get_predict_estimator_spec(self):
with tf.Graph().as_default():
generated_data = tf.ones([3, 4])
gan_model_fns = [functools.partial(get_dummy_gan_model, generated_data)]
spec = get_predict_estimator_spec(gan_model_fns)
self.assertIsInstance(spec, TPUEstimatorSpec)
self.assertEqual(tf.estimator.ModeKeys.PREDICT, spec.mode)
self.assertEqual({'generated_data': generated_data}, spec.predictions)
class TPUGANEstimatorIntegrationTest(tf.test.TestCase, parameterized.TestCase):
"""Integration tests for TPUGANEstimator."""
def setUp(self):
super(TPUGANEstimatorIntegrationTest, self).setUp()
self._model_dir = tempfile.mkdtemp()
self._config = TpuRunConfig(model_dir=self._model_dir)
def tearDown(self):
super(TPUGANEstimatorIntegrationTest, self).tearDown()
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self,
train_input_fn,
eval_input_fn,
predict_input_fn,
prediction_size,
lr_decay=False,
joint_train=True):
def make_opt():
gstep = tf.compat.v1.train.get_or_create_global_step()
lr = tf.compat.v1.train.exponential_decay(1.0, gstep, 10, 0.9)
return tf.compat.v1.train.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else tf.compat.v1.train.GradientDescentOptimizer(
1.0)
dopt = make_opt if lr_decay else tf.compat.v1.train.GradientDescentOptimizer(
1.0)
est = tfgan.estimator.TPUGANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
joint_train=joint_train,
get_eval_metric_ops_fn=get_metrics,
train_batch_size=4,
eval_batch_size=10,
predict_batch_size=8,
use_tpu=flags.FLAGS.use_tpu,
config=self._config)
# Train.
num_steps_train = 10
est.train(train_input_fn, steps=num_steps_train)
# Evaluate.
num_steps_eval = 2
scores = est.evaluate(eval_input_fn, steps=num_steps_eval)
self.assertIn(tf.compat.v1.GraphKeys.GLOBAL_STEP, six.iterkeys(scores))
self.assertIn('loss', six.iterkeys(scores))
self.assertAlmostEqual(
scores['discriminator_loss'], scores['loss'], places=4)
self.assertIn('mse_custom_metric', six.iterkeys(scores))
# Predict.
predictions = np.array(
[x['generated_data'] for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
@parameterized.named_parameters(('joint_train', True, False, False),
('train_sequential', False, False, False),
('lr_decay', False, True, False),
('train_sequential_ds', False, False, True))
def test_numpy_input_fn(self, joint_train, lr_decay, return_ds):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
def train_input_fn(params):
data = np.zeros([input_dim], dtype=np.float32)
ds = (
tf.data.Dataset.from_tensors((data, data)).repeat().batch(
params['batch_size'], drop_remainder=True))
if return_ds:
return ds
else:
x, y = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()
return x, y
def eval_input_fn(params):
data = np.zeros([input_dim], dtype=np.float32)
ds = (
tf.data.Dataset.from_tensors((data, data)).repeat().batch(
params['batch_size'], drop_remainder=True))
if return_ds:
return ds
else:
x, y = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()
return x, y
predict_size = 10
def predict_input_fn(params):
del params # unused
data = np.zeros([input_dim], dtype=np.float32)
ds = (
tf.data.Dataset.from_tensors(data).repeat(predict_size).batch(
1, drop_remainder=True))
return ds
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[predict_size, input_dim],
lr_decay=lr_decay,
joint_train=joint_train)
class TPUGANEstimatorMultiTrainStepTest(tf.test.TestCase,
parameterized.TestCase):
"""Tests for TPU multistep logic."""
def setUp(self):
super(TPUGANEstimatorMultiTrainStepTest, self).setUp()
self._model_dir = tempfile.mkdtemp()
self._config = TpuRunConfig(model_dir=self._model_dir)
def tearDown(self):
super(TPUGANEstimatorMultiTrainStepTest, self).tearDown()
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
@parameterized.named_parameters(
('1:1 joint', 1, 1, True, 2, [0b10, 0b01], [0b10, 0b01]),
('1:1 seq', 1, 1, False, 2, [0b10], [0b01]),
('1:3 joint', 1, 3, True, 4, [0b0010, 0b0001], [0b1110, 0b1101]),
('1:3 seq', 1, 3, False, 4, [0b1000], [0b0111]),
('3:1 joint', 3, 1, True, 4, [0b1110, 0b1101], [0b0010, 0b0001]),
('3:1 seq', 3, 1, False, 4, [0b1110], [0b0001]),
('1:0 seq', 1, 0, False, 1, [0b1], None),
('0:1 seq', 0, 1, False, 1, None, [0b1]))
def test_train(self, g_steps, d_steps, joint_train, expected_total_substeps,
expected_g_substep_mask, expected_d_substep_mask):
real_opt = tf.compat.v1.train.GradientDescentOptimizer(1e-2)
gopt = TestOptimizerWrapper(real_opt, name='g_opt')
dopt = TestOptimizerWrapper(real_opt, name='d_opt')
est = tfgan.estimator.TPUGANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
gan_train_steps=tfgan.GANTrainSteps(g_steps, d_steps),
joint_train=joint_train,
get_eval_metric_ops_fn=get_metrics,
train_batch_size=4,
eval_batch_size=10,
predict_batch_size=8,
use_tpu=flags.FLAGS.use_tpu,
config=self._config)
def train_input_fn(params):
data = tf.ones([params['batch_size'], 4], dtype=tf.float32)
return data, data
est.train(train_input_fn, steps=1)
self.assertEqual(1, est.get_variable_value('global_step'))
substep_counter_name = 'discriminator_train/substep_counter'
if d_steps == 0:
substep_counter_name = 'generator_train/substep_counter'
substep_counter = est.get_variable_value(substep_counter_name)
self.assertEqual(expected_total_substeps, substep_counter)
if expected_g_substep_mask is not None:
g_substep_mask = est.get_variable_value('generator_train/substep_mask')
self.assertIn(g_substep_mask, expected_g_substep_mask)
if expected_d_substep_mask is not None:
d_substep_mask = est.get_variable_value(
'discriminator_train/substep_mask')
self.assertIn(d_substep_mask, expected_d_substep_mask)
class TPUGANEstimatorWarmStartTest(tf.test.TestCase):
"""Tests that TPUGANEstimator can be warm-started."""
def setUp(self):
super(TPUGANEstimatorWarmStartTest, self).setUp()
self._model_dir = self.get_temp_dir()
self._config = TpuRunConfig(model_dir=self._model_dir)
self.new_variable_name = 'new_var'
self.new_variable_value = [1.0, 2.0, 3.0]
def tearDown(self):
super(TPUGANEstimatorWarmStartTest, self).tearDown()
tf.compat.v1.summary.FileWriterCache.clear()
def _test_warm_start(self, warm_start_from=None):
"""Tests whether WarmStartSettings work as intended."""
def generator_with_new_variable(noise_dict, mode):
tf.compat.v1.get_variable(
name=self.new_variable_name,
initializer=self.new_variable_value,
trainable=True)
return generator_fn(noise_dict, mode)
est = tfgan.estimator.TPUGANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
generator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(1.0),
discriminator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(
1.0),
train_batch_size=4,
use_tpu=flags.FLAGS.use_tpu,
config=self._config)
def train_input_fn(params):
data = tf.zeros([params['batch_size'], 4], dtype=tf.float32)
return data, data
est.train(train_input_fn, steps=1)
est_warm = tfgan.estimator.TPUGANEstimator(
generator_fn=generator_with_new_variable,
discriminator_fn=discriminator_fn,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
generator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(1.0),
discriminator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(
1.0),
config=TpuRunConfig(
model_dir=None if warm_start_from else self._model_dir),
train_batch_size=4,
use_tpu=flags.FLAGS.use_tpu,
warm_start_from=warm_start_from)
est_warm.train(train_input_fn, steps=1)
return est_warm
def test_warm_start_error(self):
"""Test if exception when reloading different estimators."""
with self.assertRaises(tf.errors.NotFoundError):
self._test_warm_start()
def test_warm_start_success(self):
"""Test if GANEstimator allows explicit warm start variable assignment."""
# Regex matches all variable names in ckpt except for new_var.
var_regex = '^(?!.*%s.*)' % self.new_variable_name
warmstart = tf.estimator.WarmStartSettings(
ckpt_to_initialize_from=self._model_dir, vars_to_warm_start=var_regex)
est_warm = self._test_warm_start(warm_start_from=warmstart)
full_variable_name = 'Generator/%s' % self.new_variable_name
self.assertIn(full_variable_name, est_warm.get_variable_names())
equal_vals = np.array_equal(
est_warm.get_variable_value(full_variable_name),
self.new_variable_value)
self.assertTrue(equal_vals)
```
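For reference, here is a minimal sketch (not part of the test file above) of how the bitmask expectations in `TPUGANEstimatorMultiTrainStepTest` decode into substep indices; `decode_substep_mask` is a hypothetical helper introduced only for illustration:
```python
def decode_substep_mask(mask):
    """Return the substep indices recorded in a wrapper's bitmask."""
    return [i for i in range(mask.bit_length()) if (mask >> i) & 1]

print(decode_substep_mask(0b0111))  # [0, 1, 2] -- the '1:3 seq' d_opt mask
print(decode_substep_mask(0b1000))  # [3]       -- the '1:3 seq' g_opt mask
```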
#### File: python/features/clip_weights_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
import tensorflow_gan as tfgan
class ClipWeightsTest(tf.test.TestCase):
"""Tests for `discriminator_weight_clip`."""
def setUp(self):
super(ClipWeightsTest, self).setUp()
self.variables = [tf.Variable(2.0)]
self.tuple = collections.namedtuple('VarTuple',
['discriminator_variables'])(
self.variables)
def _test_weight_clipping_helper(self, use_tuple):
loss = self.variables[0]
opt = tf.compat.v1.train.GradientDescentOptimizer(1.0)
if use_tuple:
opt_clip = tfgan.features.clip_variables(opt, self.variables, 0.1)
else:
opt_clip = tfgan.features.clip_discriminator_weights(opt, self.tuple, 0.1)
train_op1 = opt.minimize(loss, var_list=self.variables)
train_op2 = opt_clip.minimize(loss, var_list=self.variables)
with self.cached_session(use_gpu=True) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
self.assertEqual(2.0, sess.run(self.variables[0]))
sess.run(train_op1)
self.assertLess(0.1, sess.run(self.variables[0]))
with self.cached_session(use_gpu=True) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
self.assertEqual(2.0, sess.run(self.variables[0]))
sess.run(train_op2)
self.assertNear(0.1, sess.run(self.variables[0]), 1e-7)
def test_weight_clipping_argsonly(self):
if tf.executing_eagerly():
# Optimizers work differently in eager.
return
self._test_weight_clipping_helper(False)
def test_weight_clipping_ganmodel(self):
if tf.executing_eagerly():
# Optimizers work differently in eager.
return
self._test_weight_clipping_helper(True)
def _test_incorrect_weight_clip_value_helper(self, use_tuple):
opt = tf.compat.v1.train.GradientDescentOptimizer(1.0)
if use_tuple:
with self.assertRaisesRegexp(ValueError, 'must be positive'):
tfgan.features.clip_discriminator_weights(
opt, self.tuple, weight_clip=-1)
else:
with self.assertRaisesRegexp(ValueError, 'must be positive'):
tfgan.features.clip_variables(opt, self.variables, weight_clip=-1)
def test_incorrect_weight_clip_value_argsonly(self):
self._test_incorrect_weight_clip_value_helper(False)
def test_incorrect_weight_clip_value_tuple(self):
self._test_incorrect_weight_clip_value_helper(True)
if __name__ == '__main__':
tf.test.main()
```
#### File: python/features/conditioning_utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
__all__ = [
'condition_tensor',
'condition_tensor_from_onehot',
]
def _get_shape(tensor):
tensor_shape = tf.shape(input=tensor)
static_tensor_shape = tf.get_static_value(tensor_shape)
if static_tensor_shape is None:
return tensor_shape
else:
return static_tensor_shape
def condition_tensor(tensor, conditioning):
"""Condition the value of a tensor.
Conditioning scheme based on https://arxiv.org/abs/1609.03499.
Args:
tensor: A minibatch tensor to be conditioned.
    conditioning: A minibatch Tensor to condition on. Must be 2D, with first
dimension the same as `tensor`.
Returns:
`tensor` conditioned on `conditioning`.
Raises:
ValueError: If the non-batch dimensions of `tensor` aren't fully defined.
ValueError: If `conditioning` isn't at least 2D.
    ValueError: If the batch dimensions of the input Tensors don't match.
"""
tensor.shape[1:].assert_is_fully_defined()
num_features = tensor.shape[1:].num_elements()
if conditioning.shape.ndims < 2:
raise ValueError('conditioning must be at least 2D, but saw shape: %s'
% conditioning.shape)
mapped_conditioning = tf.compat.v1.layers.dense(
tf.compat.v1.layers.flatten(conditioning),
num_features,
kernel_initializer=tf.compat.v1.glorot_uniform_initializer())
if not mapped_conditioning.shape.is_compatible_with(tensor.shape):
mapped_conditioning = tf.reshape(mapped_conditioning, _get_shape(tensor))
return tensor + mapped_conditioning
def _one_hot_to_embedding(one_hot, embedding_size):
"""Get a dense embedding vector from a one-hot encoding."""
num_tokens = one_hot.shape[1]
label_id = tf.argmax(input=one_hot, axis=1)
embedding = tf.compat.v1.get_variable(
'embedding', [num_tokens, embedding_size])
return tf.nn.embedding_lookup(
params=embedding, ids=label_id, name='token_to_embedding')
def _validate_onehot(one_hot_labels):
one_hot_labels.shape.assert_has_rank(2)
one_hot_labels.shape[1:].assert_is_fully_defined()
def condition_tensor_from_onehot(tensor, one_hot_labels, embedding_size=256):
"""Condition a tensor based on a one-hot tensor.
Conditioning scheme based on https://arxiv.org/abs/1609.03499.
Args:
tensor: Tensor to be conditioned.
one_hot_labels: A Tensor of one-hot labels. Shape is
[batch_size, num_classes].
embedding_size: The size of the class embedding.
Returns:
`tensor` conditioned on `one_hot_labels`.
Raises:
ValueError: `one_hot_labels` isn't 2D, if non-batch dimensions aren't
fully defined, or if batch sizes don't match.
"""
_validate_onehot(one_hot_labels)
conditioning = _one_hot_to_embedding(one_hot_labels, embedding_size)
return condition_tensor(tensor, conditioning)
```
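A minimal usage sketch for the public API this module provides (exposed as `tfgan.features.condition_tensor_from_onehot`); it assumes TF1-style graph construction, and the shapes and `embedding_size` below are made up for illustration:
```python
import tensorflow as tf
import tensorflow_gan as tfgan

with tf.Graph().as_default():
    features = tf.zeros([16, 32])            # [batch_size, num_features]
    labels = tf.one_hot([3] * 16, depth=10)  # [batch_size, num_classes]
    conditioned = tfgan.features.condition_tensor_from_onehot(
        features, labels, embedding_size=64)
    # `conditioned` has the same shape as `features`: [16, 32].
```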
#### File: python/losses/tuple_losses_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
import tensorflow_gan as tfgan
from tensorflow_gan.python.losses.tuple_losses import __all__ as tuple_all
from tensorflow_gan.python.losses.tuple_losses import args_to_gan_model
class ArgsToGanModelTest(tf.test.TestCase):
def testargs_to_gan_model(self):
"""Test `args_to_gan_model`."""
tuple_type = collections.namedtuple('fake_type', ['arg1', 'arg3'])
def args_loss(arg1, arg2, arg3=3, arg4=4):
return arg1 + arg2 + arg3 + arg4
gan_model_loss = args_to_gan_model(args_loss)
# Value is correct.
self.assertEqual(1 + 2 + 5 + 6,
gan_model_loss(tuple_type(1, 2), arg2=5, arg4=6))
# Uses tuple argument with defaults.
self.assertEqual(1 + 5 + 3 + 7,
gan_model_loss(tuple_type(1, None), arg2=5, arg4=7))
# Uses non-tuple argument with defaults.
self.assertEqual(1 + 5 + 2 + 4, gan_model_loss(tuple_type(1, 2), arg2=5))
# Requires non-tuple, non-default arguments.
with self.assertRaisesRegexp(ValueError, '`arg2` must be supplied'):
gan_model_loss(tuple_type(1, 2))
# Can't pass tuple argument outside tuple.
with self.assertRaisesRegexp(ValueError,
'present in both the tuple and keyword args'):
gan_model_loss(tuple_type(1, 2), arg2=1, arg3=5)
def testargs_to_gan_model_name(self):
"""Test that `args_to_gan_model` produces correctly named functions."""
def loss_fn(x):
return x
new_loss_fn = args_to_gan_model(loss_fn)
self.assertEqual('loss_fn', new_loss_fn.__name__)
self.assertTrue('The gan_model version of' in new_loss_fn.__docstring__)
def test_tuple_respects_optional_args(self):
"""Test that optional args can be changed with tuple losses."""
tuple_type = collections.namedtuple('fake_type', ['arg1', 'arg2'])
def args_loss(arg1, arg2, arg3=3):
return arg1 + 2 * arg2 + 3 * arg3
loss_fn = args_to_gan_model(args_loss)
loss = loss_fn(tuple_type(arg1=-1, arg2=2), arg3=4)
# If `arg3` were not set properly, this value would be different.
self.assertEqual(-1 + 2 * 2 + 3 * 4, loss)
def test_works_with_child_classes(self):
"""`args_to_gan_model` should work with classes derived from namedtuple."""
tuple_type = collections.namedtuple('fake_type', ['arg1', 'arg2'])
class InheritedType(tuple_type):
pass
def args_loss(arg1, arg2, arg3=3):
return arg1 + 2 * arg2 + 3 * arg3
loss_fn = args_to_gan_model(args_loss)
loss = loss_fn(InheritedType(arg1=-1, arg2=2), arg3=4)
# If `arg3` were not set properly, this value would be different.
self.assertEqual(-1 + 2 * 2 + 3 * 4, loss)
class ConsistentLossesTest(tf.test.TestCase):
pass
def _tuple_from_dict(args_dict):
return collections.namedtuple('Tuple', args_dict.keys())(**args_dict)
def add_loss_consistency_test(test_class, loss_name_str, loss_args):
tuple_loss = getattr(tfgan.losses, loss_name_str)
arg_loss = getattr(tfgan.losses.wargs, loss_name_str)
def consistency_test(self):
self.assertEqual(arg_loss.__name__, tuple_loss.__name__)
with self.cached_session() as sess:
self.assertEqual(
sess.run(arg_loss(**loss_args)),
sess.run(tuple_loss(_tuple_from_dict(loss_args))))
test_name = 'test_loss_consistency_%s' % loss_name_str
setattr(test_class, test_name, consistency_test)
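# Note (added for clarity; not part of the original file): the __main__ block
# below calls `add_loss_consistency_test` once per loss in `tuple_all`, so
# `ConsistentLossesTest` gains one generated test_loss_consistency_* method per
# tuple loss, each checking that the tuple-based and keyword-argument versions
# of that loss agree on the same inputs.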
# A list of consistency tests which need to be manually written.
manual_tests = [
'acgan_discriminator_loss', 'acgan_generator_loss',
'combine_adversarial_loss', 'mutual_information_penalty',
'wasserstein_gradient_penalty', 'cycle_consistency_loss',
'stargan_generator_loss_wrapper', 'stargan_discriminator_loss_wrapper',
'stargan_gradient_penalty_wrapper'
]
discriminator_keyword_args = {
'discriminator_real_outputs':
np.array([[3.4, 2.3, -2.3], [6.3, -2.1, 0.2]]),
'discriminator_gen_outputs':
np.array([[6.2, -1.5, 2.3], [-2.9, -5.1, 0.1]]),
}
generator_keyword_args = {
'discriminator_gen_outputs':
np.array([[6.2, -1.5, 2.3], [-2.9, -5.1, 0.1]]),
}
class CycleConsistencyLossTest(tf.test.TestCase):
def setUp(self):
super(CycleConsistencyLossTest, self).setUp()
def _partial_model(generator_inputs_np):
model = tfgan.GANModel(*[None] * 11)
return model._replace(
generator_inputs=tf.constant(generator_inputs_np, dtype=tf.float32))
self._model_x2y = _partial_model([1, 2])
self._model_y2x = _partial_model([5, 6])
def test_model_type(self):
"""Test the input model type for `cycle_consistency_loss`."""
with self.assertRaises(ValueError):
tfgan.losses.cycle_consistency_loss(self._model_x2y)
def test_correct_loss(self):
"""Test the output of `cycle_consistency_loss`."""
loss = tfgan.losses.cycle_consistency_loss(
tfgan.CycleGANModel(
model_x2y=self._model_x2y,
model_y2x=self._model_y2x,
reconstructed_x=tf.constant([9, 8], dtype=tf.float32),
reconstructed_y=tf.constant([7, 2], dtype=tf.float32)))
with self.cached_session(use_gpu=True) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
self.assertNear(5.0, sess.run(loss), 1e-5)
class StarGANLossWrapperTest(tf.test.TestCase):
def setUp(self):
super(StarGANLossWrapperTest, self).setUp()
self.input_data = tf.ones([1, 2, 2, 3])
self.input_data_domain_label = tf.constant([[0, 1]])
self.generated_data = tf.ones([1, 2, 2, 3])
self.discriminator_input_data_source_predication = tf.ones([1])
self.discriminator_generated_data_source_predication = tf.ones([1])
def _discriminator_fn(inputs, num_domains):
"""Differentiable dummy discriminator for StarGAN."""
hidden = tf.compat.v1.layers.flatten(inputs)
output_src = tf.reduce_mean(input_tensor=hidden, axis=1)
output_cls = tf.compat.v1.layers.dense(hidden, num_domains)
return output_src, output_cls
with tf.compat.v1.variable_scope('discriminator') as dis_scope:
pass
self.model = tfgan.StarGANModel(
input_data=self.input_data,
input_data_domain_label=self.input_data_domain_label,
generated_data=self.generated_data,
generated_data_domain_target=None,
reconstructed_data=None,
discriminator_input_data_source_predication=self
.discriminator_input_data_source_predication,
discriminator_generated_data_source_predication=self
.discriminator_generated_data_source_predication,
discriminator_input_data_domain_predication=None,
discriminator_generated_data_domain_predication=None,
generator_variables=None,
generator_scope=None,
generator_fn=None,
discriminator_variables=None,
discriminator_scope=dis_scope,
discriminator_fn=_discriminator_fn)
self.discriminator_fn = _discriminator_fn
self.discriminator_scope = dis_scope
def test_stargan_generator_loss_wrapper(self):
"""Test StarGAN generator loss wrapper."""
loss_fn = tfgan.losses.wargs.wasserstein_generator_loss
wrapped_loss_fn = tfgan.losses.stargan_generator_loss_wrapper(loss_fn)
loss_result_tensor = loss_fn(
self.discriminator_generated_data_source_predication)
wrapped_loss_result_tensor = wrapped_loss_fn(self.model)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
loss_result, wrapped_loss_result = sess.run(
[loss_result_tensor, wrapped_loss_result_tensor])
self.assertAlmostEqual(loss_result, wrapped_loss_result)
def test_stargan_discriminator_loss_wrapper(self):
"""Test StarGAN discriminator loss wrapper."""
loss_fn = tfgan.losses.wargs.wasserstein_discriminator_loss
wrapped_loss_fn = tfgan.losses.stargan_discriminator_loss_wrapper(loss_fn)
loss_result_tensor = loss_fn(
self.discriminator_generated_data_source_predication,
self.discriminator_generated_data_source_predication)
wrapped_loss_result_tensor = wrapped_loss_fn(self.model)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
loss_result, wrapped_loss_result = sess.run(
[loss_result_tensor, wrapped_loss_result_tensor])
self.assertAlmostEqual(loss_result, wrapped_loss_result)
def test_stargan_gradient_penalty_wrapper(self):
"""Test StaGAN gradient penalty wrapper.
Notes:
      The random interpolates are handled by setting the reconstruction to
be the same as the input.
"""
if tf.executing_eagerly():
# Can't use `tf.gradient` when executing eagerly
return
loss_fn = tfgan.losses.wargs.wasserstein_gradient_penalty
tfgan.losses.stargan_gradient_penalty_wrapper(loss_fn)
wrapped_loss_fn = tfgan.losses.stargan_gradient_penalty_wrapper(loss_fn)
loss_result_tensor = loss_fn(
real_data=self.input_data,
generated_data=self.generated_data,
generator_inputs=self.input_data_domain_label.shape.as_list()[-1],
discriminator_fn=self.discriminator_fn,
discriminator_scope=self.discriminator_scope)
wrapped_loss_result_tensor = wrapped_loss_fn(self.model)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
loss_result, wrapped_loss_result = sess.run(
[loss_result_tensor, wrapped_loss_result_tensor])
self.assertAlmostEqual(loss_result, wrapped_loss_result)
if __name__ == '__main__':
for loss_name in tuple_all:
if loss_name in manual_tests:
continue
if 'generator' in loss_name:
keyword_args = generator_keyword_args
else:
keyword_args = discriminator_keyword_args
add_loss_consistency_test(ConsistentLossesTest, loss_name, keyword_args)
tf.test.main()
``` |
{
"source": "jiaseny/KDSD",
"score": 3
} |
#### File: jiaseny/KDSD/kernels.py
```python
from __future__ import division
from gk_wl import * # Graph kernels
from util import *
def hamming_kernel(x, y):
"""
NOTE: The kernel matrix K is not symmetric, since in general
K(x[i], y[j]) != K(x[j], y[i])
"""
x = np.atleast_2d(x)
y = np.atleast_2d(y)
assert x.shape[1] == y.shape[1] # d
K = 1. - cdist(x, y, "Hamming")
assert_shape(K, (x.shape[0], y.shape[0]))
return K
def exp_hamming_kernel(x, y):
"""
NOTE: The kernel matrix K is not symmetric, since in general
K(x[i], y[j]) != K(x[j], y[i])
"""
x = np.atleast_2d(x)
y = np.atleast_2d(y)
assert x.shape[1] == y.shape[1] # d
K = np.exp(-cdist(x, y, "Hamming"))
assert_shape(K, (x.shape[0], y.shape[0]))
return K
def wl_kernel_graph(g1_list, g2_list, h=2):
"""
Computes the Weisfeiler-Lehman graph kernel.
Args:
g1_list: list of ig.Graph objects.
        g2_list: list of ig.Graph objects.
h: int, number of iterations in the W-L algorithm.
"""
n1 = len(g1_list)
n2 = len(g2_list)
g_list = np.concatenate([g1_list, g2_list])
res = GK_WL(h=h).compare_pairwise(g_list)
K = res[n1:, :n2]
assert_shape(K, (n1, n2))
return K
def wl_kernel(x, y, h=2):
"""
Computes the Weisfeiler-Lehman graph kernel.
Args:
x, y: array((n, p)), n graphs, each row representing the upper-
triangular entries (excluding diagonal) of the adjacency matrix.
h: int, number of iterations in the W-L algorithm.
"""
x = np.atleast_2d(x)
y = np.atleast_2d(y)
assert x.shape[1] == y.shape[1] # d
p = x.shape[1] # d*(d-1)/2
d = int((1 + np.sqrt(1+8*p)) / 2.) # Number of nodes
assert 2*p == d*(d-1)
n1 = x.shape[0]
n2 = y.shape[0]
z = np.vstack([x, y]) # (n1 + n2, d)
g_list = [get_graph(d, row) for row in z]
res = GK_WL(h=h).compare_pairwise(g_list)
K = res[n1:, :n2]
assert_shape(K, (n1, n2))
return K
def get_graph(d, x):
"""
Read a graph.
Args:
d: int, number of nodes.
x: array, upper-triangular part of the adjacency matrix.
"""
assert len(x) == d*(d-1)/2.
A = np.zeros((d, d))
A[np.triu_indices(d, k=1)] = x # Set upper-triangle (excluding diagonal)
g = ig.Graph.Adjacency(A.tolist(), 'upper')
assert not g.is_directed() # Check undirected
return g
```
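A standalone sketch of the quantity `exp_hamming_kernel` computes (the module itself pulls `np`, `cdist`, and `assert_shape` in through `from util import *`); the toy binary vectors are made up:
```python
import numpy as np
from scipy.spatial.distance import cdist

x = np.array([[0, 1, 1, 0],
              [1, 1, 0, 0]])
y = np.array([[0, 1, 0, 0]])
K = np.exp(-cdist(x, y, "Hamming"))
# K has shape (2, 1); K[i, j] = exp(-(fraction of positions where x[i] != y[j])).
```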
#### File: jiaseny/KDSD/ksd_utils.py
```python
from __future__ import division
from util import *
def ksd_bootstrap(kappa_vals, n_boot=5000):
"""
Implements the multinomial bootstrap method for independent samples in
Liu et al. (2016).
Args:
kappa_vals: array(n, n), pre-computed KSD kernel matrix.
n_boot: int, number of bootstrap samples.
Returns:
boot_samples: array(n_boot), bootstrap samples.
"""
n = kappa_vals.shape[0] # Sample size
kappa_vals = kappa_vals - np.diag(np.diag(kappa_vals)) # Remove diagonal
# Bootstrap samples for KSD estimates
boot_samples = np.zeros(n_boot)
for j in xrange(n_boot):
wvec = (rand.multinomial(n=n, pvals=np.ones(n)/n) - 1.) / n
boot_samples[j] = wvec.dot(kappa_vals).dot(wvec)
return boot_samples
def ksd_est(kappa_vals_list):
"""
Given a list of pre-computed kappa values, compute the U- and V-statistics
estimates for KSD.
Args:
n: int, sample size.
kappa_vals_list: list of array((n, n)), list of pre-computed kappa's.
Returns: (all lists have same length as kappa_vals_list)
ustats: list, U-statistics KSD estimate.
vstats: list, V-statistics KSD estimate.
"""
n = kappa_vals_list[0].shape[0] # Sample size
assert all(kappa_vals.shape == (n, n) for kappa_vals in kappa_vals_list)
ustats = np.zeros(len(kappa_vals_list)) # U-stat
    vstats = np.zeros(len(kappa_vals_list))  # V-stat
for i, kappa_vals in enumerate(kappa_vals_list):
diag_vals = np.diag(np.diag(kappa_vals)) # (n, n) diagonal matrix
ustats[i] = np.sum(kappa_vals - diag_vals) / (n * (n-1)) # U-stat
vstats[i] = np.sum(kappa_vals) / (n**2) # V-stat
return ustats, vstats
def ksd_boot(kappa_vals_list, quantile=.95, n_boot=1000):
"""
Given a list of pre-computed kappa values, compute
the bootstrap sampling distribution for KSD; and
the critical threshold of the KSD test obtained by taking
some quantile of the bootstrap sampling distribution.
Args:
kappa_vals_list: list of array((n, n)), list of pre-computed kappa's.
Returns: (all lists have same length as kappa_vals_list)
ksd_boot_list: list of lists, samples from the bootstrap distribution.
boot_thres: list, critical threshold for KSD test.
"""
# Bootstrap estimates
boot_list = [ksd_bootstrap(kappa_vals, n_boot=n_boot)
for kappa_vals in kappa_vals_list]
# Compute quantile of bootstrap sampling distribution
boot_thres = [np.percentile(boot, 100.*quantile) for boot in boot_list]
return boot_list, boot_thres
def ksd_pvalue(boot_list, ustats):
"""
Computes the p-value of the KSD test.
Args:
boot_list: list, list of bootstrap statistics.
        ustats: list, values of the computed test statistics.
"""
assert len(boot_list) == len(ustats)
pvals = [np.mean(boot >= ustats)
for boot, ustats in izip(boot_list, ustats)]
return pvals
``` |
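A self-contained numeric sketch of the U- and V-statistic computations in `ksd_est` above, using a random symmetric matrix as a stand-in for a pre-computed KSD kernel matrix:
```python
import numpy as np

rng = np.random.RandomState(0)
n = 50
A = rng.randn(n, n)
kappa = (A + A.T) / 2.0  # symmetric stand-in for a kappa matrix
ustat = (kappa.sum() - np.trace(kappa)) / (n * (n - 1))  # off-diagonal average
vstat = kappa.sum() / n ** 2                             # includes the diagonal
```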
{
"source": "jiaseny/lspp",
"score": 2
} |
#### File: jiaseny/lspp/hawkes_simple.py
```python
from __future__ import division
from helper import *
from point_process import PointProcess
class HawkesSimple(PointProcess):
"""
Simple hawkes process model.
Members:
# ---------------- Constants ----------------------- #
# #
# N int number of nodes #
# B int number of kernels #
# num_events int number of events #
# T float time of last event #
# #
# ---------------- Event-related ------------------- #
# #
# events list of tuples (i, j, t) #
# node_events dict {(i, j): [t's]} << N**2 #
# #
# ---------------- Parameters ---------------------- #
# #
# gamma array N * N #
# xi array N * N * B #
# #
# ---------------- Hyper-parameters ---------------- #
# #
# prior_gamma tuple 2 #
# prior_xi array B #
# #
# ---------------- Cached statistics --------------- #
# #
# delta {(u, v): array(B * num_events(u, v)))} #
# Delta array N * N * B #
# #
# -------------------------------------------------- #
"""
def __init__(self, num_nodes, events, end_time):
"""
Initialize parameters for the Hawkes process.
Args:
num_nodes: An integer, the number of nodes.
events: A list containing 3-tuples: [(i, j, t)], where (i, j, t)
indicates that an edge from i to j appeared at time t.
Assume that time points start at 0.
"""
super(HawkesSimple, self).__init__(num_nodes, events, end_time)
# Model parameters
self.gamma = np.zeros((self.N, self.N))
self.xi = np.zeros((self.N, self.N, self.B))
# Hyper-prior parameters (shared across all params)
# Gamma shape and scale parameters for base rate and each kernel
self.priors = np.hstack((np.ones((1 + self.B, 1)),
np.ones((1 + self.B, 1))))
self.num_params = 2 * (1 + self.B)
# # Posteriors (shared across all params)
# self.post_gamma = (0, 0)
# self.post_xi = np.zeros(self.B)
# Sufficient statistics
self.delta = None
self.Delta = None
self.suff_stats_cached = False # Flag
self.update_suff_stats()
# Cache log-likelihood computations for efficiency
# self.loglik_cached = np.zeros((self.N, self.N), dtype=bool) # False
# self.loglik_values = np.zeros((self.N, self.N)) + np.nan
return
# ----------------------------------------------------------------------- #
# Intensity/likelihood related functions
# ----------------------------------------------------------------------- #
def update_suff_stats(self):
"""
Pre-compute the sufficient statistics for the event times:
\delta^{v,u}_{b,i} = \sum_{k: t^{v,u}_k < t^{u,v}_i}
\phi_b(t^{u,v}_i - t^{v,u}_k)
\Delta^{v,u}_{b,T} = \sum_k (\Phi_b(T - t^{v,u}_k) - \Phi_b(0))
and updates self.delta and self.Delta.
Returns:
self.delta, dict of arrays.
self.Delta, array.
"""
self.delta = {(u, v): np.zeros((self.B, self.num_node_events(u, v)))
for u in range(self.N) for v in range(self.N)}
self.Delta = np.zeros((self.N, self.N, self.B))
for u in range(self.N):
for v in range(self.N):
times = self.get_node_events(u, v)
recip_times = np.array(self.get_node_events(v, u))
# Compute \delta^{(v,u)}_{b,i}
for i, t in enumerate(times):
dt = t - recip_times[recip_times < t] # np.array
recip_sum = np.sum(self.kernels(dt), axis=1) # B * 1
self.delta[(u, v)][:, i] = recip_sum
# Compute \Delta^{(v,u)}_{b,T}
dT = self.T - recip_times[recip_times < self.T] # np.array
recip_sum = np.sum(self.kernels(dT, type='cdf'), axis=1) - \
len(dT) * np.sum(self.kernels(0, type='cdf'), axis=1)
self.Delta[u, v, :] = recip_sum # B * 1
self.suff_stats_cached = True
return self.delta, self.Delta
def base_rate(self, u, v):
"""
Computes the base rate \gamma_{pq} n_p n_q for given 1 <= u, v <= N.
Args:
u, v: Integers specifying the node indices.
Returns:
A float, the computed base rate value.
"""
return self.gamma[u, v]
def intensity(self, u, v, times):
"""
Computes the intensity function \lambda_{pq} for given 1 <= u, v <= N
evaluated at each time point in times.
Args:
u, v: Integers specifying the node indices.
times: List of time points to be evaluated on or a single number.
Returns:
An np.array containing the values of \lambda_{pq} evaluated at each
time point in times. If times is a single number, then return the
intensity value (float) evaluated at that time point.
"""
assert u in range(self.N) and v in range(self.N)
if isinstance(times, float) or isinstance(times, int):
# Return intensity value at a single time point
return self.intensity(u, v, [times])[0]
lambdas = np.zeros(len(times)) + self.base_rate(u, v)
# Reciprocal component
recip_times = np.array(self.get_node_events(v, u))
for i, t in enumerate(times):
dt = t - recip_times[recip_times < t] # np.array
recip_sum = np.sum(self.kernels(dt), axis=1) # B * 1
lambdas[i] += np.dot(self.xi[u, v], recip_sum)
return lambdas
def intensity_fast(self, u, v):
"""
Computes the intensity function \lambda_{pq} for given 1 <= u, v <= N
evaluated at each time point in self.get_node_events(u, v) using the
cached sufficient statistics self.delta.
NOTE: self.delta[(u, v)] is of dimension B * n_{pq}.
Args:
u, v: Integers specifying the node indices.
Returns:
An np.array containing the values of \lambda_{pq} evaluated at each
time point in self.node_events[(u, v)].
"""
assert u in range(self.N) and v in range(self.N)
if not self.suff_stats_cached:
self.update_suff_stats()
lambdas = self.base_rate(u, v)
lambdas += np.dot(self.xi[u, v], self.delta[(u, v)])
assert_equal(len(lambdas), self.num_node_events(u, v))
return lambdas
def integrated_intensity(self, u, v, T=None):
"""
Computes the value of the integrated intensity function
\Lambda_{pq}(0, t) for given 1 <= u, v <= N.
Args:
u, v: Integers specifying the node indices.
T: Float, until time T. Default is self.T.
Returns:
Float, value of the integrated intensity function.
"""
if T is None:
T = self.T
else:
assert T >= 0
recip_times = np.array(self.get_node_events(v, u))
dT = T - recip_times[recip_times < T] # np.array
recip_sum = np.sum(self.kernels(dT, type='cdf'), axis=1) # B * 1
recip_sum -= len(dT) * np.sum(self.kernels(0, type='cdf'), axis=1)
return self.base_rate(u, v) * T + np.dot(self.xi[u, v], recip_sum)
def integrated_intensity_fast(self, u, v):
"""
Computes the value of the integrated intensity function
\Lambda_{pq}(0, self.T) for given 1 <= u, v <= N using cached
sufficient statistics self.Delta.
NOTE: self.delta[(u, v)] is of dimension B * n_{pq}.
Args:
u, v: Integers specifying the node indices.
Returns:
Float, value of the integrated intensity function.
"""
if not self.suff_stats_cached:
self.update_suff_stats()
temp = self.base_rate(u, v) * self.T
temp += np.dot(self.xi[u, v], self.Delta[(u, v)])
return temp
def predict_probs(self, t0, delta):
"""
Computes the predicted probability that a link from u to v appears in
[t, t + delta) based only on the events data from [0, t)
for all combinations of u and v.
"""
N = self.N
t1 = t0 + delta
prob_dict = np.zeros((N, N)) # Predicted probs that link exists
for u in range(N):
for v in range(N):
recip_times = [t for t in self.get_node_events(v, u) if t < t0]
recip_times = np.array(recip_times)
temp0 = self.kernels(t0 - recip_times, type='cdf')
temp1 = self.kernels(t0 + delta - recip_times, type='cdf')
recip_sum = np.sum(temp1 - temp0, axis=1) # B * 1
Lambda = self.gamma[u, v] * delta
Lambda += np.dot(self.xi[u, v], recip_sum)
prob_dict[u, v] = 1. - np.exp(-Lambda)
return prob_dict
def predict_receiver(self, u, t):
"""
Predicts the recipient probs for a message sent from u at time t.
"""
vals = [self.intensity(u, v, t) for v in range(self.N)]
        vals[u] = 0  # Exclude the sender itself as a possible receiver
probs = normalize(vals)
return probs
# ----------------------------------------------------------------------- #
# MLE & MCMC
# ----------------------------------------------------------------------- #
def loglik(self, gamma, xi):
"""
Computes the log-likelihood function with all parameters tied.
"""
if not self.suff_stats_cached:
self.update_suff_stats()
loglik = 0
for u in range(self.N):
for v in range(self.N):
gterm = -gamma * self.T
bterm = -np.dot(xi, self.Delta[u, v])
lterm = np.sum(np.log(gamma +
np.dot(xi, self.delta[(u, v)][:, i]))
for i in range(self.num_node_events(u, v)))
loglik += gterm + bterm + lterm
# for u in range(self.N):
# for v in range(self.N):
# temp = -self.integrated_intensity_fast(u, v)
# temp += np.sum(np.log(self.intensity_fast(u, v)))
# loglik += temp
# loglik -= gamma + np.sum(xi) # Prior
return loglik
def loglik_grad(self, gamma, xi):
"""
Compute the gradient evaluated at gamma, xi.
"""
if not self.suff_stats_cached:
self.update_suff_stats()
# Unroll
gamma = np.array(gamma)
if len(gamma.shape) == 0:
gamma = gamma[np.newaxis]
if len(gamma.shape) == 1: # 0-D or 1-D array
gamma = gamma[:, np.newaxis]
num = gamma.shape[0]
gradient = np.zeros((len(gamma), 1 + self.B))
for u in range(self.N):
for v in range(self.N):
Delta = np.insert(self.Delta[u, v], 0, self.T)
Delta = np.tile(Delta, (num, 1))
for i in range(self.num_node_events(u, v)):
delta = np.insert(self.delta[(u, v)][:, i], 0, 1.)
delta = np.tile(delta, (num, 1))
denom = gamma + np.dot(xi, np.reshape(self.delta[(u, v)][:, i], (self.B, 1)))
denom = np.tile(denom, (1, 1+self.B))
gradient += delta/denom
gradient -= Delta
# gradient -= 1. # Prior
return gradient[0] if len(gradient) == 1 else gradient
def loglik_hess(self, gamma, xi):
"""
Compute the Hessian matrix evaluated at gamma, xi.
"""
if not self.suff_stats_cached:
self.update_suff_stats()
hessian = np.zeros((1 + self.B, 1 + self.B))
for u in range(self.N):
for v in range(self.N):
for i in range(self.num_node_events(u, v)):
delta = np.insert(self.delta[(u, v)][:, i], 0, 1.)
temp = np.outer(delta, delta)
denom = (gamma + np.dot(xi, self.delta[(u, v)][:, i]))**2
hessian -= temp/denom
return hessian
def mle(self, method='grad-ascent', **kwargs):
"""
Computes the MLE with all parameters tied.
"""
def neg_loglik_obj(x):
"""
Computes the negative log-likelihood value.
Args:
x: array; x[0] := gamma and x[1:] := xi[:-1]
"""
gamma, xi = self.unpack_params(x)
return -self.loglik(gamma, xi)
def neg_loglik_obj_grad(x):
"""
Computes the negative log-likelihood value.
Args:
x: array; x[0] := gamma and x[1:] := xi[:-1]
"""
gamma, xi = self.unpack_params(x)
return -self.loglik_grad(gamma, xi)
if method == 'grad-ascent':
bounds = zip([_EPS] + [_EPS] * self.B,
[None] + [None] * self.B) # 1-_EPS
gamma_init = rand.uniform()
xi_init = rand.uniform(size=self.B)
x_init = np.hstack((gamma_init, xi_init))
res = minimize(neg_loglik_obj,
jac=neg_loglik_obj_grad,
x0=x_init,
method='L-BFGS-B', bounds=bounds, **kwargs)
assert res.success, "MLE optimization failed ..."
x = res.x if res.success else None
elif method == 'coord-ascent':
            x, _, _ = coord_descent(obj_fun=neg_loglik_obj,
                                    num_params=1 + self.B, **kwargs)
else:
            raise ValueError("MLE method %r not understood!" % method)
mle_params = self.unpack_params(x)
self.set_mle_params(mle_params)
return mle_params
# ----------------------------------------------------------------------- #
# Book-keeping
# ----------------------------------------------------------------------- #
def unpack_params(self, x):
"""
Args:
x: array; x[0] := gamma and x[1:] := xi[:-1]
"""
assert_equal(len(x), 1+self.B)
gamma = x[0]
xi = x[1:]
assert_ge(gamma, 0)
assert all_pos(xi)
return gamma, xi
def set_mle_params(self, res):
"""
Given an array containing the unpacked parameter set their values
accordingly.
"""
gamma, xi = res
self.gamma[:] = gamma
self.xi[:] = xi
return
# ----------------------------------------------------------------------- #
# Variational inference
# ----------------------------------------------------------------------- #
def elbo_mc(self, params, num_mc_iters=200):
"""
Computes the evidence lower bound for all pairs of nodes, assuming
tied parameters with the priors
gamma ~ Gamma(priors[0][:])
xi[b] ~ Gamma(priors[b][:]) for b = 1, ..., B,
and posteriors
gamma ~ Gamma(pvec[0], qvec[0])
xi[b] ~ Gamma(pvec[b], qvec[b]) for b = 1, ..., B,
by evalutaing the intergal in the expected log-likelihood using Monte
Carlo.
Args:
pvec: array of length B+1 containing the posterior shape params
for the base rate and each kernel;
            qvec: array of length B+1 containing the posterior scale params
for the base rate and each kernel.
NOTE:
The implementation supports vectorized computation. Hence,
pvec and qvec can be 2-D arrays of shape (*, B+1), and the returned
ELBO value will be a 1-D array of length *.
"""
if not self.suff_stats_cached:
self.update_suff_stats()
# Unroll
params = np.array(params)
if len(params.shape) == 1: # 0-D or 1-D array
params = params[np.newaxis]
assert_equal(params.dtype, float)
assert_equal(params.shape[1], 2 * (1+self.B))
assert all_pos(params), params
pvec = params[:, :(1+self.B)] # Shape params
qvec = params[:, (1+self.B):] # Scale params
# Monte Carlo estimate of log-likelihood
logliks = np.zeros((params.shape[0], num_mc_iters))
for k in range(num_mc_iters): # Monte Carlo iteration
if (k+1) % 20 == 0:
print "Computing Monte Carlo estimate: %d / %d ..." % \
(k+1, num_mc_iters)
# xi = rand.lognormal(mean=pvec, sigma=qvec) # Including gamma
xi = rand.gamma(shape=pvec, scale=1/qvec) # Including gamma
temp = 0
for u in range(self.N):
for v in range(self.N):
Delta = np.insert(self.Delta[u, v], 0, self.T) # (1+B)-dim
temp -= np.dot(xi, Delta)
for i in range(self.num_node_events(u, v)):
delta = np.insert(self.delta[(u, v)][:, i], 0, 1.)
temp += np.log(np.dot(xi, delta))
logliks[:, k] = temp
exloglik = np.mean(logliks, axis=1) # Monte Carlo average
print "Estimated expected loglik = %s, std.dev = %s" % \
(exloglik, np.std(logliks, axis=1))
# KL-divergence terms
kl_terms = kl_gamma(pvec, qvec,
np.tile(self.priors[:, 0], (pvec.shape[0], 1)),
np.tile(self.priors[:, 1], (pvec.shape[0], 1)))
kl_sum = np.sum(kl_terms, axis=1)
res = exloglik - kl_sum
return res[0] if len(res) == 1 else res
def elbo(self, params):
"""
Computes the evidence lower bound for all pairs of nodes, assuming
tied parameters with the priors
gamma ~ Gamma(priors[0][:])
xi[b] ~ Gamma(priors[b][:]) for b = 1, ..., B,
and posteriors
gamma ~ Gamma(pvec[0], qvec[0])
xi[b] ~ Gamma(pvec[b], qvec[b]) for b = 1, ..., B.
Args:
pvec: array of length B+1 containing the posterior shape params
for the base rate and each kernel;
            qvec: array of length B+1 containing the posterior scale params
for the base rate and each kernel.
NOTE:
The implementation supports vectorized computation. Hence,
pvec and qvec can be 2-D arrays of shape (*, B+1), and the returned
ELBO value will be a 1-D array of length *.
"""
if not self.suff_stats_cached:
self.update_suff_stats()
# Unroll
params = np.array(params)
if len(params.shape) == 1: # 0-D or 1-D array
params = params[np.newaxis]
assert_equal(params.dtype, float)
assert_equal(params.shape[1], 2 * (1+self.B))
assert all_pos(params), params
pvec = params[:, :(1+self.B)] # Shape params
qvec = params[:, (1+self.B):] # Scale params
# Expected log-likelihood
exloglik = 0.
for u in range(self.N):
for v in range(self.N):
Delta = np.insert(self.Delta[u, v], 0, self.T) # (1+B)-dim
term = -np.dot(pvec/qvec, Delta)
lterm = 0.
for i in range(self.num_node_events(u, v)):
delta = np.insert(self.delta[(u, v)][:, i], 0, 1.)
temp = np.exp(digamma(pvec) - np.log(qvec))
lterm += np.log(np.dot(temp, delta))
exloglik += term + lterm # Expected log-likelihood
# KL-divergence terms
kl_terms = kl_gamma(pvec, qvec,
np.tile(self.priors[:, 0], (pvec.shape[0], 1)),
np.tile(self.priors[:, 1], (qvec.shape[0], 1)))
kl_sum = np.sum(kl_terms, axis=1)
res = exloglik - kl_sum
return res[0] if len(res) == 1 else res
def coord_ascent(self, monte_carlo=False, **kwargs):
"""
Performs coordinate ascent to maximize the evidence lower bound.
Returns:
x: array of length 2 * (1+B), converged parameter values.
x_vals: array of shape (1+max_iter, 2 * (1+B)), stores previous
params values after each full coordinate descent iteration.
obj_vals: array of length (1+max_iter), stores previous objective
values after each full coordinate descent iteration.
"""
if not self.suff_stats_cached:
self.update_suff_stats()
elbo = self.elbo_mc if monte_carlo else self.elbo
return coord_ascent(obj_fun=elbo, num_params=self.num_params, **kwargs)
# ----------------------------------------------------------------------- #
# MCMC
# ----------------------------------------------------------------------- #
def metropolis(self, num_samples=1000, burnin=500):
"""
Metropolis-Hastings sampling to infer gamma and xi.
"""
def log_exponential_pdf(x, l):
"""
Log pdf for the Exp(l) distribution evaluated at x.
"""
return np.log(l) - l * x
def llik_func(x):
gamma, xi = self.unpack_params(x)
return self.loglik(gamma, xi)
res = np.zeros((num_samples+1, 1+self.B))
res[0] = rand.normal(loc=.1, scale=.02, size=(1+self.B)) # Initialize
# res[0] = rand.exponential(size=(1+self.B)) # Initialize
for i in range(1, num_samples+1):
if i > 0 and i % 50 == 0:
print "M-H sampled %d samples ..." % i
x_old = res[i-1]
x_new = rand.normal(loc=x_old, scale=.02) # Proposal
# x_new = rand.exponential(scale=1./x_old) # Proposal
# # Acceptance ratio
# temp = llik_func(x_new) - llik_func(x_old)
# temp += np.sum(log_exponential_pdf(x_old, x_new))
# temp -= np.sum(log_exponential_pdf(x_new, x_old))
# ratio = np.exp(min(0, temp))
ratio = np.exp(min(0, llik_func(x_new) - llik_func(x_old))) \
if np.all(x_new > 0) else 0
# print x_old, x_new, ratio
res[i] = x_new if rand.uniform() < ratio else x_old
return res[(burnin+1):]
def slice_sample(self, num_samples=1000):
"""
Slice sampling to infer gamma and xi.
"""
def llik_func(x):
gamma, xi = self.unpack_params(x)
return self.loglik(gamma, xi)
res = np.zeros((num_samples+1, 1+self.B))
res[0] = rand.uniform(size=(1+self.B)) # Initialize
for i in range(1, num_samples+1):
if i > 0 and i % 50 == 0:
print "Slice-sampled %d samples ..." % i
res[i] = multivariate_slice_sample(
x_init=res[i-1],
ll_func=llik_func, window_size=1, L_bound=_EPS)
return res[1:]
# ----------------------------------------------------------------------- #
# Simulation
# ----------------------------------------------------------------------- #
def set_params(self, num_nodes=None, events=None, end_time=None,
gamma=None, xi=None):
"""
Manually set all (or a subset of) the parameters for the Hawkes-IRM.
Args:
See self.__init__() description.
"""
if num_nodes is not None:
self.N = num_nodes
if events is not None:
self.node_events = dict()
self.num_events = 0
self.process_node_events(events)
self.T = max(flatten(self.node_events.values())) \
if self.node_events else 0
self.update_suff_stats()
if end_time is not None:
assert_ge(end_time, self.T)
self.T = end_time
if gamma is not None:
self.gamma = gamma
if xi is not None:
self.xi = xi
return
def simulate_single(self, c):
"""
Simulate a single 1-d self-exciting Hawkes process with intensity
\lambda_{cc}(t).
Args:
c: Integer, node index (in 0, ..., self.N).
Returns:
A list of simulated event times.
"""
assert c in range(self.N)
self.node_events[(c, c)] = list() # Clear relevant events history
num = 1 # Number of simulated events
rate = self.base_rate(c, c) # Maximum intensity
jump_size = float(np.dot(self.xi[c, c], self.kernels(0)))
# First event
u = rand.uniform()
s = -np.log(u) / rate
if s > self.T:
return self.node_events[(c, c)]
self.node_events[(c, c)].append(s)
# Intensity function is left-continuous
rate = self.intensity(c, c, s) + jump_size
# General routine
while s < self.T:
u = rand.uniform()
s += -np.log(u) / rate
if s >= self.T:
break
# Rejection test
d = rand.uniform()
new_rate = self.intensity(c, c, s)
if d <= new_rate / rate:
self.node_events[(c, c)].append(s)
# Intensity function is left-continuous
rate = new_rate + jump_size
num += 1
else:
rate = new_rate
assert num == len(self.node_events[(c, c)])
print "Simulated %d events for node %d in [0, %.2f)." % \
(num, c, self.T)
return self.node_events[(c, c)]
def simulate_pair(self, u, v):
"""
Simulate a pair of 1-d self-exciting Hawkes process with intensity
\lambda_{pq}(t) and \lambda_{qp}(t).
The implementation can be generalized to the multi-variate case.
Args:
u, v: Integer, node indices (in 0, ..., self.N).
Returns:
            A list of simulated event times.
"""
assert u in range(self.N) and v in range(self.N)
assert u < v # simulate_pair should only be called once for each pair
# Clear relevant events history
self.node_events[(u, v)] = list()
self.node_events[(v, u)] = list()
        # Intensity function
num = 1 # Total number of simulated events
rate = self.base_rate(u, v) + self.base_rate(v, u)
# First event
U = rand.uniform()
s = -np.log(U) / rate
if s > self.T: # Done
return self.node_events[(u, v)], self.node_events[(v, u)]
def attribution_test(t):
"""
Determines which process the newly generated event t should be
attributed to.
"""
r_pq = self.intensity(u, v, t)
r_qp = self.intensity(v, u, t)
rate = r_pq + r_qp
idx = (u, v) if rand.uniform() < r_pq / rate else (v, u)
return idx, rate
# Attribution test: which process gets the event
idx, rate = attribution_test(s)
self.node_events[idx].append(s)
# Intensity function is left-continuous
rate += float(np.dot(self.xi[idx], self.kernels(0)))
# General routine
while s < self.T:
U = rand.uniform()
s += -np.log(U) / rate
if s >= self.T:
break # Done
# Rejection test
d = rand.uniform()
new_rate = self.intensity(u, v, s) + self.intensity(v, u, s)
if d <= new_rate / rate:
idx, rate = attribution_test(s)
self.node_events[idx].append(s)
# Intensity function is left-continuous
rate += float(np.dot(self.xi[idx], self.kernels(0)))
num += 1
else:
rate = new_rate
assert num == len(self.node_events[(u, v)]) + \
len(self.node_events[(v, u)])
print "Simulated %d events for (%d, %d) node pair in [0, %.2f)." % \
(num, u, v, self.T)
return self.node_events[(u, v)], self.node_events[(v, u)]
def simulate(self):
"""
Simulate event times for all Hawkes processes.
"""
self.events = list()
self.node_events = dict() # Clear relevant events history
self.suff_stats_cached = False
# Simulate node-level events (self.node_events)
for c in range(self.N):
self.simulate_single(c)
for u in range(self.N):
for v in range(u + 1, self.N):
self.simulate_pair(u, v)
events = self.extract_events()
return events
``` |
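For reference, a self-contained sketch (not part of the module above) of the same thinning scheme used in `simulate_single`, written for a univariate Hawkes process with intensity gamma + sum_i beta * exp(-(t - t_i) / tau):
```python
import numpy as np

def simulate_hawkes_1d(gamma, beta, tau, T, seed=0):
    """Ogata-style thinning for a univariate exponential-kernel Hawkes process."""
    rng = np.random.RandomState(seed)
    def intensity(t, events):
        past = np.asarray([s for s in events if s < t])
        return gamma + np.sum(beta * np.exp(-(t - past) / tau))
    events, s, rate = [], 0.0, gamma  # intensity equals gamma before any event
    while True:
        s += -np.log(rng.uniform()) / rate    # candidate point under the bound
        if s >= T:
            break
        new_rate = intensity(s, events)
        if rng.uniform() <= new_rate / rate:  # accept w.p. lambda(s) / bound
            events.append(s)
            rate = new_rate + beta            # intensity jumps by beta at the event
        else:
            rate = new_rate                   # intensity only decays until the next event
    return events
```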
{
"source": "jiaseny/sp-gof",
"score": 3
} |
#### File: jiaseny/sp-gof/ex-hawkes.py
```python
from __future__ import division
from util import *
from kernels import *
from hawkes_process import HawkesProcess
from ksd import KSD
from mmd import MMD
if __name__ == "__main__":
dim, n, l, kernel_type, gamma0, beta0, tau0, gamma, beta, tau, seed, res_dir = sys.argv[1:]
dim = int(dim) # Dimension
n = int(n) # Sample size
l = float(l) # Domain length
# Null model parameters
gamma0 = float(gamma0)
beta0 = float(beta0)
tau0 = float(tau0)
# Alternative model parameters
gamma = float(gamma)
beta = float(beta)
tau = float(tau)
seed = int(seed) # Random seed
bounds = [(0, l)]*dim # Domain
print ("dim = %d\nn = %d\nl = %s\nkernel_type = %s\nbounds = %r\n" +
"gamma0 = %s\nbeta0 = %s\ntau0 = %s\n" +
"gamma = %s\nbeta = %s\ntau = %s\nseed = %s\nres_dir=%s\n") % \
(dim, n, l, kernel_type, bounds, gamma0, beta0, tau0, gamma, beta, tau,
seed, res_dir)
# Stationarity condition
assert dim == 1, dim
assert beta0 * tau0 < 1
assert beta * tau < 1
rand.seed(seed)
# --------------------------- Draw samples --------------------------- #
# Null model
model_p = HawkesProcess(dim=dim, bounds=bounds,
gamma=gamma0, beta=beta0, tau=tau0)
samples_p = model_p.sample(num_samples=n)
# Set q to perturbed dist or true p
true_dist = rand.binomial(n=1, p=.5) # 0 for p, 1 for q
print "Ground truth: %s" % ("q != p" if true_dist else "q == p")
# Parameters for alternative model
gamma_q, beta_q, tau_q = (gamma, beta, tau) if true_dist else \
(gamma0, beta0, tau0)
# Alternative model (draw samples from)
model_q = HawkesProcess(dim=dim, bounds=bounds,
gamma=gamma_q, beta=beta_q, tau=tau_q)
samples_q = model_q.sample(num_samples=n)
# --------------- Set kernel function for KSD and MMD --------------- #
rbf_h = None
    # TODO! Make sure KSD and MMD are using the same kernel_fun and bandwidth
if kernel_type == 'emd':
kernel_fun = emd_kernel
# int_method = 'fixed_quad'
int_method = 'trapz'
ksd_method = 'indirect'
elif kernel_type == 'euclid':
kernel_fun = euclidean_kernel
int_method = 'fixed_quad'
ksd_method = 'indirect'
elif kernel_type == 'mmd':
        # Compute RBF bandwidth using median heuristic
dists = pdist(np.concatenate(samples_q), metric="sqeuclidean")
rbf_h = np.median(dists) # dists[dists > 0]
del dists # Free memory
def kernel_fun(X, Y):
return mmd_kernel(X, Y, rbf_h=rbf_h)
int_method = 'trapz'
ksd_method = 'direct'
else:
raise ValueError("kernel_type %s not recognized!" % kernel_type)
# ------------------------- Perform KSD test ------------------------- #
print "Performing KSD test ..."
ksd = KSD(dim=model_p.dim, bounds=model_p.bounds,
papangelou_fun=model_p.papangelou,
kernel_type=kernel_type,
int_method=int_method,
rbf_h=rbf_h,
mp_npts=400,
mc_npts=10**4,
disp=False)
kappa = ksd.compute_kappa(samples_q, method=ksd_method)
ksd_stat = ksd.test_statistic(kappa)
ksd_thres, ksd_boot = ksd.bootstrap(kappa)
ksd_pval = ksd.p_value(ksd_stat, ksd_boot)
ksd_pred = 1 * (ksd_stat > ksd_thres) # 0 for p, 1 for q
# ------------------------- Perform MMD test ------------------------- #
print "Performing MMD test ..."
mmd = MMD(kernel_fun=kernel_fun)
mmd_stat, mmd_thres, mmd_pval, _ = mmd.perform_test(samples_p, samples_q)
mmd_pred = 1 * (mmd_stat > mmd_thres) # 0 for p, 1 for q
# ------------------------- Save results ------------------------- #
res = {'dim': dim, 'n': n, 'kernel_type': kernel_type,
'gamma0': gamma0, 'beta0': beta0, 'tau0': tau0,
'gamma': gamma, 'beta': beta, 'tau': tau, 'rbf_h': rbf_h,
'true_dist': true_dist, 'int_method': int_method,
'ksd_stat': ksd_stat, 'ksd_thres': ksd_thres,
'ksd_pval': ksd_pval, 'ksd_pred': ksd_pred,
'mmd_stat': mmd_stat, 'mmd_thres': mmd_thres,
'mmd_pval': mmd_pval, 'mmd_pred': mmd_pred}
pckl_write(res, res_dir + "hawkes-d%d-n%d-l%s-%s-gamma%.3f-beta%.3f-tau%.3f-seed%d.res" %
(dim, n, l, kernel_type, gamma, beta, tau, seed))
print 'Finished!'
```
#### File: jiaseny/sp-gof/hawkes_process.py
```python
from __future__ import division
from util import *
from point_process import PointProcess
class HawkesProcess(PointProcess):
"""
Hawkes process (one-dimensional).
"""
def __init__(self, dim, bounds, gamma, beta, tau):
"""
Initializes a one-dimensional Hawkes process.
Args:
dim: int, dimension of domain.
bounds: list of length dim, bounding box for each dimension.
gamma, beta, tau: floats, parameters in triggering function.
"""
super(HawkesProcess, self).__init__(dim, bounds)
if dim != 1 or bounds[0][0] != 0:
raise NotImplementedError(
"Only 1-D Hawkes processes, with time starting at 0!\n")
assert beta*tau < 1, "Stationarity condition violated!\n"
# Model parameters
self.bounds = bounds
self.T = self.bounds[0][1] # End time
self.gamma = gamma
self.beta = beta
self.tau = tau
return
def intensity(self, t, X):
"""
Conditional intensity function given event history X up to time t.
Args:
t: array(dim), a new point location.
X: array((..., dim)), existing points in a sample.
"""
X = np.asarray(X)
recip = np.exp(-(t - X[X < t]) / self.tau) # Reciprocation
return self.gamma + np.sum(self.beta * recip, axis=0)
def trigger(self, dt):
"""
Computes triggering function.
Args:
dt: float or array, delta time.
"""
return self.beta * np.exp(-dt / self.tau)
def papangelou(self, t, X):
"""
Papangelou conditional intensity function at time t given points X.
This is different from the conditional intensity (self.intensity).
Args:
t: array(dim), a new time point.
X: array((..., dim)), existing time-points in a sample.
"""
assert X.shape[1] == 1
times = X.ravel() # array
denom = [self.intensity(u, X) for u in times[times > t]] # Check
g_t = self.trigger(times[times > t] - t)
log_frac = np.sum(np.log(denom + g_t) - np.log(denom))
# Integrated intensity term
log_Lterm = -self.beta*self.tau * (1.-np.exp(-(self.T-t)/self.tau))
# Intensity term
log_lterm = np.log(self.intensity(t, X))
return np.exp(log_Lterm + log_lterm + log_frac)
def simulate(self):
"""
Simulate a self-exciting Hawkes process using Ogata's thinning algorithm.
Returns:
X: array((..., dim)) of simulated event times.
"""
if self.dim != 1 or self.bounds[0][0] != 0:
raise NotImplementedError
# Use list for in-place append, but return X as array
X = list() # Simulated event times
num = 0 # Number of simulated events
rate = self.gamma # Maximum intensity
# First event
s = -np.log(rand.uniform()) / rate
if s > self.T:
return np.array(X).reshape((num, self.dim))
X.append(s)
num += 1
rate = self.intensity(s, X) + self.beta # Left-continuous
# Subsequent events
while s < self.T:
s += -np.log(rand.uniform()) / rate
if s >= self.T:
break
# Rejection test
new_rate = self.intensity(s, X)
if rand.uniform() <= new_rate / rate:
X.append(s)
num += 1
rate = new_rate + self.beta # Left-continuous
else:
rate = new_rate
assert_len(X, num)
return np.asarray(X).reshape((num, self.dim))
def sample(self, num_samples):
"""
Draw samples from a Hawkes process. Wrapper for simulate().
Args:
num_samples: int, number of point process realizations.
"""
return [self.simulate() for _ in xrange(num_samples)]
``` |
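A minimal usage sketch for the `HawkesProcess` class above (not part of the repository): it assumes `hawkes_process.py` and its `util`/`point_process` dependencies are importable, and it follows the file's Python 2 style (`xrange`, print statements), so it is meant to run under Python 2. Parameter values are illustrative and satisfy the stationarity check `beta * tau < 1`.

```python
# Hypothetical usage sketch; module path and parameter values are assumptions.
from hawkes_process import HawkesProcess

bounds = [(0, 50.0)]  # observation window [0, T) with T = 50
model = HawkesProcess(dim=1, bounds=bounds, gamma=0.5, beta=0.8, tau=1.0)

samples = model.sample(num_samples=5)        # list of arrays, each of shape (n_i, 1)
counts = [X.shape[0] for X in samples]
print("event counts per realization: %r" % counts)
```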
{
"source": "jiashenC/eva",
"score": 2
} |
#### File: jiashenC/eva/eva_client.py
```python
import asyncio
from src.server.client import EvaClient, start_client
from src.configuration.configuration_manager import ConfigurationManager
from src.utils.logging_manager import LoggingManager
from src.utils.logging_manager import LoggingLevel
def eva_client():
"""
Start the EVA client
"""
# Get the hostname and port information from the configuration file
config = ConfigurationManager()
hostname = config.get_value('server', 'host')
port = config.get_value('server', 'port')
# Launch the client
try:
asyncio.run(start_client(factory=lambda: EvaClient(),
host=hostname,
port=port,
max_retry_count=3)
)
except Exception as e:
LoggingManager().log(e, LoggingLevel.CRITICAL)
if __name__ == '__main__':
# execute only if run as the entry point into the program
eva_client()
```
#### File: src/expression/aggregation_expression.py
```python
from src.expression.abstract_expression import AbstractExpression, \
ExpressionType, \
ExpressionReturnType
from src.models.storage.batch import Batch
class AggregationExpression(AbstractExpression):
def __init__(self, exp_type: ExpressionType, left: AbstractExpression,
right: AbstractExpression):
children = []
if left is not None:
children.append(left)
if right is not None:
children.append(right)
super().__init__(exp_type, rtype=ExpressionReturnType.INTEGER,
children=children) # can also be a float
def evaluate(self, *args):
batch = self.get_child(0).evaluate(*args)
if self.etype == ExpressionType.AGGREGATION_SUM:
return Batch(frames=batch.frames.agg(['sum']))
elif self.etype == ExpressionType.AGGREGATION_COUNT:
return Batch(frames=batch.frames.agg(['count']))
elif self.etype == ExpressionType.AGGREGATION_AVG:
return Batch(frames=batch.frames.agg(['mean']))
elif self.etype == ExpressionType.AGGREGATION_MIN:
return Batch(frames=batch.frames.agg(['min']))
elif self.etype == ExpressionType.AGGREGATION_MAX:
return Batch(frames=batch.frames.agg(['max']))
def __eq__(self, other):
is_subtree_equal = super().__eq__(other)
if not isinstance(other, AggregationExpression):
return False
return (is_subtree_equal
and self.etype == other.etype)
```
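A quick pandas-only illustration (independent of EVA's `Batch` class) of what the `frames.agg([...])` calls above return: a one-row DataFrame indexed by the aggregate's name, which `evaluate` then wraps back into a `Batch`.

```python
import pandas as pd

frames = pd.DataFrame({'id': [1, 2, 3], 'score': [0.5, 0.7, 0.9]})
print(frames.agg(['sum']))    #      id  score
                              # sum   6    2.1
print(frames.agg(['mean']))   #       id  score
                              # mean 2.0    0.7
```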
#### File: src/expression/function_expression.py
```python
import pandas as pd
from enum import Enum, unique
from typing import Callable
from src.constants import NO_GPU
from src.executor.execution_context import Context
from src.expression.abstract_expression import AbstractExpression, \
ExpressionType
from src.models.storage.batch import Batch
from src.udfs.gpu_compatible import GPUCompatible
from src.catalog.models.udf_io import UdfIO
@unique
class ExecutionMode(Enum):
# EXEC means the executed function mutates the batch frame and returns
# it back. The frame batch is mutated.
EXEC = 1
# EVAL function with return values
EVAL = 2
class FunctionExpression(AbstractExpression):
"""
Expression used for function evaluation
Arguments:
func (Callable): UDF or EVA built-in function for performing
operations on the frame batch
mode (ExecutionMode): The mode in which execution needs to happen.
Will just return the output in EVAL mode. EXEC mode updates the
BatchFrame with output.
is_temp (bool, default:False): In case of EXEC type, decides if the
outcome needs to be stored in BatchFrame temporarily.
output(str): The column to return after executing function
output_obj(UdfIO): The catalog object corresponding to the func_output.
To be populated by optimizer.
"""
def __init__(self, func: Callable,
mode: ExecutionMode = ExecutionMode.EVAL, name=None,
is_temp: bool = False, output=None,
**kwargs):
if mode == ExecutionMode.EXEC:
assert name is not None
super().__init__(ExpressionType.FUNCTION_EXPRESSION, **kwargs)
self._context = Context()
self._mode = mode
self._name = name
self._function = func
self._is_temp = is_temp
self._output = output
self._output_obj = None
@property
def name(self):
return self._name
@property
def output(self):
return self._output
@property
def output_obj(self):
return self._output_obj
@output_obj.setter
def output_obj(self, val: UdfIO):
self._output_obj = val
@property
def function(self):
return self._function
@function.setter
def function(self, func: Callable):
self._function = func
def evaluate(self, batch: Batch):
new_batch = batch
child_batches = [child.evaluate(batch) for child in self.children]
if len(child_batches):
new_batch = Batch.merge_column_wise(child_batches)
func = self._gpu_enabled_function()
outcomes = func(new_batch.frames)
outcomes = Batch(pd.DataFrame(outcomes))
if self._output:
return outcomes.project([self._output])
else:
return outcomes
def _gpu_enabled_function(self):
if isinstance(self._function, GPUCompatible):
device = self._context.gpu_device()
if device != NO_GPU:
return self._function.to_device(device)
return self._function
def __eq__(self, other):
is_subtree_equal = super().__eq__(other)
if not isinstance(other, FunctionExpression):
return False
return (is_subtree_equal and self.name == other.name
and self.output == other.output
and self.output_obj == other.output_obj
and self.function == other.function)
```
#### File: optimizer/generators/load_generator.py
```python
from src.optimizer.generators.base import Generator
from src.optimizer.operators import LogicalLoadData, Operator
from src.planner.load_data_plan import LoadDataPlan
class LoadDataGenerator(Generator):
def __init__(self):
self._table_metainfo = None
self._path = None
def _visit(self, operator: Operator):
if isinstance(operator, LogicalLoadData):
self._table_metainfo = operator.table_metainfo
self._path = operator.path
def build(self, operator: Operator):
self.__init__()
self._visit(operator)
load_plan = LoadDataPlan(self._table_metainfo, self._path)
return load_plan
```
#### File: src/spark/session.py
```python
from pyspark.sql import SparkSession
from pyspark.conf import SparkConf
from src.configuration.configuration_manager import ConfigurationManager
from src.utils.logging_manager import LoggingManager
class Session(object):
"""
Wrapper around Spark Session
"""
_instance = None
_session = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(Session, cls).__new__(cls)
return cls._instance
def __init__(self):
self._config = ConfigurationManager()
name = self._config.get_value('core', 'application')
self.init_spark_session(name)
def init_spark_session(self, application_name, spark_master=None):
"""Setup a spark session.
:param spark_master: A master parameter used by spark session builder.
Use default value (None) to use system
environment configured spark cluster.
Use 'local[*]' to run on a local box.
:return: spark_session: A spark session
"""
eva_spark_conf = SparkConf()
pyspark_config = self._config.get_value('pyspark', 'property')
for key, value in pyspark_config.items():
eva_spark_conf.set(key, value)
session_builder = SparkSession \
.builder \
.appName(application_name) \
.config(conf=eva_spark_conf)
if spark_master:
session_builder.master(spark_master)
# Gets an existing SparkSession or,
# if there is no existing one, creates a new one based
# on the options set in this builder.
self._session = session_builder.getOrCreate()
# Configure logging
log4j_level = LoggingManager().getLog4JLevel()
spark_context = self._session.sparkContext
spark_context.setLogLevel(log4j_level)
def get_session(self):
return self._session
def get_context(self):
return self._session.sparkContext
def stop(self):
self._session.stop()
def __del__(self):
self._session.stop()
```
#### File: test/expression/test_function_expression.py
```python
import unittest
import pandas as pd
from mock import MagicMock, Mock, patch
from src.constants import NO_GPU
from src.expression.function_expression import FunctionExpression, \
ExecutionMode
from src.models.storage.batch import Batch
from src.udfs.gpu_compatible import GPUCompatible
class FunctionExpressionTest(unittest.TestCase):
def test_should_work_for_function_without_children_eval_mode(self):
expression = FunctionExpression(lambda x: pd.DataFrame(x))
values = Batch(pd.DataFrame([1, 2, 3]))
actual = expression.evaluate(values)
self.assertEqual(values, actual)
@unittest.skip("outcome in batch is not used.")
def test_should_update_the_batch_with_outcomes_in_exec_mode(self):
values = [1, 2, 3]
expression = FunctionExpression(lambda x: values,
mode=ExecutionMode.EXEC, name="test")
expected_batch = Batch(frames=pd.DataFrame(),
outcomes={"test": [1, 2, 3]})
input_batch = Batch(frames=pd.DataFrame())
expression.evaluate(input_batch)
self.assertEqual(expected_batch, input_batch)
def test_should_throw_assert_error_when_name_not_provided_exec_mode(self):
self.assertRaises(AssertionError,
lambda _=None:
FunctionExpression(lambda x: [],
mode=ExecutionMode.EXEC),
)
def test_when_function_executor_with_a_child_should_allow_chaining(self):
expression = FunctionExpression(lambda x:
pd.DataFrame(x))
child = FunctionExpression(lambda x: x + 1)
expression.append_child(child)
values = Batch(pd.DataFrame([1, 2, 3]))
actual = expression.evaluate(values)
expected = Batch(pd.DataFrame([2, 3, 4]))
self.assertEqual(expected, actual)
def test_should_filter_function_output(self):
expression = FunctionExpression(lambda x: x + 1, output='id')
values = pd.DataFrame({'id': [1, 2], 'data': [1, 2]})
actual = expression.evaluate(Batch(values))
expected = Batch(pd.DataFrame(values['id']) + 1)
self.assertEqual(expected, actual)
@unittest.skip("temp outcome in batch is not used.")
def test_should_update_temp_outcomes_when_is_temp_set_exec_mode(self):
values = [1, 2, 3]
expression = FunctionExpression(lambda x: values,
mode=ExecutionMode.EXEC,
name="test", is_temp=True)
expected_batch = Batch(frames=pd.DataFrame(),
temp_outcomes={"test": [1, 2, 3]})
input_batch = Batch(frames=pd.DataFrame())
expression.evaluate(input_batch)
self.assertEqual(expected_batch, input_batch)
@patch('src.expression.function_expression.Context')
def test_function_move_the_device_to_gpu_if_compatible(self, context):
context_instance = context.return_value
mock_function = MagicMock(spec=GPUCompatible)
gpu_mock_function = Mock(return_value=pd.DataFrame())
gpu_device_id = '2'
mock_function.to_device.return_value = gpu_mock_function
context_instance.gpu_device.return_value = gpu_device_id
expression = FunctionExpression(mock_function,
mode=ExecutionMode.EXEC,
name="test", is_temp=True)
input_batch = Batch(frames=pd.DataFrame())
expression.evaluate(input_batch)
mock_function.to_device.assert_called_with(gpu_device_id)
gpu_mock_function.assert_called()
def test_should_use_the_same_function_if_not_gpu_compatible(self):
mock_function = MagicMock(return_value=pd.DataFrame())
expression = FunctionExpression(mock_function,
mode=ExecutionMode.EXEC,
name="test", is_temp=True)
input_batch = Batch(frames=pd.DataFrame())
expression.evaluate(input_batch)
mock_function.assert_called()
@patch('src.expression.function_expression.Context')
def test_should_execute_same_function_if_no_gpu(self, context):
context_instance = context.return_value
mock_function = MagicMock(spec=GPUCompatible,
return_value=pd.DataFrame())
context_instance.gpu_device.return_value = NO_GPU
expression = FunctionExpression(mock_function,
mode=ExecutionMode.EXEC,
name="test", is_temp=True)
input_batch = Batch(frames=pd.DataFrame())
expression.evaluate(input_batch)
mock_function.assert_called()
``` |
{
"source": "jiashenggu/gpt-neo",
"score": 3
} |
#### File: jiashenggu/gpt-neo/tasks.py
```python
import os.path
import json
import requests
import numpy as np
import ftfy
from data.encoders import fetch_encoder, encode
import tensorflow as tf
import re
from functools import partial
lambada_src_uri = 'http://eaidata.bmk.sh/data/lambada_test.jsonl'
normalization = 'NFKC'
# Note: this task is called "lambada" but it really refers to OpenAI's version
# of the task, which actually differs in some ways from the task described in
# the original paper. So, strictly speaking, accuracy values from this task
# should not be compared to accuracy values from the original lambada task.
# For more information, see
# https://github.com/openai/gpt-2/issues/131
def lambada_create_tokens_data(params, path):
with open(path, 'w') as f:
req = requests.get(lambada_src_uri)
req.raise_for_status()
jsons = [json.loads(l) for l in req.iter_lines()]
texts = [ftfy.fix_text(j['text'], normalization=normalization) for j in jsons]
enc = fetch_encoder(params)
arrays = [encode(enc, t) for t in texts]
json.dump(arrays, f)
return arrays
def lambada_read_or_create_tokens_data(params, path):
# if you tell me where the file should go, i will helpfully create it for you
if not os.path.exists(path):
return lambada_create_tokens_data(params, path)
with open(path) as f:
return json.load(f)
def bin_pack(params, tokens_data):
eos_token = params['eos_id']
n_ctx = params['n_ctx']
dummy_token = 1
pad_batch_size = params['eval_batch_size']
bins = []
for a in tokens_data:
if len(bins) == 0 or len(bins[-1]) + len(a) + 1 > n_ctx:
bins.append([])
bins[-1] += a
bins[-1].append(eos_token)
while len(bins) % pad_batch_size != 0:
bins.append([])
bins_array = np.full((len(bins), n_ctx), dummy_token, dtype=np.uint16)
for i, b in enumerate(bins):
bins_array[i, 0:len(b)] = b
return bins_array
def lambada_init(params):
ds_configs = params['dataset_configs']
l = [
ds_configs[ds_id].get('lambada_tokens_path', "./lambada.json")
for ds_id, _, _, _ in params['datasets']
]
assert len(l) > 0, 'lambada_tokens_path not found in the dataset config'
lt_path = l[0]
assert lt_path.endswith('.json'), 'lambada_tokens_path must have extension json'
tokens_data = lambada_read_or_create_tokens_data(params, lt_path)
bins_array = bin_pack(params, tokens_data)
params['lambada_tokens_path'] = lt_path
params['lambada_n_steps'] = len(bins_array) // params['eval_batch_size']
def lambada_get_task_info(params):
return {
'n_steps': params['lambada_n_steps'],
}
# The LAMBADA evaluation code looks at the logits of each position just before an eos_token
def lambada_input(params):
eos_token = 50256 if params['n_vocab'] >= 50257 else 0
n_ctx = params['n_ctx']
lt_path = params['lambada_tokens_path']
tokens_data = lambada_read_or_create_tokens_data(params, lt_path)
bins_array = bin_pack(params, tokens_data)
dataset = tf.data.Dataset.from_tensor_slices(bins_array)
def _get_output(bin):
bin = tf.cast(bin, dtype=tf.int32)
indexes = tf.range(n_ctx)
results = tf.gather(bin, (indexes + 1) % n_ctx)
eos_next_positions = tf.math.equal(tf.gather(bin, (indexes + 2) % n_ctx), eos_token)
output = tf.where(eos_next_positions, results, tf.constant(eos_token, shape=[n_ctx]))
bin = tf.reshape(bin, [n_ctx])
bin = tf.cast(bin, dtype=tf.int32)
output = tf.reshape(output, [n_ctx])
output = tf.cast(output, dtype=tf.int32)
return bin, output
dataset = dataset.map(_get_output,num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(params['eval_batch_size'], drop_remainder=True)
dataset = dataset.repeat()
return dataset
task_descriptors = {
'lambada': {
'init_fn': lambada_init,
'get_task_info_fn': lambada_get_task_info,
'input_fn': lambada_input,
}
}
``` |
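A self-contained numpy sketch (not part of the repository) of the target masking performed in `_get_output` above: a position keeps its true next-token label only when the token two steps ahead is `eos_token`, i.e. only at the position that predicts the final word of a LAMBADA passage; every other target is set to `eos_token`. The eos id 50256 is the standard GPT-2 value assumed when `n_vocab >= 50257`.

```python
import numpy as np

eos_token = 50256   # assumed GPT-2 eos id, as in lambada_input above
n_ctx = 8
packed = np.array([11, 12, 13, eos_token, 21, 22, eos_token, 1])  # one packed bin

idx = np.arange(n_ctx)
results = packed[(idx + 1) % n_ctx]                  # next token at each position
eos_next = packed[(idx + 2) % n_ctx] == eos_token    # token two steps ahead is eos?
targets = np.where(eos_next, results, eos_token)
print(targets)  # only positions 1 and 4 keep their labels (13 and 22)
```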
{
"source": "JiashengWu/t2wml",
"score": 3
} |
#### File: t2wml/Code/UserData.py
```python
from Code.ExcelData import ExcelData
from Code.YAMLData import YAMLData
from Code.WikifierOutputData import WikifierOutputData
class UserData:
def __init__(self, id: str):
self.id = id
self.sparql_endpoint = "https://query.wikidata.org/sparql"
self.excel_data = ExcelData()
self.yaml_data = YAMLData()
self.wikifier_output_data = WikifierOutputData()
def get_user_id(self) -> str:
"""
This function returns the user id
:return:
"""
return self.id
def get_sparql_endpoint(self) -> str:
"""
This function returns the SPARQL endpoint
:return:
"""
return self.sparql_endpoint
def get_excel_data(self) -> ExcelData:
"""
This function returns the ExcelData object
:return:
"""
return self.excel_data
def get_yaml_data(self) -> YAMLData:
"""
This function returns the YAMLData object
:return:
"""
return self.yaml_data
def get_wikifier_output_data(self) -> WikifierOutputData:
"""
This function returns the WikifierOutputData object
:return:
"""
return self.wikifier_output_data
def set_sparql_endpoint(self, endpoint) -> None:
"""
This function sets the SPARQL endpoint
:return:
"""
self.sparql_endpoint = endpoint
def reset(self, attribute: str = None) -> None:
"""
This function deletes all the user files and resets all the class members
:param attribute:
:return:
"""
if attribute == 'excel':
self.excel_data.reset()
elif attribute == 'yaml':
self.yaml_data.reset()
elif attribute == 'wikifier_output':
self.wikifier_output_data.reset()
else:
self.excel_data.reset()
self.yaml_data.reset()
self.wikifier_output_data.reset()
``` |
{
"source": "JiashengYan/CarND-Term1-P3",
"score": 3
} |
#### File: JiashengYan/CarND-Term1-P3/model.py
```python
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten, MaxPool2D,Activation
import sklearn
import cv2
import argparse
import os
import matplotlib.pyplot as plt
import keras
import tensorflow as tf
def load_data(args):
"""
Load training data and split it into training and validation set
"""
data_df = pd.read_csv(os.path.join(args.data_dir, 'driving_log.csv'))
X = data_df[['center', 'left', 'right']].values
y = data_df['steering'].values
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=args.test_size, random_state=0)
return X_train, X_valid, y_train, y_valid
def load_image(data_dir, image_file):
"""
Load RGB images from a file, convert to grayscale and crop the image (removing the sky at the top and the car front at the bottom)
"""
path = os.path.join(data_dir, 'IMG', image_file.split('/')[-1])
image = cv2.imread(path)
image = image[50:-20, :, :]
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return cv2.resize(image, (320, 90), interpolation=cv2.INTER_AREA)
def generator(data_dir, image_paths, steering_angles, correction, batch_size=32):
'''
generate train data batch and validation data batch
'''
num_samples = len(steering_angles)
while 1: # Loop forever so the generator never terminates
sklearn.utils.shuffle(image_paths, steering_angles)
for offset in range(0, num_samples, batch_size):
image_paths_batch = image_paths[offset:offset+batch_size]
steering_angles_batch = steering_angles[offset:offset+batch_size]
images = []
angles = []
for image_path, angle in zip(image_paths_batch,steering_angles_batch):
img_center = load_image(data_dir, image_path[0])
img_left = load_image(data_dir, image_path[1])
img_right = load_image(data_dir, image_path[2])
images.extend((img_center,img_left,img_right))
steering_center = float(angle)
# create adjusted steering measurements for the side camera images
steering_left = steering_center + correction
steering_right = steering_center - correction
angles.extend((steering_center, steering_left, steering_right))
img_center_flipped = np.fliplr(img_center)
img_left_flipped = np.fliplr(img_left)
img_right_flipped = np.fliplr(img_right)
images.extend((img_center_flipped,img_left_flipped,img_right_flipped))
steering_center_flipped = -steering_center
steering_left_flipped = -steering_left
steering_right_flipped = -steering_right
angles.extend((steering_center_flipped, steering_left_flipped, steering_right_flipped))
# trim image to only see section with road
X_train = np.array(images).reshape((-1,90,320,1))
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
def build_model(args):
config = tf.ConfigProto(device_count={"CPU": 6})
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
model = Sequential()
## Modified NVIDIA model
# model.add(Lambda(lambda x: x/127.5-1.0, input_shape=(90,320,1)))
# model.add(Conv2D(24, (5, 5), activation='elu', subsample=(2, 2)))
# model.add(Conv2D(36, (5, 5), activation='elu', subsample=(2, 2)))
# model.add(Conv2D(48, (5, 5), activation='elu', subsample=(2, 2)))
# model.add(Conv2D(64, (3, 3), activation='elu'))
# model.add(Conv2D(64, (3, 3), activation='elu'))
# model.add(Dropout(args.keep_prob))
# model.add(Flatten())
# model.add(Dense(100, activation='elu'))
# model.add(Dense(50, activation='elu'))
# model.add(Dense(10, activation='elu'))
# model.add(Dense(1))
# self made
model.add(Lambda(lambda x: (x / 255.0) - 0.5,input_shape=(90,320,1)))
# model.add(Cropping2D(cropping=((50,20), (0,0))))
model.add(Conv2D(filters=3,kernel_size=(5,5)))
model.add(MaxPool2D(pool_size=(4,4)))
model.add(Activation('elu'))
# second conv layer
model.add(Conv2D(6,(3,3)))
model.add(MaxPool2D(pool_size=(4,4)))
model.add(Activation('elu'))
# third conv layer
model.add(Conv2D(12,(3,3)))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(64,activation='elu'))
model.add(Dropout(args.keep_prob))
model.add(Dense(32,activation='elu'))
model.add(Dropout(args.keep_prob))
model.add(Dense(16,activation='elu'))
model.add(Dropout(args.keep_prob))
model.add(Dense(1))
# # Lenet
# model.add(Lambda(lambda x: (x / 255.0) - 0.5,input_shape=(90,320,1)))
# model.add(Conv2D(6, kernel_size=(5, 5),
# activation='elu'))
# model.add(MaxPooling2D(pool_size=(4, 4)))
# model.add(Conv2D(12, (5, 5), activation='elu'))
# model.add(MaxPooling2D(pool_size=(4, 4)))
# model.add(Dropout(0.5))
# model.add(Flatten())
# model.add(Dense(256, activation='elu'))
# model.add(Dropout(0.5))
# model.add(Dense(128, activation='elu'))
# model.add(Dropout(0.5))
# model.add(Dense(1))
model.summary()
return model
def train_model(model, args, X_train, X_valid, y_train, y_valid):
"""
Train the model
"""
checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
monitor='val_loss',
verbose=0,
save_best_only=args.save_best_only,
mode='auto')
model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))
steps_per_epoch = len(X_train)//args.batch_size
validation_steps = len(X_valid)//args.batch_size
print ('steps',len(X_train),len(X_valid),steps_per_epoch,validation_steps)
history_object = model.fit_generator(generator(args.data_dir, X_train, y_train, args.correction ,args.batch_size),
steps_per_epoch=steps_per_epoch,
validation_data=generator(args.data_dir, X_valid, y_valid, args.correction, args.batch_size),
validation_steps=validation_steps,
callbacks=[checkpoint],
epochs=args.nb_epoch,
verbose=1,max_q_size=1)
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.ylim([0,0.05])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
def s2b(s):
"""
Converts a string to boolean value
"""
s = s.lower()
return s == 'true' or s == 'yes' or s == 'y' or s == '1'
def main():
"""
Load train/validation data set and train the model
"""
parser = argparse.ArgumentParser(description='Behavioral Cloning Training Program')
parser.add_argument('-d', help='data directory', dest='data_dir', type=str, default='data')
parser.add_argument('-t', help='test size fraction', dest='test_size', type=float, default=0.2)
parser.add_argument('-k', help='drop out probability', dest='keep_prob', type=float, default=0.5)
parser.add_argument('-n', help='number of epochs', dest='nb_epoch', type=int, default=5)
parser.add_argument('-c', help='steering correction', dest='correction', type=float, default=0.2)
parser.add_argument('-b', help='batch size', dest='batch_size', type=int, default=32)
parser.add_argument('-o', help='save best models only', dest='save_best_only', type=s2b, default='true')
parser.add_argument('-l', help='learning rate', dest='learning_rate', type=float, default=1.0e-3)
args = parser.parse_args()
print('-' * 30)
print('Parameters')
print('-' * 30)
for key, value in vars(args).items():
print('{:<20} := {}'.format(key, value))
print('-' * 30)
data = load_data(args)
model = build_model(args)
train_model(model, args, *data)
if __name__ == '__main__':
main()
``` |
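A tiny standalone illustration (independent of the training pipeline above) of the two augmentations applied inside `generator()`: side-camera frames receive a fixed steering correction, and horizontally flipped frames get a negated angle.

```python
import numpy as np

correction = 0.2                                # same default as the -c argument
steering_center = 0.1
steering_left = steering_center + correction    # 0.3: steer harder to the right
steering_right = steering_center - correction   # -0.1: steer slightly left

frame = np.arange(12).reshape(3, 4)             # stand-in for a camera image
flipped = np.fliplr(frame)                      # mirror the image left-right
steering_flipped = -steering_center             # mirrored road curves the other way
print(steering_left, steering_right, steering_flipped)
print(flipped[0])                               # [3 2 1 0] - columns reversed
```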
{
"source": "jiashunwang/Long-term-Motion-in-3D-Scenes",
"score": 2
} |
#### File: jiashunwang/Long-term-Motion-in-3D-Scenes/route.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PointNetfeat(nn.Module):
def __init__(self, global_feat = True, feature_transform = False):
super(PointNetfeat, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 256, 1)
self.bn1 = nn.InstanceNorm1d(64)
self.bn2 = nn.InstanceNorm1d(128)
self.bn3 = nn.InstanceNorm1d(256)
self.global_feat = global_feat
self.feature_transform = feature_transform
def forward(self, x):
n_pts = x.size()[2]
x = F.relu(self.bn1(self.conv1(x)))
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 256)
return x
if self.global_feat:
return x, trans, trans_feat
else:
x = x.view(-1, 256, 1).repeat(1, 1, n_pts)
return torch.cat([x, pointfeat], 1)#, trans, trans_feat
class PointNetDenseCls(nn.Module):
def __init__(self, k = 13, feature_transform=False):
super(PointNetDenseCls, self).__init__()
self.k = k
self.feat = PointNetfeat(global_feat=False, feature_transform=feature_transform)
self.conv1 = torch.nn.Conv1d(256+64, 256, 1)
self.conv2 = torch.nn.Conv1d(256, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 64, 1)
self.conv4 = torch.nn.Conv1d(64, self.k, 1)
self.bn1 = nn.BatchNorm1d(256)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(64)
def forward(self, x):
batchsize = x.size()[0]
n_pts = x.size()[2]
x = self.feat(x)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.conv4(x)
x = x.transpose(2,1).contiguous()
x = F.log_softmax(x.view(-1,self.k), dim=-1)
x = x.view(batchsize, n_pts, self.k)
return x
class ROUTENET(nn.Module):
def __init__(self, input_dim=9, hid_dim=64, n_layers=1, dropout=0,bidirectional=True,scene_model_ckpt=True,device='cuda'):
super().__init__()
self.input_dim = input_dim
self.hid_dim = hid_dim
self.n_layers = n_layers
self.lstm = nn.LSTM(input_dim, hid_dim, n_layers, dropout=dropout,bidirectional=bidirectional,batch_first=True)
self.fc_scene = nn.Linear(256,32)
self.fc = nn.Linear(hid_dim*2*2+32,hid_dim*2*2)
self.fc2 = nn.Linear(hid_dim*2*2,60*input_dim)
pointnet = PointNetDenseCls().to(device)#.cuda()
if scene_model_ckpt is True:
pointnet.load_state_dict(torch.load('./saved_model/point.model'))
removed = list(pointnet.children())[0:1]
self.pointfeature = nn.Sequential(*removed)
def forward(self, x,scene_points):
batch_size = x.shape[0]
outputs, (hidden, cell) = self.lstm(x)
outputs = outputs.reshape(batch_size,-1)
pointfea = self.pointfeature(scene_points)#.detach()
pointfea = self.fc_scene(pointfea)
outputs = torch.cat([outputs,pointfea],dim=1)
outputs = F.relu(self.fc(outputs))
outputs = self.fc2(outputs)
outputs = outputs.reshape(batch_size,60,self.input_dim)
return outputs
``` |
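A hypothetical shape check for `ROUTENET` above, using random tensors on CPU; it assumes `route.py` is importable and skips the pretrained PointNet checkpoint. Because the bidirectional LSTM output is flattened into a `hid_dim*2*2` vector before the first fully connected layer, the route input is assumed to carry exactly two time steps (e.g. a start and an end state).

```python
# Hypothetical smoke test; tensor shapes are assumptions inferred from the layer sizes.
import torch
from route import ROUTENET   # the model defined above

net = ROUTENET(input_dim=9, hid_dim=64, scene_model_ckpt=False, device='cpu')
x = torch.randn(4, 2, 9)          # batch of 4, two time steps, 9-dim state
scene = torch.randn(4, 3, 1024)   # batch of 4 scene point clouds with 1024 points
out = net(x, scene)
print(out.shape)                  # torch.Size([4, 60, 9]) - a 60-step route
```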
{
"source": "jiashunwang/Neural-Pose-Transfer",
"score": 3
} |
#### File: jiashunwang/Neural-Pose-Transfer/model.py
```python
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class PoseFeature(nn.Module):
def __init__(self, num_points = 6890):
super(PoseFeature, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.norm1 = torch.nn.InstanceNorm1d(64)
self.norm2 = torch.nn.InstanceNorm1d(128)
self.norm3 = torch.nn.InstanceNorm1d(1024)
def forward(self, x):
x = F.relu(self.norm1(self.conv1(x)))
x = F.relu(self.norm2(self.conv2(x)))
x = F.relu(self.norm3(self.conv3(x)))
return x
class SPAdaIN(nn.Module):
def __init__(self,norm,input_nc,planes):
super(SPAdaIN,self).__init__()
self.conv_weight = nn.Conv1d(input_nc, planes, 1)
self.conv_bias = nn.Conv1d(input_nc, planes, 1)
self.norm = norm(planes)
def forward(self,x,addition):
x = self.norm(x)
weight = self.conv_weight(addition)
bias = self.conv_bias(addition)
out = weight * x + bias
return out
class SPAdaINResBlock(nn.Module):
def __init__(self,input_nc,planes,norm=nn.InstanceNorm1d,conv_kernel_size=1,padding=0):
super(SPAdaINResBlock,self).__init__()
self.spadain1 = SPAdaIN(norm=norm,input_nc=input_nc,planes=planes)
self.relu = nn.ReLU()
self.conv1 = nn.Conv1d(planes, planes, kernel_size=conv_kernel_size, stride=1, padding=padding)
self.spadain2 = SPAdaIN(norm=norm,input_nc=input_nc,planes=planes)
self.conv2 = nn.Conv1d(planes,planes,kernel_size=conv_kernel_size, stride=1, padding=padding)
self.spadain_res = SPAdaIN(norm=norm,input_nc=input_nc,planes=planes)
self.conv_res=nn.Conv1d(planes,planes,kernel_size=conv_kernel_size, stride=1, padding=padding)
def forward(self,x,addition):
out = self.spadain1(x,addition)
out = self.relu(out)
out = self.conv1(out)
out = self.spadain2(out,addition)
out = self.relu(out)
out = self.conv2(out)
residual = x
residual = self.spadain_res(residual,addition)
residual = self.relu(residual)
residual = self.conv_res(residual)
out = out + residual
return out
class Decoder(nn.Module):
def __init__(self, bottleneck_size = 1024):
self.bottleneck_size = bottleneck_size
super(Decoder, self).__init__()
self.conv1 = torch.nn.Conv1d(self.bottleneck_size, self.bottleneck_size, 1)
self.conv2 = torch.nn.Conv1d(self.bottleneck_size, self.bottleneck_size//2, 1)
self.conv3 = torch.nn.Conv1d(self.bottleneck_size//2, self.bottleneck_size//4, 1)
self.conv4 = torch.nn.Conv1d(self.bottleneck_size//4, 3, 1)
self.spadain_block1 = SPAdaINResBlock(input_nc=3 ,planes=self.bottleneck_size)
self.spadain_block2 = SPAdaINResBlock(input_nc=3 ,planes=self.bottleneck_size//2)
self.spadain_block3 = SPAdaINResBlock(input_nc=3 ,planes=self.bottleneck_size//4)
self.norm1 = torch.nn.InstanceNorm1d(self.bottleneck_size)
self.norm2 = torch.nn.InstanceNorm1d(self.bottleneck_size//2)
self.norm3 = torch.nn.InstanceNorm1d(self.bottleneck_size//4)
self.th = nn.Tanh()
def forward(self, x, addition):
x = self.conv1(x)
x = self.spadain_block1(x,addition)
x = self.conv2(x)
x = self.spadain_block2(x,addition)
x = self.conv3(x)
x = self.spadain_block3(x,addition)
x = 2*self.th(self.conv4(x))
return x
class NPT(nn.Module):
def __init__(self, num_points = 6890, bottleneck_size = 1024):
super(NPT, self).__init__()
self.num_points = num_points
self.bottleneck_size = bottleneck_size
self.encoder = PoseFeature(num_points = num_points)
self.decoder = Decoder(bottleneck_size = self.bottleneck_size+3)
def forward(self, x1, x2):
x1 = self.encoder(x1)
y = torch.cat((x1, x2), 1)
out =self.decoder(y,x2)
return out.transpose(2,1)
``` |
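A hypothetical smoke test for the `NPT` model above, with random tensors standing in for SMPL meshes of 6890 vertices (it assumes the file is saved as `model.py`). In `forward`, the first argument supplies the pose through `PoseFeature`, while the second is the identity mesh that conditions every `SPAdaIN` block.

```python
import torch
from model import NPT   # the model defined above

net = NPT(num_points=6890, bottleneck_size=1024)
pose_mesh = torch.randn(2, 3, 6890)       # pose source, shape (batch, xyz, vertices)
identity_mesh = torch.randn(2, 3, 6890)   # identity/shape source
warped = net(pose_mesh, identity_mesh)
print(warped.shape)                       # torch.Size([2, 6890, 3])
```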
{
"source": "JiaShun-Xiao/fast-BLAST-coded-by-python-",
"score": 2
} |
#### File: JiaShun-Xiao/fast-BLAST-coded-by-python-/build_library.py
```python
from numba import jit
import numpy as np
import re
from multiprocessing import Pool
from math import ceil
@jit
def BaseToNum(chr_seq):
chr_seq = re.sub(r'A', '1', chr_seq)
chr_seq = re.sub(r'C', '2', chr_seq)
chr_seq = re.sub(r'G', '3', chr_seq)
chr_seq = re.sub(r'T', '4', chr_seq)
return chr_seq
@jit
def BaseToIndex(word,word_len):
tmp = 0
for i,v in enumerate(word):
tmp += (int(v)-1)*4**(word_len-i)
return tmp
@jit
def GenSeek(library,word_len):
seeks = np.zeros((4**word_len,2),dtype=int)
tmp = 0
for i,l in enumerate(library):
seeks[i,0] = tmp
seeks[i,1] = len(l)
tmp += len(l)
return seeks
def BuildLibrary(chr_name):
word_len = 11
chr_seq = chrom_dict[chr_name]
chr_seq = BaseToNum(chr_seq)
chr_len = len(chr_seq)
library = np.zeros(4**word_len,dtype=str).tolist()
ii = 0
while ii<chr_len-word_len:
w = chr_seq[ii:ii+word_len]
ii += 1
if 'N' in w:
continue
try:
library[BaseToIndex(w,word_len-1)] += str(ii)+","
except:
pass
seeks = GenSeek(library,word_len)
lib_seq = ''.join(library)
with open('/home/jxiaoae/class/blast/chromosome_{}_library.txt'.format(chr_name), 'w') as f:
f.write(lib_seq)
f.close()
np.save('/home/jxiaoae/class/blast/chromosome_{}_library_seeks.npy'.format(chr_name),seeks)
if __name__ == '__main__':
hg19 = open("/home/share/GRCh37/human_g1k_v37.fasta")
head = True
chrom_dict = {}
head_line = []
chr_names = []
for line in hg19:
if re.match(r">[1-9X-Y]|[12][0-9]",line):
head_line.append(line)
if head:
head = False
else:
chr_seq = re.sub(r'\n', '', chr_seq)
chr_seq = chr_seq.upper()
chrom_dict[chr_name] = chr_seq
chr_name = line.split()[0][1:]
chr_names.append(chr_name)
chr_seq = ''
print(chr_name,end=",")
else:
chr_seq += line
chr_seq = re.sub(r'\n', '', chr_seq)
chr_seq = chr_seq.upper()
chrom_dict[chr_name] = chr_seq
chrom_seek_index = np.array([[int(line.split(":")[-2]),len(line)] for line in head_line])
for i in range(1,24):
chrom_seek_index[i,1]=chrom_seek_index[i,1]+chrom_seek_index[i-1,1]+chrom_seek_index[i-1,0]+ceil(chrom_seek_index[i-1,0]/60)
np.save('/home/jxiaoae/class/blast/GRCh37_chrom_seek_index.npy',chrom_seek_index)
np.save('/home/jxiaoae/class/blast/GRCh37_chr_names.npy',np.array(chr_names))
print(chr_names)
# reset multiprocessing num according to your server
with Pool(10) as p:
p.map(BuildLibrary, chr_names)
``` |
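A standalone illustration (no numba, not part of the repository) of the word-to-index hashing used above: bases are first mapped to the digits 1-4, and an 11-mer is then read as a base-4 number, giving it a unique slot in the `4**11`-entry library. The lowercase helper names below are hypothetical stand-ins for `BaseToNum`/`BaseToIndex`.

```python
import re

def base_to_num(seq):
    # mirrors BaseToNum: A->1, C->2, G->3, T->4
    for base, digit in zip('ACGT', '1234'):
        seq = re.sub(base, digit, seq)
    return seq

def base_to_index(word, word_len):
    # mirrors BaseToIndex: sum of (digit - 1) * 4**(word_len - i)
    return sum((int(v) - 1) * 4 ** (word_len - i) for i, v in enumerate(word))

w = base_to_num('ACGTACGTACG')       # an 11-mer
print(w)                             # 12341234123
print(base_to_index(w, 10))          # slot index in the 4**11-entry library
```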
{
"source": "jiashuyu/Image-Classifier-APP",
"score": 2
} |
#### File: jiashuyu/Image-Classifier-APP/predict.py
```python
import argparse
import matplotlib.pyplot as plt
import numpy as np
import time
import json
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms, models
from collections import OrderedDict
from PIL import Image
data_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
def get_input_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default='./flowers/valid/1/image_06739.jpg',
help='path to the image that you want to predict')
parser.add_argument('--checkpoint', type=str, default='./checkpoint.pth',
help='path of the checkpoint')
parser.add_argument('--top_k', type=int, default='5',
help='top k categories predicted')
parser.add_argument('--category_names', type=str, default='./cat_to_name.json',
help='path of the category names')
parser.add_argument('--gpu', type=bool, default=True,
help='whether to use gpu for training')
return parser.parse_args()
# Write a function that loads a checkpoint and rebuilds the model
def load_checkpoint(filepath):
cp = torch.load(filepath)
# Load my models
densenet = models.densenet121(pretrained=True)
alexnet = models.alexnet(pretrained=True)
vgg = models.vgg11(pretrained=True)
model_dict = {'densenet121': densenet, 'alexnet': alexnet, 'vgg11': vgg}
model = model_dict[cp['model_name']]
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(cp['classifier_input_size'], cp['classifier_hidden_layers'])),
('relu', nn.ReLU()),
('fc2', nn.Linear(cp['classifier_hidden_layers'], cp['output_size'])),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.load_state_dict(cp['state_dict'])
model.class_to_idx = cp['class_to_idx']
return model
def process_image(image):
im = Image.open(image)
image = data_transforms(im)
return image.numpy()
def imshow(image, ax=None, title=None):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.numpy().transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.'''
# predict the top 5 classes from an image file
np_image = process_image(image_path)
image = torch.from_numpy(np_image)
# image.to('cuda')
# model.to('cuda')
image.unsqueeze_(0)
model.eval()
output = model(image)
x = torch.topk(output, topk)
list_of_class = {}
np_log_probs = x[0][0].detach().numpy()
tags = x[1][0].detach().numpy()
for i in range(topk):
for classes, idx in model.class_to_idx.items():
if idx == tags[i]:
list_of_class[classes] = np.exp(np_log_probs[i])
return list_of_class
def show_names(cat_to_name, dictionary):
name_of_class = {}
for classes, prob in dictionary.items():
name_of_class[cat_to_name[classes]] = prob
return name_of_class
def main():
in_arg = get_input_args()
with open(in_arg.category_names, 'r') as f:
cat_to_name = json.load(f)
m = load_checkpoint(in_arg.checkpoint)
# Display the top 5 classes along with their probabilities
list_of_class = predict(in_arg.input, m, in_arg.top_k)
print(show_names(cat_to_name, list_of_class))
if __name__ == "__main__":
main()
```
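A self-contained illustration (independent of the flower classifier) of the top-k step inside `predict()` above: `torch.topk` is applied to the model's log-probabilities, and `exp` recovers the class probabilities that populate `list_of_class`.

```python
import torch

logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])
log_probs = torch.log_softmax(logits, dim=1)   # what the LogSoftmax head emits
top = torch.topk(log_probs, k=3)
print(top.indices[0].tolist())                 # [0, 2, 1]
print(torch.exp(top.values[0]).tolist())       # matching probabilities, descending
```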
#### File: jiashuyu/Image-Classifier-APP/train.py
```python
import argparse
from time import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms, models
from collections import OrderedDict
# Define transforms for the training, validation, and testing sets; note that the validation and testing sets share the same transforms
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
valid_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
def get_input_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./flowers',
help='path to folder of images')
parser.add_argument('--save_dir', type=str, default='./',
help='path to the checkpoint')
parser.add_argument('--arch', type=str, default='alexnet',
help='chosen model from alexnet, vgg11, densenet121')
parser.add_argument('--learning_rate', type=float, default=0.001,
help='learning rate')
parser.add_argument('--hidden_units', type=int, default=1000,
help='number of neurons in the hidden layer')
parser.add_argument('--epochs', type=int, default=1,
help='number of epochs used for training')
parser.add_argument('--gpu', type=bool, default=True,
help='whether to use gpu for training')
return parser.parse_args()
def main():
in_arg = get_input_args()
train_dir = in_arg.data_dir + '/train'
valid_dir = in_arg.data_dir + '/valid'
test_dir = in_arg.data_dir + '/test'
# Load the datasets with ImageFolder
train_dataset = datasets.ImageFolder(train_dir, transform=train_transforms)
valid_dataset = datasets.ImageFolder(valid_dir, transform=valid_transforms)
test_dataset = datasets.ImageFolder(test_dir, transform=valid_transforms)
# Using the image datasets and the trainforms, define the dataloaders
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=True)
# Load my models
densenet = models.densenet121(pretrained=True)
alexnet = models.alexnet(pretrained=True)
vgg = models.vgg11(pretrained=True)
model_dict = {'densenet121': densenet, 'alexnet': alexnet, 'vgg11': vgg}
model = model_dict[in_arg.arch]
classifier_inputs = {'densenet121': 1024, 'alexnet': 9216, 'vgg11': 25088}
# Freeze parameters so we don't backprop through them
for p in model.parameters():
p.requires_grad = False
# change the classifier of the model so that we can have 102 outputs
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(classifier_inputs[in_arg.arch], in_arg.hidden_units)),
('relu', nn.ReLU()),
('fc2', nn.Linear(in_arg.hidden_units, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
# Train a model with a pre-trained network
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=in_arg.learning_rate)
epochs = in_arg.epochs
print_every = 40
steps = 0
# change to cuda if gpu is available and requested
if (in_arg.gpu == True) & (torch.cuda.is_available()):
model.to('cuda')
start_time = time()
for e in range(epochs):
model.train()
running_loss = 0
for ii, (inputs, labels) in enumerate(train_loader):
steps += 1
if (in_arg.gpu == True) & (torch.cuda.is_available()):
inputs, labels = inputs.to('cuda'), labels.to('cuda')
optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
model.eval()
correct = 0
total = 0
test_loss = 0
with torch.no_grad():
for data in valid_loader:
images, labels = data
if (in_arg.gpu == True) & (torch.cuda.is_available()):
images = images.to('cuda')
labels = labels.to('cuda')
outputs = model.forward(images)
test_loss += criterion(outputs, labels).item()
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print("Epoch: {}/{}... ".format(e+1, epochs),
"Training Loss: {:.4f}".format(running_loss/print_every),
" Test Loss: {:.4f}".format(test_loss/len(valid_loader)),
" Accuracy: {:.4f}".format(correct / total))
running_loss = 0
model.train()
end_time = time()
print('total time used for training:', end_time-start_time, 'seconds')
# Save the checkpoint
checkpoint = {'class_to_idx': train_dataset.class_to_idx,
'model_name': in_arg.arch,
'classifier_input_size': classifier_inputs[in_arg.arch],
'output_size': 102,
'classifier_hidden_layers': in_arg.hidden_units,
'state_dict': model.state_dict()}
torch.save(checkpoint, in_arg.save_dir + 'checkpoint.pth')
if __name__ == "__main__":
main()
``` |
{
"source": "jiasir/playback",
"score": 3
} |
#### File: playback/cli/cli.py
```python
import sys
import argparse
import pkg_resources
from playback import __version__
def get_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='OpenStack provisioning and orchestration library with command-line tools'
)
parser.add_argument(
'-v', '--version',
action='version', version=__version__ ,
)
parser.add_argument(
'--user',
help='the username to connect to the remote host', action='store', default='ubuntu', dest='user'
)
parser.add_argument(
'--hosts',
help='the remote host to connect to ', action='store', default=None, dest='hosts'
)
parser.add_argument(
'-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting', action='store', dest='key_filename', default=None
)
parser.add_argument(
'--password',
help='the password used by the SSH layer when connecting to remote hosts', action='store', dest='password', default=None
)
subparser = parser.add_subparsers(
title='commands',
metavar='COMMAND',
help='description',
)
provision_parser = subparser.add_parser(
'provision',
help='provision and manage OpenStack'
)
provision_subparser = provision_parser.add_subparsers(
title='commands',
metavar='COMMAND',
help='description',
)
entry_points = [
(ep.name, ep.load())
for ep in pkg_resources.iter_entry_points('provision')
]
entry_points.sort(
key=lambda (name, fn): getattr(fn, 'priority', 100),
)
for (name, fn) in entry_points:
p = provision_subparser.add_parser(
name,
description=fn.__doc__,
help=fn.__doc__,
)
fn(p)
return parser
def _main():
parser = get_parser()
if len (sys.argv) < 2 :
parser.print_help()
sys.exit()
else :
args = parser.parse_args()
return args.func(args)
def main():
try:
_main()
except:
pass
if __name__ == '__main__':
main()
```
#### File: playback/cli/neutron.py
```python
import sys, logging
from playback.api import Neutron
from cliff.command import Command
def make_target(args):
try:
target = Neutron(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename, password=args.password)
except Exception:
sys.stderr.write('No hosts found. Please using --hosts param.')
sys.exit(1)
return target
def create_neutron_db(args):
target = make_target(args)
target.create_neutron_db(args.root_db_pass, args.neutron_db_pass)
def create_service_credentials(args):
target = make_target(args)
target.create_service_credentials(
args.os_password,
args.os_auth_url,
args.neutron_pass,
args.public_endpoint,
args.internal_endpoint,
args.admin_endpoint)
def install(args):
target = make_target(args)
target.install_self_service(
args.connection,
args.rabbit_hosts,
args.rabbit_user,
args.rabbit_pass,
args.auth_uri,
args.auth_url,
args.neutron_pass,
args.nova_url,
args.nova_pass,
args.public_interface,
args.local_ip,
args.nova_metadata_ip,
args.metadata_proxy_shared_secret,
args.memcached_servers,
args.populate)
class CreateNeutronDB(Command):
"""create the neutron database"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(CreateNeutronDB, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--root-db-pass',
help='the openstack database root passowrd',
action='store', default=None, dest='root_db_pass')
parser.add_argument('--neutron-db-pass',
help='neutron db passowrd',
action='store', default=None, dest='neutron_db_pass')
return parser
def take_action(self, parsed_args):
create_neutron_db(parsed_args)
class CreateServiceCredentials(Command):
"""create the neutron service credentials"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(CreateServiceCredentials, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--os-password',
help='the password for admin user',
action='store', default=None, dest='os_password')
parser.add_argument('--os-auth-url',
help='keystone endpoint url e.g. http://CONTROLLER_VIP:35357/v3',
action='store', default=None, dest='os_auth_url')
parser.add_argument('--neutron-pass',
help='the password for neutron user',
action='store', default=None, dest='neutron_pass')
parser.add_argument('--public-endpoint',
help='public endpoint for neutron service e.g. http://CONTROLLER_VIP:9696',
action='store', default=None, dest='public_endpoint')
parser.add_argument('--internal-endpoint',
help='internal endpoint for neutron service e.g. http://CONTROLLER_VIP:9696',
action='store', default=None, dest='internal_endpoint')
parser.add_argument('--admin-endpoint',
help='admin endpoint for neutron service e.g. http://CONTROLLER_VIP:9696',
action='store', default=None, dest='admin_endpoint')
return parser
def take_action(self, parsed_args):
create_service_credentials(parsed_args)
class Install(Command):
"""install neutron for self-service"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Install, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--connection',
help='mysql database connection string e.g. mysql+pymysql://neutron:NEUTRON_PASS@CONTROLLER_VIP/neutron',
action='store', default=None, dest='connection')
parser.add_argument('--auth-uri',
help='keystone internal endpoint e.g. http://CONTROLLER_VIP:5000',
action='store', default=None, dest='auth_uri')
parser.add_argument('--auth-url',
help='keystone admin endpoint e.g. http://CONTROLLER_VIP:35357',
action='store', default=None, dest='auth_url')
parser.add_argument('--rabbit-hosts',
help='rabbit hosts e.g. controller1,controller2',
action='store', default=None, dest='rabbit_hosts')
parser.add_argument('--rabbit-user',
help='the user for rabbit, default openstack',
action='store', default='openstack', dest='rabbit_user')
parser.add_argument('--rabbit-pass',
help='the password for rabbit openstack user',
action='store', default=None, dest='rabbit_pass')
parser.add_argument('--neutron-pass',
help='the password for neutron user',
action='store', default=None, dest='neutron_pass')
parser.add_argument('--nova-url',
help='URL for connection to nova (Only supports one nova region currently) e.g. http://CONTROLLER_VIP:8774/v2.1',
action='store', default=None, dest='nova_url')
parser.add_argument('--nova-pass',
help='passowrd for nova user',
action='store', default=None, dest='nova_pass')
parser.add_argument('--public-interface',
help='public interface e.g. eth1',
action='store', default=None, dest='public_interface')
parser.add_argument('--local-ip',
help=' underlying physical network interface that handles overlay networks(uses the management interface IP)',
action='store', default=None, dest='local_ip')
parser.add_argument('--nova-metadata-ip',
help='IP address used by Nova metadata server e.g. CONTROLLER_VIP',
action='store', default=None, dest='nova_metadata_ip')
parser.add_argument('--metadata-proxy-shared-secret',
help='metadata proxy shared secret',
action='store', default=None, dest='metadata_proxy_shared_secret')
parser.add_argument('--memcached-servers',
help='memcached servers e.g. CONTROLLER1:11211,CONTROLLER2:11211',
action='store', default=None, dest='memcached_servers')
parser.add_argument('--populate',
help='Populate the neutron database',
action='store_true', default=False, dest='populate')
return parser
def take_action(self, parsed_args):
install(parsed_args)
```
#### File: playback/cli/nova.py
```python
import sys, logging
from playback.cli.cliutil import priority
from playback.api import Nova
from cliff.command import Command
def make_target(args):
try:
target = Nova(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename,
password=args.password)
except AttributeError:
sys.stderr.write('No hosts found. Please using --hosts param.')
sys.exit(1)
return target
def create_nova_db(args):
target = make_target(args)
target.create_nova_db(args.root_db_pass, args.nova_db_pass)
def create_service_credentials(args):
target = make_target(args)
target.create_service_credentials(args.os_password,
args.os_auth_url, args.nova_pass,
args.public_endpoint, args.internal_endpoint,
args.admin_endpoint)
def install(args):
target = make_target(args)
target.install_nova(args.connection, args.api_connection, args.auth_uri, args.auth_url,
args.nova_pass, args.my_ip, args.memcached_servers, args.rabbit_hosts, args.rabbit_user,
args.rabbit_pass, args.glance_api_servers, args.neutron_endpoint, args.neutron_pass,
args.metadata_proxy_shared_secret, args.populate)
class CreateNovaDB(Command):
"""create the nova and nova_api database"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(CreateNovaDB, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote hosts to connect to (comma-separated)',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--root-db-pass',
help='the MySQL database root password',
action='store', default=None, dest='root_db_pass')
parser.add_argument('--nova-db-pass',
help='nova and nova_api database password',
action='store', default=None, dest='nova_db_pass')
return parser
def take_action(self, parsed_args):
create_nova_db(parsed_args)
class CreateServiceCredentials(Command):
"""create the nova service credentials"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(CreateServiceCredentials, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote hosts to connect to (comma-separated)',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--os-password',
help='the password for admin user',
action='store', default=None, dest='os_password')
parser.add_argument('--os-auth-url',
help='keystone endpoint url e.g. http://CONTROLLER_VIP:35357/v3',
action='store', default=None, dest='os_auth_url')
parser.add_argument('--nova-pass',
help='password for nova user',
action='store', default=None, dest='nova_pass')
parser.add_argument('--public-endpoint',
help=r'public endpoint for nova service e.g. "http://CONTROLLER_VIP:8774/v2.1/%%\(tenant_id\)s"',
action='store', default=None, dest='public_endpoint')
parser.add_argument('--internal-endpoint',
help=r'internal endpoint for nova service e.g. "http://CONTROLLER_VIP:8774/v2.1/%%\(tenant_id\)s"',
action='store', default=None, dest='internal_endpoint')
parser.add_argument('--admin-endpoint',
help=r'admin endpoint for nova service e.g. "http://CONTROLLER_VIP:8774/v2.1/%%\(tenant_id\)s"',
action='store', default=None, dest='admin_endpoint')
return parser
def take_action(self, parsed_args):
create_service_credentials(parsed_args)
class Install(Command):
"""install nova"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Install, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote hosts to connect to (comma-separated)',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--connection',
help='mysql nova database connection string e.g. mysql+pymysql://nova:NOVA_PASS@CONTROLLER_VIP/nova',
action='store', default=None, dest='connection')
parser.add_argument('--api-connection',
help='mysql nova_api database connection string e.g. mysql+pymysql://nova:NOVA_PASS@CONTROLLER_VIP/nova_api',
action='store', default=None, dest='api_connection')
parser.add_argument('--auth-uri',
help='keystone internal endpoint e.g. http://CONTROLLER_VIP:5000',
action='store', default=None, dest='auth_uri')
parser.add_argument('--auth-url',
help='keystone admin endpoint e.g. http://CONTROLLER_VIP:35357',
action='store', default=None, dest='auth_url')
parser.add_argument('--nova-pass',
help='<PASSWORD> for nova user',
action='store', default=None, dest='nova_pass')
parser.add_argument('--my-ip',
help='the host management ip',
action='store', default=None, dest='my_ip')
parser.add_argument('--memcached-servers',
help='memcached servers e.g. CONTROLLER1:11211,CONTROLLER2:11211',
action='store', default=None, dest='memcached_servers')
parser.add_argument('--rabbit-hosts',
help='rabbit hosts e.g. CONTROLLER1,CONTROLLER2',
action='store', default=None, dest='rabbit_hosts')
parser.add_argument('--rabbit-user',
help='the user for rabbit, default openstack',
action='store', default='openstack', dest='rabbit_user')
parser.add_argument('--rabbit-pass',
help='the password for rabbit openstack user',
action='store', default=None, dest='rabbit_pass')
parser.add_argument('--glance-api-servers',
help='glance host e.g. http://CONTROLLER_VIP:9292',
action='store', default=None, dest='glance_api_servers')
parser.add_argument('--neutron-endpoint',
help='neutron endpoint e.g. http://CONTROLLER_VIP:9696',
action='store', default=None, dest='neutron_endpoint')
parser.add_argument('--neutron-pass',
help='the password for neutron user',
action='store', default=None, dest='neutron_pass')
parser.add_argument('--metadata-proxy-shared-secret',
help='metadata proxy shared secret',
action='store', default=None, dest='metadata_proxy_shared_secret')
parser.add_argument('--populate', help='Populate the nova database',
action='store_true', default=False, dest='populate')
return parser
def take_action(self, parsed_args):
install(parsed_args)
```
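The same Nova provisioning steps can also be driven from Python without cliff. The sketch below mirrors the wrapper functions above (`make_target`, `create_nova_db`, `create_service_credentials`); the host names, CONTROLLER_VIP and all passwords are placeholders, and only the calls already shown in this file are assumed to exist.
```python
# Minimal sketch: calling playback.api.Nova directly, as the CLI wrappers above do.
# Host names, CONTROLLER_VIP and all passwords are placeholders.
from playback.api import Nova

nova = Nova(user='ubuntu', hosts=['controller1', 'controller2'],
            key_filename=None, password=None)

# equivalent of the CreateNovaDB command
nova.create_nova_db('ROOT_DB_PASS', 'NOVA_DB_PASS')

# equivalent of the CreateServiceCredentials command
nova.create_service_credentials(
    'ADMIN_PASS', 'http://CONTROLLER_VIP:35357/v3', 'NOVA_PASS',
    'http://CONTROLLER_VIP:8774/v2.1/%(tenant_id)s',
    'http://CONTROLLER_VIP:8774/v2.1/%(tenant_id)s',
    'http://CONTROLLER_VIP:8774/v2.1/%(tenant_id)s')
```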
#### File: playback/cli/rabbitmq.py
```python
import sys
import logging
from playback.api import RabbitMq
from cliff.command import Command
def make_target(args):
try:
target = RabbitMq(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename,
password=args.password)
except AttributeError:
sys.stderr.write('No hosts found. Please use the --hosts param.\n')
sys.exit(1)
return target
def install(args):
target = make_target(args)
target.install(args.erlang_cookie, args.rabbit_user, args.rabbit_pass)
def join_cluster(args):
target = make_target(args)
target.join_cluster(args.name)
class Install(Command):
"""install rabbitmq"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Install, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote hosts to connect to (comma-separated)',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--erlang-cookie',
help='set up the Erlang cookie',
action='store', default=None, dest='erlang_cookie')
parser.add_argument('--rabbit-user',
help='set rabbit user name',
action='store', default=None, dest='rabbit_user')
parser.add_argument('--rabbit-pass',
help='set rabbit password',
action='store', default=None, dest='rabbit_pass')
return parser
def take_action(self, parsed_args):
install(parsed_args)
class JoinCluster(Command):
"""join the rabbit cluster"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(JoinCluster, self).get_parser(prog_name)
parser.add_argument('--name',
help='the joined name, e.g. rabbit@CONTROLLER1',
action='store', default=None, dest='name')
return parser
def take_action(self, parsed_args):
join_cluster(parsed_args)
```
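The two commands above map onto two API calls. A hedged sketch of driving `playback.api.RabbitMq` directly (host names, the Erlang cookie and the password are placeholders; only the calls shown in this file are assumed):
```python
# Minimal sketch: calling playback.api.RabbitMq directly, mirroring Install and JoinCluster.
from playback.api import RabbitMq

rabbit = RabbitMq(user='ubuntu', hosts=['controller1', 'controller2'],
                  key_filename=None, password=None)
rabbit.install('SOME_ERLANG_COOKIE', 'openstack', 'RABBIT_PASS')

# join the second node to the first
RabbitMq(user='ubuntu', hosts=['controller2']).join_cluster('rabbit@controller1')
```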
#### File: playback/playback/common.py
```python
import argparse
from fabric.api import *
from fabric.tasks import Task
from playback import __version__
class Common(Task):
"""
the common library for OpenStack Provisioning
:param user(str): the user to log in to the remote servers
:param hosts(list): the list of remote hosts to operate on
:param key_filename(str): the SSH private key to use, default None
:param password(str): the password for the remote servers
:param parallel(bool): execute on the remote servers in parallel, default True
:returns: None
"""
def __init__(self, user='ubuntu', hosts=None, key_filename=None, password=<PASSWORD>, parallel=True, *args, **kwargs):
super(Common, self).__init__(*args, **kwargs)
self.user = user
self.hosts = hosts
self.parallel = parallel
self.key_filename = key_filename
self.password = password
env.user = self.user
env.hosts = self.hosts
env.parallel = self.parallel
env.key_filename = self.key_filename
env.password = <PASSWORD>
env.abort_on_prompts = False
def _release(self):
release = sudo('lsb_release -cs')
return release
```
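Every class in the rest of this package follows the same pattern on top of `Common`: a private `_task` method that runs Fabric commands on a single host, and a public method that wraps it in `execute()` so it fans out over `self.hosts`. A hypothetical sketch of that pattern (the `NtpInstall` class and package choices are illustrative only):
```python
# Hypothetical subclass illustrating the Common pattern used throughout playback.
from fabric.api import execute, sudo
from playback import common

class NtpInstall(common.Common):
    def _install(self):
        # self._release() comes from Common and returns the Ubuntu codename
        if self._release() == 'trusty':
            sudo('apt-get install -y ntp')
        else:
            sudo('apt-get install -y chrony')

    def install(self):
        # execute() runs _install on every host configured on this instance
        return execute(self._install)
```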
#### File: playback/playback/haproxy_install.py
```python
from fabric.api import *
from playback import common
class HaproxyInstall(common.Common):
"""
HAProxy and Keepalived Installation
:param user(str): the user to log in to the remote servers
:param hosts(list): the list of remote hosts to operate on
:param key_filename(str): the SSH private key to use, default None
:param password(str): the password for the remote servers
:param parallel(bool): execute on the remote servers in parallel, default True
:returns: None
:examples:
.. code-block:: python
# create an haproxy instance
haproxy = HaproxyInstall(user='ubuntu', hosts=['haproxy1', 'haproxy2'])
# install haproxy on haproxy1 and haproxy2
haproxy.install()
"""
def _install(self):
sudo('apt-get update')
sudo('apt-get install -y haproxy keepalived mysql-client')
def install(self):
"""
Install HAProxy and Keepalived
:returns: None
"""
return execute(self._install)
```
#### File: playback/playback/mysql_installation.py
```python
from fabric.api import *
from fabric.contrib import files
from playback import common
from playback.templates.galera_list import (conf_galera_list_trusty,
conf_galera_list_xenial)
class MysqlInstallation(common.Common):
"""
Install Galera Cluster for MySQL
:param user(str): the user to log in to the remote servers
:param hosts(list): the list of remote hosts to operate on
:param key_filename(str): the SSH private key to use, default None
:param password(str): the password for the remote servers
:param parallel(bool): execute on the remote servers in parallel, default True
:returns: None
:examples:
.. code-block:: python
# create a MySQL installation instance
mysql = MysqlInstallation(
user='ubuntu',
hosts=['controller1', 'controller2']
)
# add the MariaDB Galera Cluster repo on xenial, or Galera Cluster for MySQL on trusty
mysql.enable_repo()
# install mysql ha on controller1 and controller2
mysql.install()
"""
def _enable_repo(self):
if self._release() == 'trusty':
conf_galera_list = conf_galera_list_trusty
sudo('apt-key adv --recv-keys --keyserver keyserver.ubuntu.com <KEY>')
if self._release() == 'xenial':
conf_galera_list = conf_galera_list_xenial
sudo(
'apt-key adv --recv-keys --keyserver keyserver.ubuntu.com <KEY>')
with cd('/etc/apt/sources.list.d/'):
sudo('rm -rf galera.list', warn_only=True)
files.append('galera.list', conf_galera_list, use_sudo=True)
sudo('apt-get update')
def enable_repo(self):
"""
Set up the Galera package repository (trusty and xenial)
:returns: None
"""
return execute(self._enable_repo)
def _install(self):
if self._release() == 'trusty':
sudo('DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes galera-3 mysql-wsrep-5.6')
if self._release() == 'xenial':
sudo('DEBIAN_FRONTEND=noninteractive apt install -y --allow-downgrades --allow-remove-essential --allow-change-held-packages mariadb-client mariadb-galera-server galera rsync')
def install(self):
"""
Install Galera Cluster for MySQL if trusty, install MariaDB Galera Cluster if xenial
:returns: None
"""
return execute(self._install)
```
#### File: playback/playback/swift.py
```python
import argparse
import os
import sys
from fabric.api import *
from fabric.colors import red
from fabric.contrib import files
from fabric.network import disconnect_all
from playback import __version__, common
from playback.templates.proxy_server_conf import conf_proxy_server_conf
from playback.templates.swift_conf import conf_swift_conf
from playback.templates.memcached_conf import conf_memcached_conf
class Swift(common.Common):
"""
Deploy swift proxy node
:param user(str): the user to log in to the remote servers
:param hosts(list): the list of remote hosts to operate on
:param key_filename(str): the SSH private key to use, default None
:param password(str): the password for the remote servers
:param parallel(bool): execute on the remote servers in parallel, default True
:returns: None
:examples:
.. code-block:: python
# create swift proxy instances
swift_proxy1 = Swift(user='ubuntu', hosts=['controller1'])
swift_proxy2 = Swift(user='ubuntu', hosts=['controller2'])
# create the Identity service credentials (only do this once)
swift_proxy1.create_service_credentials(
os_password='<PASSWORD>',
os_auth_url='http://192.168.1.1:35357/v3',
swift_pass='<PASSWORD>',
public_endpoint='http://192.168.1.1:8080/v1/AUTH_%\(tenant_id\)s',
internal_endpoint='http://192.168.1.1:8080/v1/AUTH_%\(tenant_id\)s',
admin_endpoint='http://192.168.1.1:8080/v1'
)
# install swift proxy
swift_proxy1.install(
auth_uri='http://192.168.1.1:5000',
auth_url='http://192.168.1.1:35357',
swift_pass='<PASSWORD>',
memcached_servers='controller1:11211,controller2:11211',
with_memcached=False # if True install memcached on this node
)
swift_proxy2.install(
auth_uri='http://192.168.1.1:5000',
auth_url='http://192.168.1.1:35357',
swift_pass='<PASSWORD>',
memcached_servers='controller1:11211,controller2:11211',
with_memcached=False # if True install memcached on this node
)
# see swift storage documents for full install of swift
"""
@runs_once
def _create_service_credentials(self, os_password, os_auth_url, swift_pass, public_endpoint, internal_endpoint, admin_endpoint):
with shell_env(OS_PROJECT_DOMAIN_NAME='default',
OS_USER_DOMAIN_NAME='default',
OS_PROJECT_NAME='admin',
OS_TENANT_NAME='admin',
OS_USERNAME='admin',
OS_PASSWORD=<PASSWORD>,
OS_AUTH_URL=os_auth_url,
OS_IDENTITY_API_VERSION='3',
OS_IMAGE_API_VERSION='2',
OS_AUTH_VERSION='3'):
print red(env.host_string + ' | Create the swift user')
sudo(
'openstack user create --domain default --password {0} swift'.format(swift_pass))
print red(env.host_string + ' | Add the admin role to the swift user and service project')
sudo('openstack role add --project service --user swift admin')
print red(env.host_string + ' | Create the swift service entity')
sudo('openstack service create --name swift --description "OpenStack Object Storage" object-store')
print red(env.host_string + ' | Create the Object Storage service API endpoints')
sudo(
'openstack endpoint create --region RegionOne object-store public {0}'.format(public_endpoint))
sudo(
'openstack endpoint create --region RegionOne object-store internal {0}'.format(internal_endpoint))
sudo(
'openstack endpoint create --region RegionOne object-store admin {0}'.format(admin_endpoint))
def create_service_credentials(self, *args, **kwargs):
r"""
Create the swift service credentials
:param os_password: the password of openstack `<PASSWORD>` user
:param os_auth_url: keystone endpoint url e.g. `http://CONTROLLER_VIP:35357/v3`
:param swift_pass: password of `<PASSWORD>` user
:param public_endpoint: public endpoint for swift service e.g. `http://CONTROLLER_VIP:8080/v1/AUTH_%\\(tenant_id\\)s`
:param internal_endpoint: internal endpoint for swift service e.g. `http://CONTROLLER_VIP:8080/v1/AUTH_%\\(tenant_id\\)s`
:param admin_endpoint: admin endpoint for swift service e.g. `http://CONTROLLER_VIP:8080/v1`
:returns: None
"""
return execute(self._create_service_credentials, *args, **kwargs)
def _install(self, auth_uri, auth_url, swift_pass, memcached_servers, with_memcached):
print red(env.host_string + ' | Install swift proxy')
sudo('apt-get update')
sudo('apt-get -y install swift swift-proxy python-swiftclient python-keystoneclient python-keystonemiddleware python-memcache')
# Install memcached
if with_memcached:
sudo('apt-get -y install memcached')
# Configure /etc/memcached.conf to listen 0.0.0.0
with open('tmp_memcached_conf_' + env.host_string, 'w') as f:
f.write(conf_memcached_conf)
files.upload_template(filename='tmp_memcached_conf_' + env.host_string,
destination='/etc/memcached.conf',
use_sudo=True,
backup=True)
os.remove('tmp_memcached_conf_' + env.host_string)
sudo('service memcached restart')
sudo('mkdir /etc/swift')
print red(env.host_string + ' | Update /etc/swift/proxy-server.conf')
with open('tmp_proxy_server_conf_' + env.host_string, 'w') as f:
f.write(conf_proxy_server_conf)
files.upload_template(filename='tmp_proxy_server_conf_' + env.host_string,
destination='/etc/swift/proxy-server.conf',
use_sudo=True,
use_jinja=True,
backup=True,
context={'auth_uri': auth_uri,
'auth_url': auth_url,
'swift_pass': swift_pass,
'memcached_servers': memcached_servers})
os.remove('tmp_proxy_server_conf_' + env.host_string)
def install(self, *args, **kwargs):
"""
Install swift proxy service
:param auth_uri: keystone internal endpoint e.g. `http://CONTROLLER_VIP:5000`
:param auth_url: keystone admin endpoint e.g. `http://CONTROLLER_VIP:35357`
:param swift_pass: password of `<PASSWORD>` user
:param memcached_servers: memcache servers e.g. `CONTROLLER1:11211,CONTROLLER2:11211`
:param with_memcached: install memcached on the remote server; if memcached already runs elsewhere on the controller nodes, leave this False and point `memcached_servers` at them
:returns: None
"""
return execute(self._install, *args, **kwargs)
def _finalize_install(self, swift_hash_path_suffix, swift_hash_path_prefix):
print red(env.host_string + ' | Update /etc/swift/swift.conf')
with open('tmp_swift_conf_' + env.host_string, 'w') as f:
f.write(conf_swift_conf)
files.upload_template(filename='tmp_swift_conf_' + env.host_string,
destination='/etc/swift/swift.conf',
use_jinja=True,
use_sudo=True,
backup=True,
context={'swift_hash_path_suffix': swift_hash_path_suffix,
'swift_hash_path_prefix': swift_hash_path_prefix})
os.remove('tmp_swift_conf_' + env.host_string)
print red(env.host_string + ' | On all nodes, ensure proper ownership of the configuration directory')
sudo('chown -R root:swift /etc/swift')
print red(env.host_string + ' | On the controller node and any other nodes running the proxy service, restart the Object Storage proxy service including its dependencies')
sudo('service memcached restart', warn_only=True)
sudo('service swift-proxy restart', warn_only=True)
print red(env.host_string + ' | On the storage nodes, start the Object Storage services')
sudo('swift-init all start', warn_only=True)
sudo('swift-init all reload', warn_only=True)
def finalize_install(self, *args, **kwargs):
"""
Finalize swift installation
:param swift_hash_path_suffix: `swift_hash_path_suffix` and `swift_hash_path_prefix` are used as part of the hashing algorithm when determining data placement in the cluster. These values should remain secret and MUST NOT change once a cluster has been deployed
:param swift_hash_path_prefix: `swift_hash_path_suffix` and `swift_hash_path_prefix` are used as part of the hashing algorithm when determining data placement in the cluster. These values should remain secret and MUST NOT change once a cluster has been deployed
:returns: None
"""
return execute(self._finalize_install, *args, **kwargs)
```
#### File: playback/playback/swift_storage.py
```python
import argparse
import os
import sys
from fabric.api import *
from fabric.colors import red
from fabric.contrib import files
from fabric.network import disconnect_all
from tqdm import *
from playback import __version__, common
from playback.templates.account_server_conf import conf_account_server_conf
from playback.templates.container_server_conf import conf_container_server_conf
from playback.templates.object_server_conf import conf_object_server_conf
from playback.templates.rsync import conf_rsync
from playback.templates.rsyncd_conf import conf_rsyncd_conf
class SwiftStorage(common.Common):
"""
Deploy swift storage node
:param user(str): the user to log in to the remote servers
:param hosts(list): the list of remote hosts to operate on
:param key_filename(str): the SSH private key to use, default None
:param password(str): the password for the remote servers
:param parallel(bool): execute on the remote servers in parallel, default True
:returns: None
:examples:
.. code-block:: python
# create storage instances
swift_storage1 = SwiftStorage(user='ubuntu', hosts=['compute1'])
swift_storage2 = SwiftStorage(user='ubuntu', hosts=['compute2'])
# prepare disks on storage nodes
swift_storage1.prepare_disks('sdb,sdc,sdd,sde')
swift_storage2.prepare_disks('sdb,sdc,sdd,sde')
# install swift storage
swift_storage1.install(
address='192.168.1.11',
bind_ip='192.168.1.11'
)
swift_storage2.install(
address='192.168.1.12',
bind_ip='192.168.1.12'
)
# create a ring instance
ring = SwiftStorage(user='ubuntu', hosts=['controller1'])
# create account ring
ring.create_account_builder_file(
partitions=10,
replicas=3,
moving=1
)
ring.account_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sdb',
weight=100
)
ring.account_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sdc',
weight=100
)
ring.account_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sdd',
weight=100
)
ring.account_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sde',
weight=100
)
ring.account_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sdb',
weight=100
)
ring.account_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sdc',
weight=100
)
ring.account_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sdd',
weight=100
)
ring.account_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sde',
weight=100
)
ring.account_builder_rebalance()
# create container ring
ring.create_container_builder_file(
partitions=10,
replicas=3,
moving=1
)
ring.container_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sdb',
weight=100
)
ring.container_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sdc',
weight=100
)
ring.container_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sdd',
weight=100
)
ring.container_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sde',
weight=100
)
ring.container_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sdb',
weight=100
)
ring.container_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sdc',
weight=100
)
ring.container_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sdd',
weight=100
)
ring.container_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sde',
weight=100
)
ring.container_builder_rebalance()
# create object ring
ring.create_object_builder_file(
partitions=10,
replicas=3,
moving=1
)
ring.object_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sdb',
weight=100
)
ring.object_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sdc',
weight=100
)
ring.object_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sdd',
weight=100
)
ring.object_builder_add(
region=1,
zone=1,
ip='192.168.1.11',
device='sde',
weight=100
)
ring.object_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sdb',
weight=100
)
ring.object_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sdc',
weight=100
)
ring.object_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sdd',
weight=100
)
ring.object_builder_add(
region=1,
zone=1,
ip='192.168.1.12',
device='sde',
weight=100
)
ring.object_builder_rebalance()
# sync the builder file from controller node to each storage node and other any proxy node
ring.sync_builder_file(
hosts=['controller2', 'compute1', 'compute2']
)
# finalize installation on all nodes
from playback.swift import Swift
finalize = Swift(user='ubuntu', hosts=['controller1','controller2', 'compute1', 'compute2'])
finalize.finalize_install(
swift_hash_path_suffix='changeme',
swift_hash_path_prefix='changeme'
)
"""
def _prepare_disks(self, disks_name):
"""format disks to xfs and mount it"""
fstab = '/etc/fstab'
for disk in tqdm(disks_name.split(',')):
sudo('umount /dev/{0}'.format(disk), warn_only=True)
if sudo('mkfs.xfs -f /dev/{0}'.format(disk), warn_only=True).failed:
sudo('apt-get update')
sudo('apt-get -y install xfsprogs')
sudo('mkfs.xfs -f /dev/{0}'.format(disk))
sudo('mkdir -p /srv/node/{0}'.format(disk))
files.append(
fstab, '/dev/{0} /srv/node/{1} xfs noatime,nodiratime,nobarrier,logbufs=8 0 2'.format(disk, disk), use_sudo=True)
sudo('mount /srv/node/{0}'.format(disk))
def prepare_disks(self, *args, **kwargs):
"""
Prepare the disks for storage
:param disks_name: the device name, e.g. `sdb,sdc`
:returns: None
"""
return execute(self._prepare_disks, *args, **kwargs)
def _install(self, address, bind_ip):
print red(env.host_string + ' | Install the supporting utility packages')
sudo('apt-get update')
sudo('apt-get -y install xfsprogs rsync')
print red(env.host_string + ' | Update /etc/rsyncd.conf')
with open('tmp_rsyncd_conf_' + env.host_string, 'w') as f:
f.write(conf_rsyncd_conf)
files.upload_template(filename='tmp_rsyncd_conf_' + env.host_string,
destination='/etc/rsyncd.conf',
use_jinja=True,
use_sudo=True,
backup=True,
context={'address': address})
os.remove('tmp_rsyncd_conf_' + env.host_string)
print red(env.host_string + ' | Update /etc/default/rsync')
with open('tmp_rsync_' + env.host_string, 'w') as f:
f.write(conf_rsync)
files.upload_template(filename='tmp_rsync_' + env.host_string,
destination='/etc/default/rsync',
use_jinja=True,
use_sudo=True,
backup=True)
os.remove('tmp_rsync_' + env.host_string)
print red(env.host_string + ' | Start the rsync service')
if sudo('service rsync start', warn_only=True).failed:
sudo('service rsync restart')
print red(env.host_string + ' | Install swift storage')
sudo('apt-get -y install swift swift-account swift-container swift-object')
print red(env.host_string + ' | Update /etc/swift/account-server.conf')
with open('tmp_account_server_conf_' + env.host_string, 'w') as f:
f.write(conf_account_server_conf)
files.upload_template(filename='tmp_account_server_conf_' + env.host_string,
destination='/etc/swift/account-server.conf',
use_jinja=True,
use_sudo=True,
backup=True,
context={'bind_ip': bind_ip})
os.remove('tmp_account_server_conf_' + env.host_string)
print red(env.host_string + ' | Update /etc/swift/container-server.conf')
with open('tmp_container_server_conf_' + env.host_string, 'w') as f:
f.write(conf_container_server_conf)
files.upload_template(filename='tmp_container_server_conf_' + env.host_string,
destination='/etc/swift/container-server.conf',
use_jinja=True,
use_sudo=True,
backup=True,
context={'bind_ip': bind_ip})
os.remove('tmp_container_server_conf_' + env.host_string)
print red(env.host_string + ' | Update /etc/swift/object-server.conf')
with open('tmp_object_server_conf_' + env.host_string, 'w') as f:
f.write(conf_object_server_conf)
files.upload_template(filename='tmp_object_server_conf_' + env.host_string,
destination='/etc/swift/object-server.conf',
use_jinja=True,
use_sudo=True,
backup=True,
context={'bind_ip': bind_ip})
os.remove('tmp_object_server_conf_' + env.host_string)
print red(env.host_string + ' | Ensure proper ownership of the mount point directory structure')
sudo('chown -R swift:swift /srv/node')
print red(env.host_string + ' | Create the recon directory and ensure proper ownership of it')
sudo('mkdir -p /var/cache/swift')
sudo('chown -R root:swift /var/cache/swift')
sudo('chmod -R 755 /var/cache/swift')
def install(self, *args, **kwargs):
"""
Install swift storage
:param address: the management interface ip for rsync
:param bind_ip: the management interface ip for swift storage binding
:returns: None
"""
return execute(self._install, *args, **kwargs)
@runs_once
def _create_account_builder_file(self, partitions, replicas, moving):
with cd('/etc/swift'):
sudo(
'swift-ring-builder account.builder create {0} {1} {2}'.format(partitions, replicas, moving))
def create_account_builder_file(self, *args, **kwargs):
"""
Create account ring
:param partitions: 2^10 (1024) maximum partitions e.g. `10`
:param replicas: 3 replicas of each object e.g. `3`
:param moving: 1 hour minimum time between moving a partition more than once e.g. `1`
:returns: None
"""
return execute(self._create_account_builder_file, *args, **kwargs)
@runs_once
def _account_builder_add(self, region, zone, ip, device, weight):
with cd('/etc/swift'):
sudo('swift-ring-builder account.builder add --region {0} --zone {1} --ip {2} --port 6002 --device {3} --weight {4}'.format(region,
zone,
ip,
device,
weight))
print red(env.host_string + ' | Verify the ring contents')
sudo('swift-ring-builder account.builder')
def account_builder_add(self, *args, **kwargs):
"""
Add each storage node to the account ring
:param region: swift storage region e.g. `1`
:param zone: swift storage zone e.g. `1`
:param ip: the IP address of the management network on the each storage node e.g. `STORAGE_NODE_IP`
:param device: a storage device name on the same storage node e.g. `sdb`
:param weight: the storage device weight e.g. `100`
:returns: None
"""
return execute(self._account_builder_add, *args, **kwargs)
@runs_once
def _account_builder_rebalance(self):
with cd('/etc/swift'):
print red(env.host_string + ' | Rebalance the ring')
sudo('swift-ring-builder account.builder rebalance')
def account_builder_rebalance(self):
"""
Rebalance account builder
:returns: None
"""
return execute(self._account_builder_rebalance)
@runs_once
def _create_container_builder_file(self, partitions, replicas, moving):
with cd('/etc/swift'):
sudo('swift-ring-builder container.builder create {0} {1} {2}'.format(
partitions, replicas, moving))
def create_container_builder_file(self, *args, **kwargs):
"""
Create container ring
:param partitions: 2^10 (1024) maximum partitions e.g. `10`
:param replicas: 3 replicas of each object e.g. `3`
:param moving: 1 hour minimum time between moving a partition more than once e.g. `1`
:returns: None
"""
return execute(self._create_container_builder_file, *args, **kwargs)
@runs_once
def _container_builder_add(self, region, zone, ip, device, weight):
with cd('/etc/swift'):
sudo('swift-ring-builder container.builder add --region {0} --zone {1} --ip {2} --port 6001 --device {3} --weight {4}'.format(region,
zone,
ip,
device,
weight))
print red(env.host_string + ' | Verify the ring contents')
sudo('swift-ring-builder container.builder')
def container_builder_add(self, *args, **kwargs):
"""
Add each storage node to the container ring
:param region: swift storage region e.g. `1`
:param zone: swift storage zone e.g. `1`
:param ip: the IP address of the management network on the storage node e.g. `STORAGE_NODE_IP`
:param device: a storage device name on the same storage node e.g. `sdb`
:param weight: the storage device weight e.g. `100`
:returns: None
"""
return execute(self._container_builder_add, *args, **kwargs)
@runs_once
def _container_builder_rebalance(self):
with cd('/etc/swift'):
print red(env.host_string + ' | Rebalance the ring')
sudo('swift-ring-builder container.builder rebalance')
def container_builder_rebalance(self):
"""
Rebalance container builder
:returns: None
"""
return execute(self._container_builder_rebalance)
@runs_once
def _create_object_builder_file(self, partitions, replicas, moving):
with cd('/etc/swift'):
sudo(
'swift-ring-builder object.builder create {0} {1} {2}'.format(partitions, replicas, moving))
def create_object_builder_file(self, *args, **kwargs):
"""
Create object ring
:param partitions: 2^10 (1024) maximum partitions e.g. `10`
:param replicas: 3 replicas of each object e.g. `3`
:param moving: 1 hour minimum time between moving a partition more than once e.g. `1`
:returns: None
"""
return execute(self._create_object_builder_file, *args, **kwargs)
@runs_once
def _object_builder_add(self, region, zone, ip, device, weight):
with cd('/etc/swift'):
sudo('swift-ring-builder object.builder add --region {0} --zone {1} --ip {2} --port 6000 --device {3} --weight {4}'.format(region,
zone,
ip,
device,
weight))
print red(env.host_string + ' | Verify the ring contents')
sudo('swift-ring-builder object.builder')
def object_builder_add(self, *args, **kwargs):
"""
Add each storage node to the object ring
:param region: swift storage region e.g. `1`
:param zone: swift storage zone e.g. `1`
:param ip: the IP address of the management network on the storage node e.g. `STORAGE_NODE_IP`
:param device: a storage device name on the same storage node e.g. `sdb`
:param weight: the storage device weight e.g. `100`
:returns: None
"""
return execute(self._object_builder_add, *args, **kwargs)
@runs_once
def _object_builder_rebalance(self):
with cd('/etc/swift'):
print red(env.host_string + ' | Rebalance the ring')
sudo('swift-ring-builder object.builder rebalance')
def object_builder_rebalance(self):
"""
Rebalance object builder
:returns: None
"""
return execute(self._object_builder_rebalance)
def _get_builder_file(self):
get('/etc/swift/account.ring.gz', './account.ring.gz')
get('/etc/swift/container.ring.gz', './container.ring.gz')
get('/etc/swift/object.ring.gz', './object.ring.gz')
def get_builder_file(self):
"""
Copy *.ring.gz to local
:returns: None
"""
return execute(self._get_builder_file)
def _sync_builder_file(self):
put('./account.ring.gz', '/etc/swift/account.ring.gz', use_sudo=True)
put('./container.ring.gz', '/etc/swift/container.ring.gz', use_sudo=True)
put('./object.ring.gz', '/etc/swift/object.ring.gz', use_sudo=True)
def sync_builder_file(self, hosts):
"""
Copy the account.ring.gz, container.ring.gz, and object.ring.gz files from local to the /etc/swift directory on each storage node and any additional nodes running the proxy service
:returns: None
"""
return execute(self._sync_builder_file, hosts=hosts)
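# A compact, hypothetical equivalent of the repetitive ring-building example in the
# class docstring above (sketch only: it assumes the three builder files were already
# created with create_*_builder_file, and the IPs and device names are placeholders):
#
#   ring = SwiftStorage(user='ubuntu', hosts=['controller1'])
#   builders = ((ring.account_builder_add, ring.account_builder_rebalance),
#               (ring.container_builder_add, ring.container_builder_rebalance),
#               (ring.object_builder_add, ring.object_builder_rebalance))
#   for builder_add, rebalance in builders:
#       for ip in ('192.168.1.11', '192.168.1.12'):
#           for device in ('sdb', 'sdc', 'sdd', 'sde'):
#               builder_add(region=1, zone=1, ip=ip, device=device, weight=100)
#       rebalance()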
``` |
{
"source": "jiasir/pytables",
"score": 2
} |
#### File: jiasir/pytables/pytables_iptc.py
```python
import pika
import iptc
connection = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.20.10'))
channel = connection.channel()
channel.exchange_declare(exchange='ip', type='fanout')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='ip', queue=queue_name)
print ' [*] Waiting for ip. To exit press CTRL+C'
def deny_ip(body):
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
rule = iptc.Rule()
rule.in_interface = "eth+"
rule.src = "%r/255.255.255.0" % body
target = iptc.Target(rule, "DROP")
rule.target = target
chain.insert_rule(rule)
def callback(ch, method, properties, body):
deny_ip(body)
print " [x] Drop: %r" % (body,)
channel.basic_consume(callback, queue=queue_name, no_ack=True)
channel.start_consuming()
```
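The consumer above only reacts to messages; something must publish offending IPs into the `ip` fanout exchange. A hedged sketch of such a publisher, using the same pika 0.x-style calls as the consumer (the broker address and the IP are placeholders):
```python
# Minimal publisher sketch for the fanout consumer above (pika 0.x style API).
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.20.10'))
channel = connection.channel()
channel.exchange_declare(exchange='ip', type='fanout')
# routing_key is ignored by fanout exchanges; every bound queue receives the message
channel.basic_publish(exchange='ip', routing_key='', body='10.0.0.99')
connection.close()
```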
#### File: jiasir/pytables/pytables.py
```python
__author__ = 'jiasir'
import pika
import subprocess
import logging
import ConfigParser
config_file = ConfigParser.RawConfigParser(allow_no_value=True)
config_file.read('etc/pytables.conf')
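# Hypothetical example of etc/pytables.conf; the section and keys match the
# config_file.get('DEFAULT', ...) calls in this module, the values are placeholders:
#
#   [DEFAULT]
#   host = 192.168.20.10
#   exchange = ip_exchange
#   log_level = INFO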
connection = pika.BlockingConnection(pika.ConnectionParameters(host=config_file.get('DEFAULT', 'host')))
channel = connection.channel()
channel.exchange_declare(exchange=config_file.get('DEFAULT', 'exchange'), type='fanout')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='ip_exchange', queue=queue_name)
logger = logging.getLogger('pytables')
logging.basicConfig(filename='/var/log/pytables.log', level=config_file.get('DEFAULT', 'log_level'),
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
print ' [*] Waiting for ip. To exit press CTRL+C'
def deny_ip(body):
subprocess.call(["iptables", "-I", "INPUT", "-s", body, "-j" "DROP"])
d = {}
def callback(ch, method, properties, body):
if body not in d:
deny_ip(body)
d[body] = 1
logger.info(" [x] Drop: %r" % (body,))
channel.basic_consume(callback, queue=queue_name, no_ack=True)
channel.start_consuming()
``` |
{
"source": "jiasli/azure-cli",
"score": 2
} |
#### File: cli/core/_profile.py
```python
import os
import os.path
import sys
from copy import deepcopy
from enum import Enum
from azure.cli.core._session import ACCOUNT
from azure.cli.core.azclierror import AuthenticationError
from azure.cli.core.cloud import get_active_cloud, set_cloud_subscription
from azure.cli.core.util import in_cloud_console, can_launch_browser
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
# Names below are used by azure-xplat-cli to persist account information into
# ~/.azure/azureProfile.json or osx/keychainer or windows secure storage,
# which azure-cli will share.
# Please do not rename them unless you know what you are doing.
_IS_DEFAULT_SUBSCRIPTION = 'isDefault'
_SUBSCRIPTION_ID = 'id'
_SUBSCRIPTION_NAME = 'name'
# Tenant of the token which is used to list the subscription
_TENANT_ID = 'tenantId'
# Home tenant of the subscription, which maps to tenantId in 'Subscriptions - List REST API'
# https://docs.microsoft.com/en-us/rest/api/resources/subscriptions/list
_HOME_TENANT_ID = 'homeTenantId'
_MANAGED_BY_TENANTS = 'managedByTenants'
_USER_ENTITY = 'user'
_USER_NAME = 'name'
_CLIENT_ID = 'clientId'
_CLOUD_SHELL_ID = 'cloudShellID'
_SUBSCRIPTIONS = 'subscriptions'
_INSTALLATION_ID = 'installationId'
_ENVIRONMENT_NAME = 'environmentName'
_STATE = 'state'
_USER_TYPE = 'type'
_USER = 'user'
_SERVICE_PRINCIPAL = 'servicePrincipal'
_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH = 'useCertSNIssuerAuth'
_TOKEN_ENTRY_USER_ID = 'userId'
_TOKEN_ENTRY_TOKEN_TYPE = 'tokenType'
_TENANT_LEVEL_ACCOUNT_NAME = 'N/A(tenant level account)'
_SYSTEM_ASSIGNED_IDENTITY = 'systemAssignedIdentity'
_USER_ASSIGNED_IDENTITY = 'userAssignedIdentity'
_ASSIGNED_IDENTITY_INFO = 'assignedIdentityInfo'
_AZ_LOGIN_MESSAGE = "Please run 'az login' to setup account."
def load_subscriptions(cli_ctx, all_clouds=False, refresh=False):
profile = Profile(cli_ctx=cli_ctx)
if refresh:
profile.refresh_accounts()
subscriptions = profile.load_cached_subscriptions(all_clouds)
return subscriptions
def _detect_adfs_authority(authority_url, tenant):
"""Prepare authority and tenant for Azure Identity with ADFS support.
If `authority_url` ends with '/adfs', `tenant` will be set to 'adfs'. For example:
'https://adfs.redmond.azurestack.corp.microsoft.com/adfs'
-> ('https://adfs.redmond.azurestack.corp.microsoft.com/', 'adfs')
"""
authority_url = authority_url.rstrip('/')
if authority_url.endswith('/adfs'):
authority_url = authority_url[:-len('/adfs')]
# The custom tenant is discarded in ADFS environment
tenant = 'adfs'
return authority_url, tenant
def get_credential_types(cli_ctx):
class CredentialType(Enum): # pylint: disable=too-few-public-methods
cloud = get_active_cloud(cli_ctx)
management = cli_ctx.cloud.endpoints.management
rbac = cli_ctx.cloud.endpoints.active_directory_graph_resource_id
return CredentialType
def _get_cloud_console_token_endpoint():
return os.environ.get('MSI_ENDPOINT')
def _attach_token_tenant(subscription, tenant):
"""Attach the token tenant ID to the subscription as tenant_id, so that CLI knows which token should be used
to access the subscription.
This function supports multiple APIs:
- v2016_06_01's Subscription doesn't have tenant_id
- v2019_11_01's Subscription has tenant_id representing the home tenant ID. It will be mapped to home_tenant_id
"""
if hasattr(subscription, "tenant_id"):
setattr(subscription, 'home_tenant_id', subscription.tenant_id)
setattr(subscription, 'tenant_id', tenant)
# pylint: disable=too-many-lines,too-many-instance-attributes,unused-argument
class Profile:
def __init__(self, cli_ctx=None, storage=None):
"""Class to manage CLI's accounts (profiles) and identities (credentials).
:param cli_ctx: The CLI context
:param storage: A dict to store accounts, by default persisted to ~/.azure/azureProfile.json as JSON
"""
from azure.cli.core import get_default_cli
self.cli_ctx = cli_ctx or get_default_cli()
self._storage = storage or ACCOUNT
self._authority = self.cli_ctx.cloud.endpoints.active_directory
from .auth.util import resource_to_scopes
self._arm_scope = resource_to_scopes(self.cli_ctx.cloud.endpoints.active_directory_resource_id)
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
def login(self,
interactive,
username,
password,
is_service_principal,
tenant,
scopes=None,
use_device_code=False,
allow_no_subscriptions=False,
use_cert_sn_issuer=None,
**kwargs):
"""
For service principal, `password` is a dict returned by ServicePrincipalAuth.build_credential
"""
if not scopes:
scopes = self._arm_scope
identity = _create_identity_instance(self.cli_ctx, self._authority, tenant_id=tenant)
user_identity = None
if interactive:
if not use_device_code and not can_launch_browser():
logger.info('No web browser is available. Fall back to device code.')
use_device_code = True
if use_device_code:
user_identity = identity.login_with_device_code(scopes=scopes, **kwargs)
else:
user_identity = identity.login_with_auth_code(scopes=scopes, **kwargs)
else:
if not is_service_principal:
user_identity = identity.login_with_username_password(username, password, scopes=scopes, **kwargs)
else:
identity.login_with_service_principal(username, password, scopes=scopes)
# We have finished login. Let's find all subscriptions.
if user_identity:
username = user_identity['username']
subscription_finder = SubscriptionFinder(self.cli_ctx)
# Create credentials
if user_identity:
credential = identity.get_user_credential(username)
else:
credential = identity.get_service_principal_credential(username)
if tenant:
subscriptions = subscription_finder.find_using_specific_tenant(tenant, credential)
else:
subscriptions = subscription_finder.find_using_common_tenant(username, credential)
if not subscriptions and not allow_no_subscriptions:
raise CLIError("No subscriptions found for {}.".format(username))
if allow_no_subscriptions:
t_list = [s.tenant_id for s in subscriptions]
bare_tenants = [t for t in subscription_finder.tenants if t not in t_list]
tenant_accounts = self._build_tenant_level_accounts(bare_tenants)
subscriptions.extend(tenant_accounts)
if not subscriptions:
return []
consolidated = self._normalize_properties(username, subscriptions,
is_service_principal, bool(use_cert_sn_issuer))
self._set_subscriptions(consolidated)
return deepcopy(consolidated)
def login_with_managed_identity(self, identity_id=None, allow_no_subscriptions=None):
import jwt
from msrestazure.tools import is_valid_resource_id
from azure.cli.core.auth.adal_authentication import MSIAuthenticationWrapper
resource = self.cli_ctx.cloud.endpoints.active_directory_resource_id
if identity_id:
if is_valid_resource_id(identity_id):
msi_creds = MSIAuthenticationWrapper(resource=resource, msi_res_id=identity_id)
identity_type = MsiAccountTypes.user_assigned_resource_id
else:
authenticated = False
from azure.cli.core.azclierror import AzureResponseError
try:
msi_creds = MSIAuthenticationWrapper(resource=resource, client_id=identity_id)
identity_type = MsiAccountTypes.user_assigned_client_id
authenticated = True
except AzureResponseError as ex:
if 'http error: 400, reason: Bad Request' in ex.error_msg:
logger.info('Sniff: not an MSI client id')
else:
raise
if not authenticated:
try:
identity_type = MsiAccountTypes.user_assigned_object_id
msi_creds = MSIAuthenticationWrapper(resource=resource, object_id=identity_id)
authenticated = True
except AzureResponseError as ex:
if 'http error: 400, reason: Bad Request' in ex.error_msg:
logger.info('Sniff: not an MSI object id')
else:
raise
if not authenticated:
raise CLIError('Failed to connect to MSI, check your managed service identity id.')
else:
identity_type = MsiAccountTypes.system_assigned
msi_creds = MSIAuthenticationWrapper(resource=resource)
token_entry = msi_creds.token
token = token_entry['access_token']
logger.info('MSI: token was retrieved. Now trying to initialize local accounts...')
decode = jwt.decode(token, algorithms=['RS256'], options={"verify_signature": False})
tenant = decode['tid']
subscription_finder = SubscriptionFinder(self.cli_ctx)
subscriptions = subscription_finder.find_using_specific_tenant(tenant, msi_creds)
base_name = ('{}-{}'.format(identity_type, identity_id) if identity_id else identity_type)
user = _USER_ASSIGNED_IDENTITY if identity_id else _SYSTEM_ASSIGNED_IDENTITY
if not subscriptions:
if allow_no_subscriptions:
subscriptions = self._build_tenant_level_accounts([tenant])
else:
raise CLIError('No access was configured for the VM, hence no subscriptions were found. '
"If this is expected, use '--allow-no-subscriptions' to have tenant level access.")
consolidated = self._normalize_properties(user, subscriptions, is_service_principal=True,
user_assigned_identity_id=base_name)
self._set_subscriptions(consolidated)
return deepcopy(consolidated)
def login_in_cloud_shell(self):
import jwt
from azure.cli.core.auth.adal_authentication import MSIAuthenticationWrapper
msi_creds = MSIAuthenticationWrapper(resource=self.cli_ctx.cloud.endpoints.active_directory_resource_id)
token_entry = msi_creds.token
token = token_entry['access_token']
logger.info('MSI: token was retrieved. Now trying to initialize local accounts...')
decode = jwt.decode(token, algorithms=['RS256'], options={"verify_signature": False})
tenant = decode['tid']
subscription_finder = SubscriptionFinder(self.cli_ctx)
subscriptions = subscription_finder.find_using_specific_tenant(tenant, msi_creds)
if not subscriptions:
raise CLIError('No subscriptions were found in the cloud shell')
user = decode.get('unique_name', 'N/A')
consolidated = self._normalize_properties(user, subscriptions, is_service_principal=False)
for s in consolidated:
s[_USER_ENTITY][_CLOUD_SHELL_ID] = True
self._set_subscriptions(consolidated)
return deepcopy(consolidated)
def logout(self, user_or_sp):
subscriptions = self.load_cached_subscriptions(all_clouds=True)
result = [x for x in subscriptions
if user_or_sp.lower() == x[_USER_ENTITY][_USER_NAME].lower()]
subscriptions = [x for x in subscriptions if x not in result]
self._storage[_SUBSCRIPTIONS] = subscriptions
identity = _create_identity_instance(self.cli_ctx, self._authority)
identity.logout_user(user_or_sp)
identity.logout_service_principal(user_or_sp)
def logout_all(self):
self._storage[_SUBSCRIPTIONS] = []
identity = _create_identity_instance(self.cli_ctx, self._authority)
identity.logout_all_users()
identity.logout_all_service_principal()
def get_login_credentials(self, resource=None, client_id=None, subscription_id=None, aux_subscriptions=None,
aux_tenants=None):
"""Get a CredentialAdaptor instance to be used with both Track 1 and Track 2 SDKs.
:param resource: The resource ID to acquire an access token. Only provide it for Track 1 SDKs.
:param client_id:
:param subscription_id:
:param aux_subscriptions:
:param aux_tenants:
"""
resource = resource or self.cli_ctx.cloud.endpoints.active_directory_resource_id
if aux_tenants and aux_subscriptions:
raise CLIError("Please specify only one of aux_subscriptions and aux_tenants, not both")
account = self.get_subscription(subscription_id)
managed_identity_type, managed_identity_id = Profile._try_parse_msi_account_name(account)
# Cloud Shell is just a system-assigned managed identity
if in_cloud_console() and account[_USER_ENTITY].get(_CLOUD_SHELL_ID):
managed_identity_type = MsiAccountTypes.system_assigned
if managed_identity_type is None:
# user and service principal
external_tenants = []
if aux_tenants:
external_tenants = [tenant for tenant in aux_tenants if tenant != account[_TENANT_ID]]
if aux_subscriptions:
ext_subs = [aux_sub for aux_sub in aux_subscriptions if aux_sub != subscription_id]
for ext_sub in ext_subs:
sub = self.get_subscription(ext_sub)
if sub[_TENANT_ID] != account[_TENANT_ID]:
external_tenants.append(sub[_TENANT_ID])
credential = self._create_credential(account, client_id=client_id)
external_credentials = []
for external_tenant in external_tenants:
external_credentials.append(self._create_credential(account, external_tenant, client_id=client_id))
from azure.cli.core.auth.credential_adaptor import CredentialAdaptor
cred = CredentialAdaptor(credential,
auxiliary_credentials=external_credentials,
resource=resource)
else:
# managed identity
cred = MsiAccountTypes.msi_auth_factory(managed_identity_type, managed_identity_id, resource)
return (cred,
str(account[_SUBSCRIPTION_ID]),
str(account[_TENANT_ID]))
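# Hypothetical usage, based on the tuple returned above:
#   cred, subscription_id, tenant_id = profile.get_login_credentials()
#   # `cred` can then be passed to an Azure SDK client together with subscription_id.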
def get_raw_token(self, resource=None, scopes=None, subscription=None, tenant=None):
# Convert resource to scopes
if resource and not scopes:
from .auth.util import resource_to_scopes
scopes = resource_to_scopes(resource)
# Use ARM as the default scopes
if not scopes:
scopes = self._arm_scope
if subscription and tenant:
raise CLIError("Please specify only one of subscription and tenant, not both")
account = self.get_subscription(subscription)
identity_type, identity_id = Profile._try_parse_msi_account_name(account)
if identity_type:
# managed identity
if tenant:
raise CLIError("Tenant shouldn't be specified for managed identity account")
from .auth.util import scopes_to_resource
msi_creds = MsiAccountTypes.msi_auth_factory(identity_type, identity_id,
scopes_to_resource(scopes))
sdk_token = msi_creds.get_token(*scopes)
elif in_cloud_console() and account[_USER_ENTITY].get(_CLOUD_SHELL_ID):
# Cloud Shell, which is just a system-assigned managed identity.
if tenant:
raise CLIError("Tenant shouldn't be specified for Cloud Shell account")
from .auth.util import scopes_to_resource
msi_creds = MsiAccountTypes.msi_auth_factory(MsiAccountTypes.system_assigned, identity_id,
scopes_to_resource(scopes))
sdk_token = msi_creds.get_token(*scopes)
else:
credential = self._create_credential(account, tenant)
sdk_token = credential.get_token(*scopes)
# Convert epoch int 'expires_on' to datetime string 'expiresOn' for backward compatibility
# WARNING: expiresOn is deprecated and will be removed in future release.
import datetime
expiresOn = datetime.datetime.fromtimestamp(sdk_token.expires_on).strftime("%Y-%m-%d %H:%M:%S.%f")
token_entry = {
'accessToken': sdk_token.token,
'expires_on': sdk_token.expires_on, # epoch int, like 1605238724
'expiresOn': expiresOn # datetime string, like "2020-11-12 13:50:47.114324"
}
# (tokenType, accessToken, tokenEntry)
creds = 'Bearer', sdk_token.token, token_entry
# (cred, subscription, tenant)
return (creds,
None if tenant else str(account[_SUBSCRIPTION_ID]),
str(tenant if tenant else account[_TENANT_ID]))
def _normalize_properties(self, user, subscriptions, is_service_principal, cert_sn_issuer_auth=None,
user_assigned_identity_id=None):
consolidated = []
for s in subscriptions:
subscription_dict = {
_SUBSCRIPTION_ID: s.id.rpartition('/')[2],
_SUBSCRIPTION_NAME: s.display_name,
_STATE: s.state,
_USER_ENTITY: {
_USER_NAME: user,
_USER_TYPE: _SERVICE_PRINCIPAL if is_service_principal else _USER
},
_IS_DEFAULT_SUBSCRIPTION: False,
_TENANT_ID: s.tenant_id,
_ENVIRONMENT_NAME: self.cli_ctx.cloud.name
}
if subscription_dict[_SUBSCRIPTION_NAME] != _TENANT_LEVEL_ACCOUNT_NAME:
_transform_subscription_for_multiapi(s, subscription_dict)
consolidated.append(subscription_dict)
if cert_sn_issuer_auth:
consolidated[-1][_USER_ENTITY][_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH] = True
if user_assigned_identity_id:
consolidated[-1][_USER_ENTITY][_ASSIGNED_IDENTITY_INFO] = user_assigned_identity_id
return consolidated
def _build_tenant_level_accounts(self, tenants):
result = []
for t in tenants:
s = self._new_account()
s.id = '/subscriptions/' + t
s.subscription = t
s.tenant_id = t
s.display_name = _TENANT_LEVEL_ACCOUNT_NAME
result.append(s)
return result
def _new_account(self):
"""Build an empty Subscription which will be used as a tenant account.
API version doesn't matter as only specified attributes are preserved by _normalize_properties."""
from azure.cli.core.profiles import ResourceType, get_sdk
SubscriptionType = get_sdk(self.cli_ctx, ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS,
'Subscription', mod='models')
s = SubscriptionType()
s.state = 'Enabled'
return s
def _set_subscriptions(self, new_subscriptions, merge=True, secondary_key_name=None):
def _get_key_name(account, secondary_key_name):
return (account[_SUBSCRIPTION_ID] if secondary_key_name is None
else '{}-{}'.format(account[_SUBSCRIPTION_ID], account[secondary_key_name]))
def _match_account(account, subscription_id, secondary_key_name, secondary_key_val):
return (account[_SUBSCRIPTION_ID] == subscription_id and
(secondary_key_val is None or account[secondary_key_name] == secondary_key_val))
existing_ones = self.load_cached_subscriptions(all_clouds=True)
active_one = next((x for x in existing_ones if x.get(_IS_DEFAULT_SUBSCRIPTION)), None)
active_subscription_id = active_one[_SUBSCRIPTION_ID] if active_one else None
active_secondary_key_val = active_one[secondary_key_name] if (active_one and secondary_key_name) else None
active_cloud = self.cli_ctx.cloud
default_sub_id = None
# merge with existing ones
if merge:
dic = {_get_key_name(x, secondary_key_name): x for x in existing_ones}
else:
dic = {}
dic.update((_get_key_name(x, secondary_key_name), x) for x in new_subscriptions)
subscriptions = list(dic.values())
if subscriptions:
if active_one:
new_active_one = next(
(x for x in new_subscriptions if _match_account(x, active_subscription_id, secondary_key_name,
active_secondary_key_val)), None)
for s in subscriptions:
s[_IS_DEFAULT_SUBSCRIPTION] = False
if not new_active_one:
new_active_one = Profile._pick_working_subscription(new_subscriptions)
else:
new_active_one = Profile._pick_working_subscription(new_subscriptions)
new_active_one[_IS_DEFAULT_SUBSCRIPTION] = True
default_sub_id = new_active_one[_SUBSCRIPTION_ID]
set_cloud_subscription(self.cli_ctx, active_cloud.name, default_sub_id)
self._storage[_SUBSCRIPTIONS] = subscriptions
@staticmethod
def _pick_working_subscription(subscriptions):
s = next((x for x in subscriptions if x.get(_STATE) == 'Enabled'), None)
return s or subscriptions[0]
def is_tenant_level_account(self):
return self.get_subscription()[_SUBSCRIPTION_NAME] == _TENANT_LEVEL_ACCOUNT_NAME
def set_active_subscription(self, subscription): # take id or name
subscriptions = self.load_cached_subscriptions(all_clouds=True)
active_cloud = self.cli_ctx.cloud
subscription = subscription.lower()
result = [x for x in subscriptions
if subscription in [x[_SUBSCRIPTION_ID].lower(),
x[_SUBSCRIPTION_NAME].lower()] and
x[_ENVIRONMENT_NAME] == active_cloud.name]
if len(result) != 1:
raise CLIError("The subscription of '{}' {} in cloud '{}'.".format(
subscription, "doesn't exist" if not result else 'has more than one match', active_cloud.name))
for s in subscriptions:
s[_IS_DEFAULT_SUBSCRIPTION] = False
result[0][_IS_DEFAULT_SUBSCRIPTION] = True
set_cloud_subscription(self.cli_ctx, active_cloud.name, result[0][_SUBSCRIPTION_ID])
self._storage[_SUBSCRIPTIONS] = subscriptions
def load_cached_subscriptions(self, all_clouds=False):
subscriptions = self._storage.get(_SUBSCRIPTIONS) or []
active_cloud = self.cli_ctx.cloud
cached_subscriptions = [sub for sub in subscriptions
if all_clouds or sub[_ENVIRONMENT_NAME] == active_cloud.name]
# use deepcopy as we don't want to persist these changes to file.
return deepcopy(cached_subscriptions)
def get_current_account_user(self):
try:
active_account = self.get_subscription()
except CLIError:
raise CLIError('There are no active accounts.')
return active_account[_USER_ENTITY][_USER_NAME]
def get_subscription(self, subscription=None): # take id or name
subscriptions = self.load_cached_subscriptions()
if not subscriptions:
raise CLIError(_AZ_LOGIN_MESSAGE)
result = [x for x in subscriptions if (
not subscription and x.get(_IS_DEFAULT_SUBSCRIPTION) or
subscription and subscription.lower() in [x[_SUBSCRIPTION_ID].lower(), x[
_SUBSCRIPTION_NAME].lower()])]
if not result and subscription:
raise CLIError("Subscription '{}' not found. "
"Check the spelling and casing and try again.".format(subscription))
if not result and not subscription:
raise CLIError("No subscription found. Run 'az account set' to select a subscription.")
if len(result) > 1:
raise CLIError("Multiple subscriptions with the name '{}' found. "
"Specify the subscription ID.".format(subscription))
return result[0]
def get_subscription_id(self, subscription=None): # take id or name
return self.get_subscription(subscription)[_SUBSCRIPTION_ID]
@staticmethod
def _try_parse_msi_account_name(account):
msi_info, user = account[_USER_ENTITY].get(_ASSIGNED_IDENTITY_INFO), account[_USER_ENTITY].get(_USER_NAME)
if user in [_SYSTEM_ASSIGNED_IDENTITY, _USER_ASSIGNED_IDENTITY]:
if not msi_info:
msi_info = account[_SUBSCRIPTION_NAME] # fall back to old persisting way
parts = msi_info.split('-', 1)
if parts[0] in MsiAccountTypes.valid_msi_account_types():
return parts[0], (None if len(parts) <= 1 else parts[1])
return None, None
def _create_credential(self, account, tenant_id=None, client_id=None):
"""Create a credential object driven by MSAL
:param account:
:param tenant_id: If not None, override tenantId from 'account'
:param client_id:
:return:
"""
user_type = account[_USER_ENTITY][_USER_TYPE]
username_or_sp_id = account[_USER_ENTITY][_USER_NAME]
tenant_id = tenant_id if tenant_id else account[_TENANT_ID]
identity = _create_identity_instance(self.cli_ctx, self._authority, tenant_id=tenant_id, client_id=client_id)
# User
if user_type == _USER:
return identity.get_user_credential(username_or_sp_id)
# Service Principal
if user_type == _SERVICE_PRINCIPAL:
return identity.get_service_principal_credential(username_or_sp_id)
raise NotImplementedError
def refresh_accounts(self):
subscriptions = self.load_cached_subscriptions()
to_refresh = subscriptions
subscription_finder = SubscriptionFinder(self.cli_ctx)
refreshed_list = set()
result = []
for s in to_refresh:
user_name = s[_USER_ENTITY][_USER_NAME]
if user_name in refreshed_list:
continue
refreshed_list.add(user_name)
is_service_principal = (s[_USER_ENTITY][_USER_TYPE] == _SERVICE_PRINCIPAL)
tenant = s[_TENANT_ID]
subscriptions = []
try:
identity_credential = self._create_credential(s, tenant)
if is_service_principal:
subscriptions = subscription_finder.find_using_specific_tenant(tenant, identity_credential)
else:
# pylint: disable=protected-access
subscriptions = subscription_finder.find_using_common_tenant(user_name, identity_credential)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Refreshing for '%s' failed with an error '%s'. The existing accounts were not "
"modified. You can run 'az login' later to explicitly refresh them", user_name, ex)
result += deepcopy([r for r in to_refresh if r[_USER_ENTITY][_USER_NAME] == user_name])
continue
if not subscriptions:
if s[_SUBSCRIPTION_NAME] == _TENANT_LEVEL_ACCOUNT_NAME:
subscriptions = self._build_tenant_level_accounts([s[_TENANT_ID]])
if not subscriptions:
continue
consolidated = self._normalize_properties(user_name,
subscriptions,
is_service_principal)
result += consolidated
self._set_subscriptions(result, merge=False)
def get_sp_auth_info(self, subscription_id=None, name=None, password=None, cert_file=None):
"""Generate a JSON for --sdk-auth argument when used in:
- az ad sp create-for-rbac --sdk-auth
- az account show --sdk-auth
"""
from collections import OrderedDict
account = self.get_subscription(subscription_id)
# is the credential created through command like 'create-for-rbac'?
result = OrderedDict()
if name and (password or cert_file):
result['clientId'] = name
if password:
result['clientSecret'] = password
else:
result['clientCertificate'] = cert_file
result['subscriptionId'] = subscription_id or account[_SUBSCRIPTION_ID]
else: # has logged in through cli
user_type = account[_USER_ENTITY].get(_USER_TYPE)
if user_type == _SERVICE_PRINCIPAL:
client_id = account[_USER_ENTITY][_USER_NAME]
result['clientId'] = client_id
identity = _create_identity_instance(self.cli_ctx, self._authority, tenant_id=account[_TENANT_ID])
sp_entry = identity.get_service_principal_entry(client_id)
from .auth.msal_authentication import _CLIENT_SECRET, _CERTIFICATE
secret = sp_entry.get(_CLIENT_SECRET)
if secret:
result['clientSecret'] = secret
else:
# we can output 'clientCertificateThumbprint' if asked
result['clientCertificate'] = sp_entry.get(_CERTIFICATE)
result['subscriptionId'] = account[_SUBSCRIPTION_ID]
else:
raise CLIError('SDK Auth file is only applicable when authenticated using a service principal')
result[_TENANT_ID] = account[_TENANT_ID]
endpoint_mappings = OrderedDict() # use OrderedDict to control the output sequence
endpoint_mappings['active_directory'] = 'activeDirectoryEndpointUrl'
endpoint_mappings['resource_manager'] = 'resourceManagerEndpointUrl'
endpoint_mappings['active_directory_graph_resource_id'] = 'activeDirectoryGraphResourceId'
endpoint_mappings['sql_management'] = 'sqlManagementEndpointUrl'
endpoint_mappings['gallery'] = 'galleryEndpointUrl'
endpoint_mappings['management'] = 'managementEndpointUrl'
from azure.cli.core.cloud import CloudEndpointNotSetException
for e in endpoint_mappings:
try:
result[endpoint_mappings[e]] = getattr(get_active_cloud(self.cli_ctx).endpoints, e)
except CloudEndpointNotSetException:
result[endpoint_mappings[e]] = None
return result
def get_installation_id(self):
installation_id = self._storage.get(_INSTALLATION_ID)
if not installation_id:
import uuid
installation_id = str(uuid.uuid1())
self._storage[_INSTALLATION_ID] = installation_id
return installation_id
class MsiAccountTypes:
# pylint: disable=no-method-argument,no-self-argument
system_assigned = 'MSI'
user_assigned_client_id = 'MSIClient'
user_assigned_object_id = 'MSIObject'
user_assigned_resource_id = 'MSIResource'
@staticmethod
def valid_msi_account_types():
return [MsiAccountTypes.system_assigned, MsiAccountTypes.user_assigned_client_id,
MsiAccountTypes.user_assigned_object_id, MsiAccountTypes.user_assigned_resource_id]
@staticmethod
def msi_auth_factory(cli_account_name, identity, resource):
from azure.cli.core.auth.adal_authentication import MSIAuthenticationWrapper
if cli_account_name == MsiAccountTypes.system_assigned:
return MSIAuthenticationWrapper(resource=resource)
if cli_account_name == MsiAccountTypes.user_assigned_client_id:
return MSIAuthenticationWrapper(resource=resource, client_id=identity)
if cli_account_name == MsiAccountTypes.user_assigned_object_id:
return MSIAuthenticationWrapper(resource=resource, object_id=identity)
if cli_account_name == MsiAccountTypes.user_assigned_resource_id:
return MSIAuthenticationWrapper(resource=resource, msi_res_id=identity)
raise ValueError("unrecognized msi account name '{}'".format(cli_account_name))
class SubscriptionFinder:
# An ARM client. It finds subscriptions for a user or service principal. It shouldn't do any
# authentication work, but only find subscriptions
def __init__(self, cli_ctx):
self.cli_ctx = cli_ctx
self.secret = None
self._arm_resource_id = cli_ctx.cloud.endpoints.active_directory_resource_id
self._authority = self.cli_ctx.cloud.endpoints.active_directory
self.tenants = []
def find_using_common_tenant(self, username, credential=None):
# pylint: disable=too-many-statements
all_subscriptions = []
empty_tenants = []
mfa_tenants = []
client = self._create_subscription_client(credential)
tenants = client.tenants.list()
for t in tenants:
tenant_id = t.tenant_id
# display_name is available since /tenants?api-version=2018-06-01,
# not available in /tenants?api-version=2016-06-01
if not hasattr(t, 'display_name'):
t.display_name = None
t.tenant_id_name = tenant_id
if t.display_name:
# e.g. '72f988bf-86f1-41af-91ab-2d7cd011db47 Microsoft'
t.tenant_id_name = "{} '{}'".format(tenant_id, t.display_name)
logger.info("Finding subscriptions under tenant %s", t.tenant_id_name)
identity = _create_identity_instance(self.cli_ctx, self._authority, tenant_id=tenant_id)
specific_tenant_credential = identity.get_user_credential(username)
try:
subscriptions = self.find_using_specific_tenant(tenant_id, specific_tenant_credential)
except AuthenticationError as ex:
# because user creds went through the 'common' tenant, the error here must be
# tenant specific, like the account was disabled. For such errors, we will continue
# with other tenants.
msg = ex.error_msg
if 'AADSTS50076' in msg:
# The tenant requires MFA and can't be accessed with home tenant's refresh token
mfa_tenants.append(t)
else:
logger.warning("Failed to authenticate '%s' due to error '%s'", t, ex)
continue
if not subscriptions:
empty_tenants.append(t)
# When a subscription can be listed by multiple tenants, only the first appearance is retained
for sub_to_add in subscriptions:
add_sub = True
for sub_to_compare in all_subscriptions:
if sub_to_add.subscription_id == sub_to_compare.subscription_id:
logger.warning("Subscription %s '%s' can be accessed from tenants %s(default) and %s. "
"To select a specific tenant when accessing this subscription, "
"use 'az login --tenant TENANT_ID'.",
sub_to_add.subscription_id, sub_to_add.display_name,
sub_to_compare.tenant_id, sub_to_add.tenant_id)
add_sub = False
break
if add_sub:
all_subscriptions.append(sub_to_add)
# Show warning for empty tenants
if empty_tenants:
logger.warning("The following tenants don't contain accessible subscriptions. "
"Use 'az login --allow-no-subscriptions' to have tenant level access.")
for t in empty_tenants:
logger.warning("%s", t.tenant_id_name)
# Show warning for MFA tenants
if mfa_tenants:
logger.warning("The following tenants require Multi-Factor Authentication (MFA). "
"Use 'az login --tenant TENANT_ID' to explicitly login to a tenant.")
for t in mfa_tenants:
logger.warning("%s", t.tenant_id_name)
return all_subscriptions
def find_using_specific_tenant(self, tenant, credential):
client = self._create_subscription_client(credential)
subscriptions = client.subscriptions.list()
all_subscriptions = []
for s in subscriptions:
_attach_token_tenant(s, tenant)
all_subscriptions.append(s)
self.tenants.append(tenant)
return all_subscriptions
def _create_subscription_client(self, credential):
from azure.cli.core.profiles import ResourceType, get_api_version
from azure.cli.core.profiles._shared import get_client_class
from azure.cli.core.commands.client_factory import _prepare_mgmt_client_kwargs_track2
client_type = get_client_class(ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS)
if client_type is None:
from azure.cli.core.azclierror import CLIInternalError
raise CLIInternalError("Unable to get '{}' in profile '{}'"
.format(ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS, self.cli_ctx.cloud.profile))
api_version = get_api_version(self.cli_ctx, ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS)
client_kwargs = _prepare_mgmt_client_kwargs_track2(self.cli_ctx, credential)
client = client_type(credential, api_version=api_version,
base_url=self.cli_ctx.cloud.endpoints.resource_manager,
**client_kwargs)
return client
def _transform_subscription_for_multiapi(s, s_dict):
"""
Transforms properties from Subscriptions - List 2019-06-01 and later to the subscription dict.
:param s: subscription object
:param s_dict: subscription dict
"""
if hasattr(s, 'home_tenant_id'):
s_dict[_HOME_TENANT_ID] = s.home_tenant_id
if hasattr(s, 'managed_by_tenants'):
if s.managed_by_tenants is None:
s_dict[_MANAGED_BY_TENANTS] = None
else:
s_dict[_MANAGED_BY_TENANTS] = [{_TENANT_ID: t.tenant_id} for t in s.managed_by_tenants]
def _create_identity_instance(cli_ctx, *args, **kwargs):
"""Lazily import and create Identity instance to avoid unnecessary imports."""
from .auth.identity import Identity
# Only enable token cache encryption by default on Windows and macOS (for now).
fallback = sys.platform.startswith('win32') or sys.platform.startswith('darwin')
# encrypt_token_cache affects both MSAL token cache and service principal entries.
encrypt = cli_ctx.config.getboolean('core', 'encrypt_token_cache', fallback=fallback)
return Identity(*args, encrypt=encrypt, **kwargs)
``` |
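For reference, the `--sdk-auth` payload assembled by `get_sp_auth_info` above is plain JSON. Below is a minimal sketch of its shape and of consuming it with only the standard library; all values are placeholders, not real credentials.

```python
import json

# Shape of the output of get_sp_auth_info / `az ad sp create-for-rbac --sdk-auth`;
# every value below is a placeholder.
sdk_auth = json.loads("""
{
  "clientId": "00000000-0000-0000-0000-000000000000",
  "clientSecret": "placeholder-secret",
  "subscriptionId": "00000000-0000-0000-0000-000000000000",
  "tenantId": "00000000-0000-0000-0000-000000000000",
  "activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
  "resourceManagerEndpointUrl": "https://management.azure.com/"
}
""")

# The endpoint keys mirror endpoint_mappings above; cloud endpoints that are not
# set are emitted as null.
print(sdk_auth["subscriptionId"], sdk_auth["resourceManagerEndpointUrl"])
```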
{
"source": "jiasli/azure-sdk-for-python",
"score": 2
} |
#### File: cosmos/_routing/routing_range.py
```python
class PartitionKeyRange(object):
"""Partition Key Range Constants"""
MinInclusive = 'minInclusive'
MaxExclusive = 'maxExclusive'
Id = 'id'
Parents = 'parents'
class Range(object):
"""description of class"""
MinPath = 'min'
MaxPath = 'max'
IsMinInclusivePath = 'isMinInclusive'
IsMaxInclusivePath = 'isMaxInclusive'
def __init__(self, range_min, range_max, isMinInclusive, isMaxInclusive):
if range_min is None:
raise ValueError("min is missing")
if range_max is None:
raise ValueError("max is missing")
self.min = range_min
self.max = range_max
self.isMinInclusive = isMinInclusive
self.isMaxInclusive = isMaxInclusive
def contains(self, value):
# Use the three-way comparison helper rather than a bare '>', which yields a
# boolean and breaks the <0/==0/>0 checks below.
minToValueRelation = Range._compare_helper(self.min, value)
maxToValueRelation = Range._compare_helper(self.max, value)
return ((self.isMinInclusive and minToValueRelation <= 0) or \
(not self.isMinInclusive and minToValueRelation < 0)) \
and ((self.isMaxInclusive and maxToValueRelation >= 0) \
or (not self.isMaxInclusive and maxToValueRelation > 0))
@classmethod
def PartitionKeyRangeToRange(cls, partition_key_range):
self = cls(partition_key_range[PartitionKeyRange.MinInclusive], partition_key_range[PartitionKeyRange.MaxExclusive],
True, False)
return self
@classmethod
def ParseFromDict(cls, range_as_dict):
self = cls(range_as_dict[Range.MinPath], range_as_dict[Range.MaxPath], range_as_dict[Range.IsMinInclusivePath], range_as_dict[Range.IsMaxInclusivePath])
return self
def isSingleValue(self):
return self.isMinInclusive and self.isMaxInclusive and self.min == self.max
def isEmpty(self):
return (not (self.isMinInclusive and self.isMaxInclusive)) and self.min == self.max
def __hash__(self):
return hash((self.min, self.max, self.isMinInclusive, self.isMaxInclusive))
def __str__(self):
return (('[' if self.isMinInclusive else '(') + str(self.min) + ',' + str(self.max) + (']' if self.isMaxInclusive else ')'))
def __eq__(self, other):
return (self.min == other.min) and (self.max == other.max) \
and (self.isMinInclusive == other.isMinInclusive) \
and (self.isMaxInclusive == other.isMaxInclusive)
@staticmethod
def _compare_helper(a,b):
# python 3 compatible
return (a > b) - (a < b)
@staticmethod
def overlaps(range1, range2):
if range1 is None or range2 is None: return False
if range1.isEmpty() or range2.isEmpty(): return False
cmp1 = Range._compare_helper(range1.min, range2.max)
cmp2 = Range._compare_helper(range2.min, range1.max)
if (cmp1 <= 0 and cmp2 <= 0):
if ((cmp1 == 0 and not(range1.isMinInclusive and range2.isMaxInclusive)) or (cmp2 == 0 and not(range2.isMinInclusive and range1.isMaxInclusive))):
return False
return True
return False
```
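A minimal usage sketch of the `Range` helpers above; the import path assumes the azure-cosmos package layout shown in the file header, and the hex-string bounds are illustrative partition key range values.

```python
# Illustrative only: bounds are effective-partition-key hex strings as used by Cosmos DB.
from azure.cosmos._routing.routing_range import Range, PartitionKeyRange

left = Range("00", "7F", True, False)     # [00, 7F)
right = Range("7F", "FF", True, False)    # [7F, FF)

print(left.contains("3A"))                # True: "00" <= "3A" < "7F"
print(Range.overlaps(left, right))        # False: they only meet at the exclusive bound "7F"
print(Range.PartitionKeyRangeToRange(
    {PartitionKeyRange.MinInclusive: "00",
     PartitionKeyRange.MaxExclusive: "FF"}))  # prints [00,FF)
```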
#### File: azure/cosmos/_session.py
```python
import sys, traceback
import threading
from . import _base
from . import http_constants
from ._vector_session_token import VectorSessionToken
from .errors import HTTPFailure
class SessionContainer(object):
def __init__(self):
self.collection_name_to_rid = {}
self.rid_to_session_token = {}
self.session_lock = threading.RLock()
def get_session_token(self, resource_path):
"""
Get Session Token for collection_link
:param str resource_path:
Self link / path to the resource
:return:
Session Token dictionary for the collection_id
:rtype:
dict
"""
with self.session_lock:
is_name_based = _base.IsNameBased(resource_path)
collection_rid = ''
session_token = ''
try:
if is_name_based:
# get the collection name
collection_name = _base.GetItemContainerLink(resource_path)
collection_rid = self.collection_name_to_rid[collection_name]
else:
collection_rid = _base.GetItemContainerLink(resource_path)
if collection_rid in self.rid_to_session_token:
token_dict = self.rid_to_session_token[collection_rid]
session_token_list = []
for key in token_dict.keys():
session_token_list.append("{0}:{1}".format(key, token_dict[key].convert_to_string()))
session_token = ','.join(session_token_list)
return session_token
else:
# return empty token if not found
return ''
except:
return ''
def set_session_token(self, response_result, response_headers):
"""
Session token must only be updated from response of requests that successfully mutate resource on the
server side (write, replace, delete etc)
:param dict response_result:
:param dict response_headers:
:return:
- None
"""
''' there are two pieces of information that we need to update session token-
self link which has the rid representation of the resource, and
x-ms-alt-content-path which is the string representation of the resource'''
with self.session_lock:
collection_rid = ''
collection_name = ''
try:
self_link = response_result['_self']
''' extract alternate content path from the response_headers
(only document level resource updates will have this),
and if not present, then we can assume that we don't have to update
session token for this request'''
alt_content_path = ''
alt_content_path_key = http_constants.HttpHeaders.AlternateContentPath
response_result_id_key = u'id'
response_result_id = None
if alt_content_path_key in response_headers:
alt_content_path = response_headers[http_constants.HttpHeaders.AlternateContentPath]
response_result_id = response_result[response_result_id_key]
else:
return
collection_rid, collection_name = _base.GetItemContainerInfo(self_link, alt_content_path, response_result_id)
except ValueError:
return
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
return
if collection_name in self.collection_name_to_rid:
''' check if the rid for the collection name has changed
this means that potentially, the collection was deleted
and recreated
'''
existing_rid = self.collection_name_to_rid[collection_name]
if (collection_rid != existing_rid):
''' flush the session tokens for the old rid, and
update the new rid into the collection name to rid map.
'''
self.rid_to_session_token[existing_rid] = {}
self.collection_name_to_rid[collection_name] = collection_rid
# parse session token
parsed_tokens = self.parse_session_token(response_headers)
# update session token in collection rid to session token map
if collection_rid in self.rid_to_session_token:
''' we need to update the session tokens for 'this' collection
'''
for id in parsed_tokens:
old_session_token = self.rid_to_session_token[collection_rid][id] if id in self.rid_to_session_token[collection_rid] else None
if not old_session_token:
self.rid_to_session_token[collection_rid][id] = parsed_tokens[id]
else:
self.rid_to_session_token[collection_rid][id] = parsed_tokens[id].merge(old_session_token)
self.collection_name_to_rid[collection_name] = collection_rid
else:
self.rid_to_session_token[collection_rid] = parsed_tokens
self.collection_name_to_rid[collection_name] = collection_rid
def clear_session_token(self, response_headers):
with self.session_lock:
collection_rid = ''
alt_content_path = ''
alt_content_path_key = http_constants.HttpHeaders.AlternateContentPath
if alt_content_path_key in response_headers:
alt_content_path = response_headers[http_constants.HttpHeaders.AlternateContentPath]
if alt_content_path in self.collection_name_to_rid:
collection_rid = self.collection_name_to_rid[alt_content_path]
del self.collection_name_to_rid[alt_content_path]
del self.rid_to_session_token[collection_rid]
@staticmethod
def parse_session_token(response_headers):
""" Extracts session token from response headers and parses
:param dict response_headers:
:return:
A dictionary of partition id to session lsn
for given collection
:rtype: dict
"""
# extract session token from response header
session_token = ''
if http_constants.HttpHeaders.SessionToken in response_headers:
session_token = response_headers[http_constants.HttpHeaders.SessionToken]
id_to_sessionlsn = {}
if session_token != '':
''' extract id, lsn from the token. For p-collection,
the token will be a concatenation of pairs for each collection'''
token_pairs = session_token.split(',')
for token_pair in token_pairs:
tokens = token_pair.split(':')
if (len(tokens) == 2):
id = tokens[0]
sessionToken = VectorSessionToken.create(tokens[1])
if sessionToken is None:
raise HTTPFailure(http_constants.StatusCodes.INTERNAL_SERVER_ERROR, "Could not parse the received session token: %s" % tokens[1])
id_to_sessionlsn[id] = sessionToken
return id_to_sessionlsn
class Session:
"""
State of a Azure Cosmos session. This session object
can be shared across clients within the same process
"""
def __init__(self, url_connection):
self.url_connection = url_connection
self.session_container = SessionContainer()
#include creation time, and some other stats
def clear_session_token(self, response_headers):
self.session_container.clear_session_token(response_headers)
def update_session(self, response_result, response_headers):
self.session_container.set_session_token(response_result, response_headers)
def get_session_token(self, resource_path):
return self.session_container.get_session_token(resource_path)
```
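The session plumbing above ultimately manipulates the `x-ms-session-token` header format. The following dependency-free sketch shows that format; the token values are illustrative.

```python
# "x-ms-session-token" is a comma-separated list of
# "<partitionKeyRangeId>:<vectorSessionToken>" pairs (values below are illustrative).
header_value = "0:1#105#4=90,1:1#98#4=87"

parsed = {}
for pair in header_value.split(','):
    pk_range_id, token = pair.split(':', 1)
    parsed[pk_range_id] = token

print(parsed)  # {'0': '1#105#4=90', '1': '1#98#4=87'}

# SessionContainer.parse_session_token performs the same split, but additionally
# validates each token via VectorSessionToken.create and merges it with any token
# already cached for the same partition key range (see set_session_token above).
```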
#### File: storage/file/file_client.py
```python
import functools
from io import BytesIO
from typing import ( # pylint: disable=unused-import
Optional, Union, IO, List, Dict, Any, Iterable,
TYPE_CHECKING
)
try:
from urllib.parse import urlparse, quote, unquote
except ImportError:
from urlparse import urlparse # type: ignore
from urllib2 import quote, unquote # type: ignore
import six
from azure.core.polling import LROPoller
from azure.core.paging import ItemPaged
from .models import HandlesPaged
from ._generated import AzureFileStorage
from ._generated.version import VERSION
from ._generated.models import StorageErrorException, FileHTTPHeaders
from ._shared.upload_chunking import IterStreamer
from ._shared.shared_access_signature import FileSharedAccessSignature
from ._shared.utils import (
StorageAccountHostsMixin,
parse_query,
get_length,
return_response_headers,
add_metadata_headers,
process_storage_error,
parse_connection_str)
from ._share_utils import upload_file_helper, deserialize_file_properties, StorageStreamDownloader
from .polling import CopyStatusPoller, CloseHandles
if TYPE_CHECKING:
from datetime import datetime
from .models import ShareProperties, FilePermissions, ContentSettings, FileProperties
from ._generated.models import HandleItem
class FileClient(StorageAccountHostsMixin):
"""A client to interact with a specific file, although that file may not yet exist.
:ivar str url:
The full endpoint URL to the File, including SAS token if used. This could be
either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
:ivar str primary_endpoint:
The full primary endpoint URL.
:ivar str primary_hostname:
The hostname of the primary endpoint.
:ivar str secondary_endpoint:
The full secondary endpoint URL if configured. If not available
a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
`secondary_hostname` keyword argument on instantiation.
:ivar str secondary_hostname:
The hostname of the secondary endpoint. If not available this
will be None. To explicitly specify a secondary hostname, use the optional
`secondary_hostname` keyword argument on instantiation.
:ivar str location_mode:
The location mode that the client is currently using. By default
this will be "primary". Options include "primary" and "secondary".
:param str file_url: The full URI to the file. This can also be a URL to the storage account
or share, in which case the file and/or share must also be specified.
:param share: The share for the file. If specified, this value will override
a share value specified in the file URL.
:type share: str or ~azure.storage.file.models.ShareProperties
:param str file_path:
The file path to the file with which to interact. If specified, this value will override
a file value specified in the file URL.
:param str snapshot:
An optional file snapshot on which to operate.
:param credential:
The credential with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string or an account
shared access key.
"""
def __init__( # type: ignore
self, file_url, # type: str
share=None, # type: Optional[Union[str, ShareProperties]]
file_path=None, # type: Optional[str]
snapshot=None, # type: Optional[Union[str, Dict[str, Any]]]
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> None
try:
if not file_url.lower().startswith('http'):
file_url = "https://" + file_url
except AttributeError:
raise ValueError("File URL must be a string.")
parsed_url = urlparse(file_url.rstrip('/'))
if not parsed_url.path and not (share and file_path):
raise ValueError("Please specify a share and file name.")
if not parsed_url.netloc:
raise ValueError("Invalid URL: {}".format(file_url))
if hasattr(credential, 'get_token'):
raise ValueError("Token credentials not supported by the File service.")
path_share, path_file = "", ""
path_snapshot = None
if parsed_url.path:
path_share, _, path_file = parsed_url.path.lstrip('/').partition('/')
path_snapshot, sas_token = parse_query(parsed_url.query)
if not sas_token and not credential:
raise ValueError(
'You need to provide either an account key or SAS token when creating a storage service.')
try:
self.snapshot = snapshot.snapshot # type: ignore
except AttributeError:
try:
self.snapshot = snapshot['snapshot'] # type: ignore
except TypeError:
self.snapshot = snapshot or path_snapshot
try:
self.share_name = share.name # type: ignore
except AttributeError:
self.share_name = share or unquote(path_share) # type: ignore
if file_path:
self.file_path = file_path.split('/')
else:
self.file_path = [unquote(p) for p in path_file.split('/')]
self.file_name = self.file_path[-1]
self.directory_path = "/".join(self.file_path[:-1])
self._query_str, credential = self._format_query_string(
sas_token, credential, share_snapshot=self.snapshot)
super(FileClient, self).__init__(parsed_url, 'file', credential, **kwargs)
self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline)
def _format_url(self, hostname):
"""Format the endpoint URL according to the current location
mode hostname.
"""
share_name = self.share_name
if isinstance(share_name, six.text_type):
share_name = share_name.encode('UTF-8')
return "{}://{}/{}/{}{}".format(
self.scheme,
hostname,
quote(share_name),
"/".join([quote(p, safe='~') for p in self.file_path]),
self._query_str)
@classmethod
def from_connection_string(
cls, conn_str, # type: str
share=None, # type: Optional[Union[str, ShareProperties]]
file_path=None, # type: Optional[str]
snapshot=None, # type: Optional[Union[str, Dict[str, Any]]]
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> FileClient
"""Create FileClient from a Connection String.
:param str conn_str:
A connection string to an Azure Storage account.
:param share: The share. This can either be the name of the share,
or an instance of ShareProperties
:type share: str or ~azure.storage.file.models.ShareProperties
:param str file_path:
The file path.
:param str snapshot:
An optional file snapshot on which to operate.
:param credential:
The credential with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string or an account
shared access key.
Example:
.. literalinclude:: ../tests/test_file_samples_hello_world.py
:start-after: [START create_file_client]
:end-before: [END create_file_client]
:language: python
:dedent: 12
:caption: Creates the file client with connection string.
"""
account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
if 'secondary_hostname' not in kwargs:
kwargs['secondary_hostname'] = secondary
return cls(
account_url, share=share, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs)
def generate_shared_access_signature(
self, permission=None, # type: Optional[Union[FilePermissions, str]]
expiry=None, # type: Optional[Union[datetime, str]]
start=None, # type: Optional[Union[datetime, str]]
policy_id=None, # type: Optional[str]
ip=None, # type: Optional[str]
protocol=None, # type: Optional[str]
cache_control=None, # type: Optional[str]
content_disposition=None, # type: Optional[str]
content_encoding=None, # type: Optional[str]
content_language=None, # type: Optional[str]
content_type=None # type: Optional[str]
):
# type: (...) -> str
"""Generates a shared access signature for the file.
Use the returned signature with the credential parameter of any FileServiceClient,
ShareClient, DirectoryClient, or FileClient.
:param ~azure.storage.file.models.FilePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str policy_id:
A unique value up to 64 characters in length that correlates to a
stored access policy.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=192.168.3.11 or sip=172.16.58.3-172.16.17.32 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value is https.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:return: A Shared Access Signature (sas) token.
:rtype: str
"""
if not hasattr(self.credential, 'account_key') or not self.credential.account_key:
raise ValueError("No account SAS key available.")
sas = FileSharedAccessSignature(self.credential.account_name, self.credential.account_key)
if len(self.file_path) > 1:
file_path = '/'.join(self.file_path[:-1])
else:
file_path = None # type: ignore
return sas.generate_file( # type: ignore
self.share_name,
file_path,
self.file_name,
permission,
expiry,
start=start,
policy_id=policy_id,
ip=ip,
protocol=protocol,
cache_control=cache_control,
content_disposition=content_disposition,
content_encoding=content_encoding,
content_language=content_language,
content_type=content_type)
def create_file( # type: ignore
self, size, # type: int
content_settings=None, # type: Optional[ContentSettings]
metadata=None, # type: Optional[Dict[str, str]]
timeout=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Dict[str, Any]
"""Creates a new file.
Note that it only initializes the file with no content.
:param int size: Specifies the maximum size for the file,
up to 1 TB.
:param ~azure.storage.file.models.ContentSettings content_settings:
ContentSettings object used to set file properties.
:param metadata:
Name-value pairs associated with the file as metadata.
:type metadata: dict(str, str)
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: File-updated property dict (Etag and last modified).
:rtype: dict(str, Any)
Example:
.. literalinclude:: ../tests/test_file_samples_file.py
:start-after: [START create_file]
:end-before: [END create_file]
:language: python
:dedent: 12
:caption: Create a file.
"""
if self.require_encryption and not self.key_encryption_key:
raise ValueError("Encryption required but no key was provided.")
headers = kwargs.pop('headers', {})
headers.update(add_metadata_headers(metadata))
file_http_headers = None
if content_settings:
file_http_headers = FileHTTPHeaders(
file_cache_control=content_settings.cache_control,
file_content_type=content_settings.content_type,
file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
file_content_encoding=content_settings.content_encoding,
file_content_language=content_settings.content_language,
file_content_disposition=content_settings.content_disposition
)
try:
return self._client.file.create( # type: ignore
file_content_length=size,
timeout=timeout,
metadata=metadata,
file_http_headers=file_http_headers,
headers=headers,
cls=return_response_headers,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
def upload_file(
self, data, # type: Any
length=None, # type: Optional[int]
metadata=None, # type: Optional[Dict[str, str]]
content_settings=None, # type: Optional[ContentSettings]
validate_content=False, # type: bool
max_connections=1, # type: Optional[int]
timeout=None, # type: Optional[int]
encoding='UTF-8', # type: str
**kwargs # type: Any
):
# type: (...) -> Dict[str, Any]
"""Uploads a new file.
:param Any data:
Content of the file.
:param int length:
Length of the file in bytes. Specify its maximum size, up to 1 TiB.
:param metadata:
Name-value pairs associated with the file as metadata.
:type metadata: dict(str, str)
:param ~azure.storage.file.models.ContentSettings content_settings:
ContentSettings object used to set file properties.
:param bool validate_content:
If true, calculates an MD5 hash for each range of the file. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
file.
:param int max_connections:
Maximum number of parallel connections to use.
:param int timeout:
The timeout parameter is expressed in seconds.
:param str encoding:
Defaults to UTF-8.
:returns: File-updated property dict (Etag and last modified).
:rtype: dict(str, Any)
Example:
.. literalinclude:: ../tests/test_file_samples_file.py
:start-after: [START upload_file]
:end-before: [END upload_file]
:language: python
:dedent: 12
:caption: Upload a file.
"""
if self.require_encryption or (self.key_encryption_key is not None):
raise ValueError("Encryption not supported.")
if isinstance(data, six.text_type):
data = data.encode(encoding)
if length is None:
length = get_length(data)
if isinstance(data, bytes):
data = data[:length]
if isinstance(data, bytes):
stream = BytesIO(data)
elif hasattr(data, 'read'):
stream = data
elif hasattr(data, '__iter__'):
stream = IterStreamer(data, encoding=encoding) # type: ignore
else:
raise TypeError("Unsupported data type: {}".format(type(data)))
return upload_file_helper( # type: ignore
self,
stream,
length,
metadata,
content_settings,
validate_content,
timeout,
max_connections,
self._config.data_settings,
**kwargs)
def copy_file_from_url(
self, source_url, # type: str
metadata=None, # type: Optional[Dict[str, str]]
timeout=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Any
"""Copies the file from the provided URL to the file referenced by
the client.
:param str source_url:
Specifies the URL of the source file.
:param metadata:
Name-value pairs associated with the file as metadata.
:type metadata: dict(str, str)
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: Polling object in order to wait on or abort the operation
:rtype: ~azure.storage.file.polling.CopyStatusPoller
Example:
.. literalinclude:: ../tests/test_file_samples_file.py
:start-after: [START copy_file_from_url]
:end-before: [END copy_file_from_url]
:language: python
:dedent: 12
:caption: Copy a file from a URL
"""
headers = kwargs.pop('headers', {})
headers.update(add_metadata_headers(metadata))
try:
start_copy = self._client.file.start_copy(
source_url,
timeout=None,
metadata=metadata,
headers=headers,
cls=return_response_headers,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
poller = CopyStatusPoller(
self, start_copy,
configuration=self._config,
timeout=timeout)
return poller
def download_file(
self, offset=None, # type: Optional[int]
length=None, # type: Optional[int]
validate_content=False, # type: bool
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> Iterable[bytes]
"""Downloads a file to a stream with automatic chunking.
:param int offset:
Start of byte range to use for downloading a section of the file.
Must be set if length is provided.
:param int length:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the file. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
file. Also note that if enabled, the memory-efficient upload algorithm
will not be used, because computing the MD5 hash requires buffering
entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: An iterable data generator (stream)
Example:
.. literalinclude:: ../tests/test_file_samples_file.py
:start-after: [START download_file]
:end-before: [END download_file]
:language: python
:dedent: 12
:caption: Download a file.
"""
if self.require_encryption or (self.key_encryption_key is not None):
raise ValueError("Encryption not supported.")
if length is not None and offset is None:
raise ValueError("Offset value must not be None is length is set.")
return StorageStreamDownloader(
share=self.share_name,
file_name=self.file_name,
file_path='/'.join(self.file_path),
service=self._client.file,
config=self._config.data_settings,
offset=offset,
length=length,
validate_content=validate_content,
timeout=timeout,
**kwargs)
def delete_file(self, timeout=None, **kwargs):
# type: (Optional[int], Optional[Any]) -> None
"""Marks the specified file for deletion. The file is
later deleted during garbage collection.
:param int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
Example:
.. literalinclude:: ../tests/test_file_samples_file.py
:start-after: [START delete_file]
:end-before: [END delete_file]
:language: python
:dedent: 12
:caption: Delete a file.
"""
try:
self._client.file.delete(timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
def get_file_properties(self, timeout=None, **kwargs):
# type: (Optional[int], Any) -> FileProperties
"""Returns all user-defined metadata, standard HTTP properties, and
system properties for the file.
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: FileProperties
:rtype: ~azure.storage.file.models.FileProperties
"""
try:
file_props = self._client.file.get_properties(
sharesnapshot=self.snapshot,
timeout=timeout,
cls=deserialize_file_properties,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
file_props.name = self.file_name
file_props.share_name = self.share_name
return file_props # type: ignore
def set_http_headers(self, content_settings, timeout=None, **kwargs): # type: ignore
#type: (ContentSettings, Optional[int], Optional[Any]) -> Dict[str, Any]
"""Sets HTTP headers on the file.
:param ~azure.storage.file.models.ContentSettings content_settings:
ContentSettings object used to set file properties.
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: File-updated property dict (Etag and last modified).
:rtype: dict(str, Any)
"""
file_content_length = kwargs.pop('size', None)
file_http_headers = FileHTTPHeaders(
file_cache_control=content_settings.cache_control,
file_content_type=content_settings.content_type,
file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
file_content_encoding=content_settings.content_encoding,
file_content_language=content_settings.content_language,
file_content_disposition=content_settings.content_disposition
)
try:
return self._client.file.set_http_headers( # type: ignore
timeout=timeout,
file_content_length=file_content_length,
file_http_headers=file_http_headers,
cls=return_response_headers,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
def set_file_metadata(self, metadata=None, timeout=None, **kwargs): # type: ignore
#type: (Optional[Dict[str, Any]], Optional[int], Optional[Any]) -> Dict[str, Any]
"""Sets user-defined metadata for the specified file as one or more
name-value pairs.
Each call to this operation replaces all existing metadata
attached to the file. To remove all metadata from the file,
call this operation with no metadata dict.
:param metadata:
Name-value pairs associated with the file as metadata.
:type metadata: dict(str, str)
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: File-updated property dict (Etag and last modified).
:rtype: dict(str, Any)
"""
headers = kwargs.pop('headers', {})
headers.update(add_metadata_headers(metadata)) # type: ignore
try:
return self._client.file.set_metadata( # type: ignore
timeout=timeout,
cls=return_response_headers,
headers=headers,
metadata=metadata,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
def upload_range( # type: ignore
self, data, # type: bytes
start_range, # type: int
end_range, # type: int
validate_content=False, # type: Optional[bool]
timeout=None, # type: Optional[int]
encoding='UTF-8',
**kwargs
):
# type: (...) -> Dict[str, Any]
"""Upload a range of bytes to a file.
:param bytes data:
The data to upload.
:param int start_range:
Start of byte range to use for uploading a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will upload first 512 bytes of file.
:param int end_range:
End of byte range to use for uploading a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will upload first 512 bytes of file.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:param int timeout:
The timeout parameter is expressed in seconds.
:param str encoding:
Defaults to UTF-8.
:returns: File-updated property dict (Etag and last modified).
:rtype: Dict[str, Any]
"""
if self.require_encryption or (self.key_encryption_key is not None):
raise ValueError("Encryption not supported.")
if isinstance(data, six.text_type):
data = data.encode(encoding)
content_range = 'bytes={0}-{1}'.format(start_range, end_range)
content_length = end_range - start_range + 1
try:
return self._client.file.upload_range( # type: ignore
range=content_range,
content_length=content_length,
optionalbody=data,
timeout=timeout,
validate_content=validate_content,
cls=return_response_headers,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
def get_ranges( # type: ignore
self, start_range=None, # type: Optional[int]
end_range=None, # type: Optional[int]
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> List[dict[str, int]]
"""Returns the list of valid ranges of a file.
:param int start_range:
Specifies the start offset of bytes over which to get ranges.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
Specifies the end offset of bytes over which to get ranges.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: A list of valid ranges.
:rtype: List[dict[str, int]]
"""
if self.require_encryption or (self.key_encryption_key is not None):
raise ValueError("Unsupported method for encryption.")
content_range = None
if start_range is not None:
if end_range is not None:
content_range = 'bytes={0}-{1}'.format(start_range, end_range)
else:
content_range = 'bytes={0}-'.format(start_range)
try:
ranges = self._client.file.get_range_list(
sharesnapshot=self.snapshot,
timeout=timeout,
range=content_range,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
return [{'start': b.start, 'end': b.end} for b in ranges]
def clear_range( # type: ignore
self, start_range, # type: int
end_range, # type: int
timeout=None, # type: Optional[int]
**kwargs
):
# type: (...) -> Dict[str, Any]
"""Clears the specified range and releases the space used in storage for
that range.
:param int start_range:
Start of byte range to use for clearing a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for clearing a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: File-updated property dict (Etag and last modified).
:rtype: Dict[str, Any]
"""
if self.require_encryption or (self.key_encryption_key is not None):
raise ValueError("Unsupported method for encryption.")
if start_range is None or start_range % 512 != 0:
raise ValueError("start_range must be an integer that aligns to a 512-byte boundary")
if end_range is None or end_range % 512 != 511:
raise ValueError("end_range must be an integer that ends on a 512-byte boundary (end_range % 512 == 511)")
content_range = 'bytes={0}-{1}'.format(start_range, end_range)
try:
return self._client.file.upload_range( # type: ignore
timeout=timeout,
cls=return_response_headers,
content_length=0,
file_range_write="clear",
range=content_range,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
def resize_file(self, size, timeout=None, **kwargs): # type: ignore
# type: (int, Optional[int], Optional[Any]) -> Dict[str, Any]
"""Resizes a file to the specified size.
:param int size:
Size to resize file to (in bytes)
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: File-updated property dict (Etag and last modified).
:rtype: Dict[str, Any]
"""
try:
return self._client.file.set_http_headers( # type: ignore
timeout=timeout,
file_content_length=size,
cls=return_response_headers,
**kwargs)
except StorageErrorException as error:
process_storage_error(error)
def list_handles(self, timeout=None, **kwargs):
# type: (int, Any) -> ItemPaged[Handle]
"""Lists handles for file.
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: An auto-paging iterable of HandleItems
:rtype: ~azure.core.paging.ItemPaged[~azure.storage.file.models.Handle]
"""
results_per_page = kwargs.pop('results_per_page', None)
command = functools.partial(
self._client.file.list_handles,
sharesnapshot=self.snapshot,
timeout=timeout,
**kwargs)
return ItemPaged(
command, results_per_page=results_per_page,
page_iterator_class=HandlesPaged)
def close_handles(
self, handle=None, # type: Union[str, HandleItem]
timeout=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Any
"""Close open file handles.
This operation may not finish with a single call, so a long-running poller
is returned that can be used to wait until the operation is complete.
:param handle:
Optionally, a specific handle to close. The default value is '*'
which will attempt to close all open handles.
:type handle: str or ~azure.storage.file.models.Handle
:param int timeout:
The timeout parameter is expressed in seconds.
:returns: A long-running poller to get operation status.
:rtype: ~azure.core.polling.LROPoller
"""
try:
handle_id = handle.id # type: ignore
except AttributeError:
handle_id = handle or '*'
command = functools.partial(
self._client.file.force_close_handles,
handle_id,
timeout=timeout,
sharesnapshot=self.snapshot,
cls=return_response_headers,
**kwargs)
try:
start_close = command()
except StorageErrorException as error:
process_storage_error(error)
polling_method = CloseHandles(self._config.data_settings.copy_polling_interval)
return LROPoller(
command,
start_close,
None,
polling_method)
``` |
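A minimal usage sketch of the `FileClient` above; the connection string, share name, and file path are placeholders, and the import path assumes the preview `azure.storage.file` package layout shown in the file header.

```python
from azure.storage.file.file_client import FileClient

# Placeholder connection string -- substitute a real account name/key before running.
conn_str = ("DefaultEndpointsProtocol=https;AccountName=<account>;"
            "AccountKey=<key>;EndpointSuffix=core.windows.net")

file_client = FileClient.from_connection_string(
    conn_str, share="myshare", file_path="docs/hello.txt")

file_client.create_file(size=1024)               # allocate a 1 KiB file with no content
file_client.upload_range(b"hello world", 0, 10)  # inclusive byte range 0-10 == 11 bytes
props = file_client.get_file_properties()        # FileProperties with name/share_name populated
print(props.name, file_client.get_ranges())      # list of {'start': ..., 'end': ...} dicts
file_client.delete_file()
```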
{
"source": "jiasli/knack",
"score": 2
} |
#### File: knack/knack/help.py
```python
from __future__ import print_function, unicode_literals
import argparse
import sys
import textwrap
from .deprecation import ImplicitDeprecated, resolve_deprecate_info
from .log import get_logger
from .preview import ImplicitPreviewItem, resolve_preview_info
from .experimental import ImplicitExperimentalItem, resolve_experimental_info
from .util import CtxTypeError
from .help_files import _load_help_file
logger = get_logger(__name__)
FIRST_LINE_PREFIX = ' : '
REQUIRED_TAG = '[Required]'
def _get_hanging_indent(max_length, indent):
return max_length + (indent * 4) + len(FIRST_LINE_PREFIX) - 1
def _get_padding_len(max_len, layout):
if layout['tags']:
pad_len = max_len - layout['line_len'] + 1
else:
pad_len = max_len - layout['line_len']
return pad_len
def _get_line_len(name, tags_len):
return len(name) + tags_len + (2 if tags_len else 1)
def _print_indent(s, indent=0, subsequent_spaces=-1, width=100):
tw = textwrap.TextWrapper(initial_indent=' ' * indent,
subsequent_indent=(' ' * indent
if subsequent_spaces == -1
else ' ' * subsequent_spaces),
replace_whitespace=False,
width=width)
paragraphs = s.split('\n')
for p in paragraphs:
try:
print(tw.fill(p), file=sys.stdout)
except UnicodeEncodeError:
print(tw.fill(p).encode('ascii', 'ignore').decode('utf-8', 'ignore'), file=sys.stdout)
class HelpAuthoringException(Exception):
pass
class ArgumentGroupRegistry(object): # pylint: disable=too-few-public-methods
def __init__(self, group_list):
self.priorities = {
None: 0,
'Global Arguments': 1000,
}
priority = 2
# any groups not already in the static dictionary should be prioritized alphabetically
other_groups = [g for g in sorted(list(set(group_list))) if g not in self.priorities]
for group in other_groups:
self.priorities[group] = priority
priority += 1
def get_group_priority(self, group_name):
key = self.priorities.get(group_name, 0)
return "%06d" % key
class HelpObject(object):
@staticmethod
def _normalize_text(s):
if not s or len(s) < 2:
return s or ''
s = s.strip()
initial_upper = s[0].upper() + s[1:]
trailing_period = '' if s[-1] in '.!?' else '.'
return initial_upper + trailing_period
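# For example, _normalize_text('creates a storage account') returns
# 'Creates a storage account.' -- the first letter is upper-cased and a trailing
# period is added unless the text already ends with '.', '!' or '?'.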
def __init__(self, **kwargs):
self._short_summary = ''
self._long_summary = ''
super(HelpObject, self).__init__(**kwargs)
@property
def short_summary(self):
return self._short_summary
@short_summary.setter
def short_summary(self, value):
self._short_summary = self._normalize_text(value)
@property
def long_summary(self):
return self._long_summary
@long_summary.setter
def long_summary(self, value):
self._long_summary = self._normalize_text(value)
# pylint: disable=too-many-instance-attributes
class HelpFile(HelpObject):
@staticmethod
def _load_help_file_from_string(text):
import yaml
try:
return yaml.safe_load(text) if text else None
except Exception: # pylint: disable=broad-except
return text
def __init__(self, help_ctx, delimiters): # pylint: disable=too-many-statements
super(HelpFile, self).__init__()
self.help_ctx = help_ctx
self.delimiters = delimiters
self.name = delimiters.split()[-1] if delimiters else delimiters
self.command = delimiters
self.type = ''
self.short_summary = ''
self.long_summary = ''
self.examples = []
self.deprecate_info = None
self.preview_info = None
self.experimental_info = None
direct_deprecate_info = resolve_deprecate_info(help_ctx.cli_ctx, delimiters)
if direct_deprecate_info:
self.deprecate_info = direct_deprecate_info
# search for implicit deprecation
path_comps = delimiters.split()[:-1]
implicit_deprecate_info = None
while path_comps and not implicit_deprecate_info:
implicit_deprecate_info = resolve_deprecate_info(help_ctx.cli_ctx, ' '.join(path_comps))
del path_comps[-1]
if implicit_deprecate_info:
deprecate_kwargs = implicit_deprecate_info.__dict__.copy()
deprecate_kwargs['object_type'] = 'command' if delimiters in \
help_ctx.cli_ctx.invocation.commands_loader.command_table else 'command group'
del deprecate_kwargs['_get_tag']
del deprecate_kwargs['_get_message']
self.deprecate_info = ImplicitDeprecated(cli_ctx=help_ctx.cli_ctx, **deprecate_kwargs)
# resolve preview info
direct_preview_info = resolve_preview_info(help_ctx.cli_ctx, delimiters)
if direct_preview_info:
self.preview_info = direct_preview_info
# search for implicit preview
path_comps = delimiters.split()[:-1]
implicit_preview_info = None
while path_comps and not implicit_preview_info:
implicit_preview_info = resolve_preview_info(help_ctx.cli_ctx, ' '.join(path_comps))
del path_comps[-1]
if implicit_preview_info:
preview_kwargs = implicit_preview_info.__dict__.copy()
if delimiters in help_ctx.cli_ctx.invocation.commands_loader.command_table:
preview_kwargs['object_type'] = 'command'
else:
preview_kwargs['object_type'] = 'command group'
self.preview_info = ImplicitPreviewItem(cli_ctx=help_ctx.cli_ctx, **preview_kwargs)
# resolve experimental info
direct_experimental_info = resolve_experimental_info(help_ctx.cli_ctx, delimiters)
if direct_experimental_info:
self.experimental_info = direct_experimental_info
# search for implicit experimental
path_comps = delimiters.split()[:-1]
implicit_experimental_info = None
while path_comps and not implicit_experimental_info:
implicit_experimental_info = resolve_experimental_info(help_ctx.cli_ctx, ' '.join(path_comps))
del path_comps[-1]
if implicit_experimental_info:
experimental_kwargs = implicit_experimental_info.__dict__.copy()
if delimiters in help_ctx.cli_ctx.invocation.commands_loader.command_table:
experimental_kwargs['object_type'] = 'command'
else:
experimental_kwargs['object_type'] = 'command group'
self.experimental_info = ImplicitExperimentalItem(cli_ctx=help_ctx.cli_ctx, **experimental_kwargs)
def load(self, options):
description = getattr(options, 'description', None)
try:
self.short_summary = description[:description.index('.')]
long_summary = description[description.index('.') + 1:].lstrip()
self.long_summary = ' '.join(long_summary.splitlines())
except (ValueError, AttributeError):
self.short_summary = description
file_data = (self._load_help_file_from_string(options.help_file)
if hasattr(options, '_defaults')
else None)
if file_data:
self._load_from_data(file_data)
else:
self._load_from_file()
def _load_from_file(self):
file_data = _load_help_file(self.delimiters)
if file_data:
self._load_from_data(file_data)
def _load_from_data(self, data):
if not data:
return
if isinstance(data, str):
self.long_summary = data
return
if 'type' in data:
self.type = data['type']
if 'short-summary' in data:
self.short_summary = data['short-summary']
self.long_summary = data.get('long-summary')
if 'examples' in data:
self.examples = [HelpExample(d) for d in data['examples']]
class GroupHelpFile(HelpFile):
def __init__(self, help_ctx, delimiters, parser):
super(GroupHelpFile, self).__init__(help_ctx, delimiters)
self.type = 'group'
self.children = []
if getattr(parser, 'choices', None):
for options in parser.choices.values():
delimiters = ' '.join(options.prog.split()[1:])
child = (help_ctx.group_help_cls(self.help_ctx, delimiters, options) if options.is_group()
else help_ctx.help_cls(self.help_ctx, delimiters))
child.load(options)
try:
# don't hide implicitly deprecated commands
if not isinstance(child.deprecate_info, ImplicitDeprecated) and \
not child.deprecate_info.show_in_help():
continue
except AttributeError:
pass
self.children.append(child)
class CommandHelpFile(HelpFile):
def __init__(self, help_ctx, delimiters, parser):
super(CommandHelpFile, self).__init__(help_ctx, delimiters)
self.type = 'command'
self.parameters = []
for action in [a for a in parser._actions if a.help != argparse.SUPPRESS]: # pylint: disable=protected-access
if action.option_strings:
self._add_parameter_help(action)
else:
# use metavar for positional parameters
param_kwargs = {
'name_source': [action.metavar or action.dest],
'deprecate_info': getattr(action, 'deprecate_info', None),
'preview_info': getattr(action, 'preview_info', None),
'experimental_info': getattr(action, 'experimental_info', None),
'description': action.help,
'choices': action.choices,
'required': False,
'default': None,
'group_name': 'Positional'
}
self.parameters.append(HelpParameter(**param_kwargs))
help_param = next(p for p in self.parameters if p.name == '--help -h')
help_param.group_name = 'Global Arguments'
def _add_parameter_help(self, param):
param_kwargs = {
'description': param.help,
'choices': param.choices,
'required': param.required,
'default': param.default,
'group_name': param.container.description
}
normal_options = []
deprecated_options = []
for item in param.option_strings:
deprecated_info = getattr(item, 'deprecate_info', None)
if deprecated_info:
if deprecated_info.show_in_help():
deprecated_options.append(item)
else:
normal_options.append(item)
if deprecated_options:
param_kwargs.update({
'name_source': deprecated_options,
'deprecate_info': deprecated_options[0].deprecate_info
})
self.parameters.append(HelpParameter(**param_kwargs))
param_kwargs.update({
'name_source': normal_options,
'deprecate_info': getattr(param, 'deprecate_info', None),
'preview_info': getattr(param, 'preview_info', None),
'experimental_info': getattr(param, 'experimental_info', None),
'default_value_source': getattr(param, 'default_value_source', None)
})
self.parameters.append(HelpParameter(**param_kwargs))
def _load_from_data(self, data):
super(CommandHelpFile, self)._load_from_data(data)
if isinstance(data, str) or not self.parameters or not data.get('parameters'):
return
loaded_params = []
loaded_param = {}
for param in self.parameters:
loaded_param = next((n for n in data['parameters'] if n['name'] == param.name), None)
if loaded_param:
param.update_from_data(loaded_param)
loaded_params.append(param)
self.parameters = loaded_params
class HelpParameter(HelpObject): # pylint: disable=too-many-instance-attributes
def __init__(self, name_source, description, required, choices=None, default=None, group_name=None,
deprecate_info=None, preview_info=None, experimental_info=None, default_value_source=None):
super(HelpParameter, self).__init__()
self.name_source = name_source
self.name = ' '.join(sorted(name_source))
self.required = required
self.type = 'string'
self.short_summary = description
self.long_summary = ''
self.value_sources = []
self.choices = choices
self.default = default
self.group_name = group_name
self.deprecate_info = deprecate_info
self.preview_info = preview_info
self.experimental_info = experimental_info
self.default_value_source = default_value_source
def update_from_data(self, data):
if self.name != data.get('name'):
raise HelpAuthoringException(u"mismatched name {} vs. {}"
.format(self.name,
data.get('name')))
if data.get('type'):
self.type = data.get('type')
if data.get('short-summary'):
self.short_summary = data.get('short-summary')
if data.get('long-summary'):
self.long_summary = data.get('long-summary')
if data.get('populator-commands'):
self.value_sources = data.get('populator-commands')
class HelpExample(object): # pylint: disable=too-few-public-methods
def __init__(self, _data):
self.name = _data['name']
self.text = _data['text']
class CLIHelp(object):
def _print_header(self, cli_name, help_file):
indent = 0
_print_indent('')
_print_indent('Command' if help_file.type == 'command' else 'Group', indent)
indent += 1
LINE_FORMAT = u'{cli}{name}{separator}{summary}'
line = LINE_FORMAT.format(
cli=cli_name,
name=' ' + help_file.command if help_file.command else '',
separator=FIRST_LINE_PREFIX if help_file.short_summary else '',
summary=help_file.short_summary if help_file.short_summary else ''
)
_print_indent(line, indent, width=self.textwrap_width)
def _build_long_summary(item):
lines = []
if item.long_summary:
lines.append(item.long_summary)
if item.deprecate_info:
lines.append(str(item.deprecate_info.message))
if item.preview_info:
lines.append(str(item.preview_info.message))
if item.experimental_info:
lines.append(str(item.experimental_info.message))
return '\n'.join(lines)
indent += 1
long_sum = _build_long_summary(help_file)
_print_indent(long_sum, indent, width=self.textwrap_width)
def _print_groups(self, help_file):
LINE_FORMAT = u'{name}{padding}{tags}{separator}{summary}'
indent = 1
self.max_line_len = 0
def _build_tags_string(item):
preview_info = getattr(item, 'preview_info', None)
preview = preview_info.tag if preview_info else ''
experimental_info = getattr(item, 'experimental_info', None)
experimental = experimental_info.tag if experimental_info else ''
deprecate_info = getattr(item, 'deprecate_info', None)
deprecated = deprecate_info.tag if deprecate_info else ''
required = REQUIRED_TAG if getattr(item, 'required', None) else ''
tags = ' '.join([x for x in [str(deprecated), str(preview), str(experimental), required] if x])
tags_len = sum([
len(deprecated),
len(preview),
len(experimental),
len(required),
tags.count(' ')
])
if not tags_len:
tags = ''
return tags, tags_len
def _layout_items(items):
layouts = []
for c in sorted(items, key=lambda h: h.name):
tags, tags_len = _build_tags_string(c)
line_len = _get_line_len(c.name, tags_len)
layout = {
'name': c.name,
'tags': tags,
'separator': FIRST_LINE_PREFIX if c.short_summary else '',
'summary': c.short_summary or '',
'line_len': line_len
}
layout['summary'] = layout['summary'].replace('\n', ' ')
if line_len > self.max_line_len:
self.max_line_len = line_len
layouts.append(layout)
return layouts
def _print_items(layouts):
for layout in layouts:
layout['padding'] = ' ' * _get_padding_len(self.max_line_len, layout)
_print_indent(
LINE_FORMAT.format(**layout),
indent,
_get_hanging_indent(self.max_line_len, indent),
width=self.textwrap_width,
)
_print_indent('')
groups = [c for c in help_file.children if isinstance(c, self.group_help_cls)]
group_layouts = _layout_items(groups)
commands = [c for c in help_file.children if c not in groups]
command_layouts = _layout_items(commands)
if groups:
_print_indent('Subgroups:')
_print_items(group_layouts)
if commands:
_print_indent('Commands:')
_print_items(command_layouts)
@staticmethod
def _get_choices_defaults_sources_str(p):
choice_str = u' Allowed values: {}.'.format(', '.join(sorted([str(x) for x in p.choices]))) \
if p.choices else ''
default_str = u' Default: {}.'.format(p.default) \
if p.default and p.default != argparse.SUPPRESS else ''
value_sources_str = u' Values from: {}.'.format(', '.join(p.value_sources)) \
if p.value_sources else ''
return u'{}{}{}'.format(choice_str, default_str, value_sources_str)
@staticmethod
def print_description_list(help_files):
indent = 1
max_length = max(len(f.name) for f in help_files) if help_files else 0
for help_file in sorted(help_files, key=lambda h: h.name):
column_indent = max_length - len(help_file.name)
_print_indent(u'{}{}{}'.format(help_file.name,
' ' * column_indent,
FIRST_LINE_PREFIX + help_file.short_summary
if help_file.short_summary
else ''),
indent,
_get_hanging_indent(max_length, indent))
@staticmethod
def _print_examples(help_file):
indent = 0
_print_indent('Examples', indent)
for e in help_file.examples:
indent = 1
_print_indent(u'{0}'.format(e.name), indent)
indent = 2
_print_indent(u'{0}'.format(e.text), indent)
print('')
def _print_arguments(self, help_file): # pylint: disable=too-many-statements
LINE_FORMAT = u'{name}{padding}{tags}{separator}{short_summary}'
indent = 1
self.max_line_len = 0
if not help_file.parameters:
_print_indent('None', indent)
_print_indent('')
return None
def _build_tags_string(item):
preview_info = getattr(item, 'preview_info', None)
preview = preview_info.tag if preview_info else ''
experimental_info = getattr(item, 'experimental_info', None)
experimental = experimental_info.tag if experimental_info else ''
deprecate_info = getattr(item, 'deprecate_info', None)
deprecated = deprecate_info.tag if deprecate_info else ''
required = REQUIRED_TAG if getattr(item, 'required', None) else ''
tags = ' '.join([x for x in [str(deprecated), str(preview), str(experimental), required] if x])
tags_len = sum([
len(deprecated),
len(preview),
len(experimental),
len(required),
tags.count(' ')
])
if not tags_len:
tags = ''
return tags, tags_len
def _layout_items(items):
layouts = []
for c in sorted(items, key=_get_parameter_key):
deprecate_info = getattr(c, 'deprecate_info', None)
if deprecate_info and not deprecate_info.show_in_help():
continue
tags, tags_len = _build_tags_string(c)
short_summary = _build_short_summary(c)
long_summary = _build_long_summary(c)
line_len = _get_line_len(c.name, tags_len)
layout = {
'name': c.name,
'tags': tags,
'separator': FIRST_LINE_PREFIX if short_summary else '',
'short_summary': short_summary,
'long_summary': long_summary,
'group_name': c.group_name,
'line_len': line_len
}
if line_len > self.max_line_len:
self.max_line_len = line_len
layouts.append(layout)
return layouts
def _print_items(layouts):
last_group_name = ''
for layout in layouts:
indent = 1
if layout['group_name'] != last_group_name:
if layout['group_name']:
print('')
print(layout['group_name'])
last_group_name = layout['group_name']
layout['padding'] = ' ' * _get_padding_len(self.max_line_len, layout)
_print_indent(
LINE_FORMAT.format(**layout),
indent,
_get_hanging_indent(self.max_line_len, indent),
width=self.textwrap_width,
)
indent = 2
long_summary = layout.get('long_summary', None)
if long_summary:
_print_indent(long_summary, indent, width=self.textwrap_width)
_print_indent('')
def _build_short_summary(item):
short_summary = item.short_summary
possible_values_index = short_summary.find(' Possible values include')
short_summary = short_summary[0:possible_values_index
if possible_values_index >= 0 else len(short_summary)]
short_summary += self._get_choices_defaults_sources_str(item)
short_summary = short_summary.strip()
return short_summary
def _build_long_summary(item):
lines = []
if item.long_summary:
lines.append(item.long_summary)
deprecate_info = getattr(item, 'deprecate_info', None)
if deprecate_info:
lines.append(str(item.deprecate_info.message))
preview_info = getattr(item, 'preview_info', None)
if preview_info:
lines.append(str(item.preview_info.message))
experimental_info = getattr(item, 'experimental_info', None)
if experimental_info:
lines.append(str(item.experimental_info.message))
return ' '.join(lines)
group_registry = ArgumentGroupRegistry([p.group_name for p in help_file.parameters if p.group_name])
def _get_parameter_key(parameter):
return u'{}{}{}'.format(group_registry.get_group_priority(parameter.group_name),
str(not parameter.required),
parameter.name)
parameter_layouts = _layout_items(help_file.parameters)
_print_items(parameter_layouts)
return indent
def _print_detailed_help(self, cli_name, help_file):
self._print_header(cli_name, help_file)
if help_file.long_summary or getattr(help_file, 'deprecate_info', None):
_print_indent('')
# fix incorrect groupings instead of crashing
if help_file.type == 'command' and not isinstance(help_file, CommandHelpFile):
help_file.type = 'group'
logger.info("'%s' is labeled a command but is actually a group!", help_file.delimiters)
elif help_file.type == 'group' and not isinstance(help_file, GroupHelpFile):
help_file.type = 'command'
logger.info("'%s' is labeled a group but is actually a command!", help_file.delimiters)
if help_file.type == 'command':
_print_indent('Arguments')
self._print_arguments(help_file)
elif help_file.type == 'group':
self._print_groups(help_file)
if help_file.examples:
self._print_examples(help_file)
def __init__(self, cli_ctx=None, privacy_statement='', welcome_message='',
group_help_cls=GroupHelpFile, command_help_cls=CommandHelpFile,
help_cls=HelpFile, textwrap_width=100):
""" Manages the generation and production of help in the CLI
:param cli_ctx: CLI Context
:type cli_ctx: knack.cli.CLI
:param privacy_statement: Privacy statement for the CLI
:type privacy_statement: str
:param welcome_message: A welcome message for the CLI
:type welcome_message: str
:param group_help_cls: Class to use for formatting group help.
:type group_help_cls: HelpFile
:param command_help_cls: Class to use for formatting command help.
:type command_help_cls: HelpFile
        :param help_cls: Class to use for formatting generic help.
        :type help_cls: HelpFile
:param textwrap_width: Line length to which text will be wrapped.
:type textwrap_width: int
"""
from .cli import CLI
if cli_ctx is not None and not isinstance(cli_ctx, CLI):
raise CtxTypeError(cli_ctx)
self.cli_ctx = cli_ctx
self.privacy_statement = privacy_statement
self.welcome_message = welcome_message
self.max_line_len = 0
self.group_help_cls = group_help_cls
self.command_help_cls = command_help_cls
self.help_cls = help_cls
self.textwrap_width = textwrap_width
def show_privacy_statement(self):
ran_before = self.cli_ctx.config.getboolean('core', 'first_run', fallback=False)
if not ran_before:
if self.privacy_statement:
print(self.privacy_statement, file=self.cli_ctx.out_file)
self.cli_ctx.config.set_value('core', 'first_run', 'yes')
def show_welcome_message(self):
_print_indent(self.welcome_message, width=self.textwrap_width)
def show_welcome(self, parser):
self.show_privacy_statement()
self.show_welcome_message()
help_file = self.group_help_cls(self, '', parser)
self.print_description_list(help_file.children)
def show_help(self, cli_name, nouns, parser, is_group):
delimiters = ' '.join(nouns)
help_file = self.command_help_cls(self, delimiters, parser) if not is_group \
else self.group_help_cls(self, delimiters, parser)
help_file.load(parser)
if not nouns:
help_file.command = ''
self._print_detailed_help(cli_name, help_file)
``` |
{
"source": "jiasli/pyaz",
"score": 2
} |
#### File: jiasli/pyaz/example.py
```python
from pyaz import az
def az_demo(args_str):
print("calling: az", args_str)
result = az(args_str)
print("exit code:", result.exit_code)
print("out:", result.out)
print("log:", result.log)
def main():
# exit code 0: succeed
az_demo("account list-locations --query \"[?name=='westus'] | [0]\"")
# exit code 1: request error
az_demo("group create -l eastus4 -n foo")
# exit code 2: command parsing error
az_demo("group not-exist")
# exit code 3: resource doesn't exist
az_demo("group show -g not-exist")
# Happy scripting with Python!
import json
accounts = json.loads(az('account list').out)
print("My subscriptions:")
for a in accounts:
selected = "*" if a["isDefault"] else " "
print("{} {} {}".format(selected, a["id"], a["name"]))
if __name__ == "__main__":
main()
``` |
{
"source": "Jiasm/ClickHouse",
"score": 2
} |
#### File: integration/test_odbc_interaction/test.py
```python
import time
import pytest
import os
import pymysql.cursors
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from helpers.cluster import ClickHouseCluster
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs'))
node1 = cluster.add_instance('node1', with_odbc_drivers=True, with_mysql=True, image='yandex/clickhouse-integration-test', main_configs=['configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml', 'configs/dictionaries/sqlite3_odbc_cached_dictionary.xml', 'configs/dictionaries/postgres_odbc_hashed_dictionary.xml'], stay_alive=True)
create_table_sql_template = """
CREATE TABLE `clickhouse`.`{}` (
`id` int(11) NOT NULL,
`name` varchar(50) NOT NULL,
`age` int NOT NULL default 0,
`money` int NOT NULL default 0,
`column_x` int default NULL,
PRIMARY KEY (`id`)) ENGINE=InnoDB;
"""
def get_mysql_conn():
conn = pymysql.connect(user='root', password='<PASSWORD>', host='127.0.0.1', port=3308)
return conn
def create_mysql_db(conn, name):
with conn.cursor() as cursor:
cursor.execute(
"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))
def create_mysql_table(conn, table_name):
with conn.cursor() as cursor:
cursor.execute(create_table_sql_template.format(table_name))
def get_postgres_conn():
conn_string = "host='localhost' user='postgres' password='<PASSWORD>'"
conn = psycopg2.connect(conn_string)
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
conn.autocommit = True
return conn
def create_postgres_db(conn, name):
cursor = conn.cursor()
cursor.execute("CREATE SCHEMA {}".format(name))
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]
print "sqlite data received"
node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t1(x INTEGER PRIMARY KEY ASC, y, z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t2(X INTEGER PRIMARY KEY ASC, Y, Z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t3(X INTEGER PRIMARY KEY ASC, Y, Z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
node1.exec_in_container(["bash", "-c", "echo 'CREATE TABLE t4(X INTEGER PRIMARY KEY ASC, Y, Z);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
print "sqlite tables created"
mysql_conn = get_mysql_conn()
print "mysql connection received"
## create mysql db and table
create_mysql_db(mysql_conn, 'clickhouse')
print "mysql database created"
postgres_conn = get_postgres_conn()
print "postgres connection received"
create_postgres_db(postgres_conn, 'clickhouse')
print "postgres db created"
cursor = postgres_conn.cursor()
cursor.execute("create table if not exists clickhouse.test_table (column1 int primary key, column2 varchar(40) not null)")
yield cluster
except Exception as ex:
print(ex)
raise ex
finally:
cluster.shutdown()
def test_mysql_simple_select_works(started_cluster):
mysql_setup = node1.odbc_drivers["MySQL"]
table_name = 'test_insert_select'
conn = get_mysql_conn()
create_mysql_table(conn, table_name)
# Check that NULL-values are handled correctly by the ODBC-bridge
with conn.cursor() as cursor:
cursor.execute("INSERT INTO clickhouse.{} VALUES(50, 'null-guy', 127, 255, NULL), (100, 'non-null-guy', 127, 255, 511);".format(table_name))
conn.commit()
assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name), settings={"external_table_functions_use_nulls": "1"}) == '\\N\n511\n'
assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name), settings={"external_table_functions_use_nulls": "0"}) == '0\n511\n'
node1.query('''
CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nullable(UInt32)) ENGINE = MySQL('mysql1:3306', 'clickhouse', '{}', 'root', 'clickhouse');
'''.format(table_name, table_name))
node1.query("INSERT INTO {}(id, name, money, column_x) select number, concat('name_', toString(number)), 3, NULL from numbers(49) ".format(table_name))
node1.query("INSERT INTO {}(id, name, money, column_x) select number, concat('name_', toString(number)), 3, 42 from numbers(51, 49) ".format(table_name))
assert node1.query("SELECT COUNT () FROM {} WHERE column_x IS NOT NULL".format(table_name)) == '50\n'
assert node1.query("SELECT COUNT () FROM {} WHERE column_x IS NULL".format(table_name)) == '50\n'
assert node1.query("SELECT count(*) FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name)) == '100\n'
# previously this test fails with segfault
# just to be sure :)
assert node1.query("select 1") == "1\n"
conn.close()
def test_mysql_insert(started_cluster):
mysql_setup = node1.odbc_drivers["MySQL"]
table_name = 'test_insert'
conn = get_mysql_conn()
create_mysql_table(conn, table_name)
odbc_args = "'DSN={}', '{}', '{}'".format(mysql_setup["DSN"], mysql_setup["Database"], table_name)
node1.query("create table mysql_insert (id Int64, name String, age UInt8, money Float, column_x Nullable(Int16)) Engine=ODBC({})".format(odbc_args))
node1.query("insert into mysql_insert values (1, 'test', 11, 111, 1111), (2, 'odbc', 22, 222, NULL)")
assert node1.query("select * from mysql_insert") == "1\ttest\t11\t111\t1111\n2\todbc\t22\t222\t\\N\n"
node1.query("insert into table function odbc({}) values (3, 'insert', 33, 333, 3333)".format(odbc_args))
node1.query("insert into table function odbc({}) (id, name, age, money) select id*4, upper(name), age*4, money*4 from odbc({}) where id=1".format(odbc_args, odbc_args))
assert node1.query("select * from mysql_insert where id in (3, 4)") == "3\tinsert\t33\t333\t3333\n4\tTEST\t44\t444\t\\N\n"
def test_sqlite_simple_select_function_works(started_cluster):
sqlite_setup = node1.odbc_drivers["SQLite3"]
sqlite_db = sqlite_setup["Database"]
node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t1 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
assert node1.query("select * from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t2\t3\n"
assert node1.query("select y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "2\n"
assert node1.query("select z from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "3\n"
assert node1.query("select x from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\n"
assert node1.query("select x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t2\n"
assert node1.query("select z, x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "3\t1\t2\n"
assert node1.query("select count(), sum(x) from odbc('DSN={}', '{}') group by x".format(sqlite_setup["DSN"], 't1')) == "1\t1\n"
def test_sqlite_simple_select_storage_works(started_cluster):
sqlite_setup = node1.odbc_drivers["SQLite3"]
sqlite_db = sqlite_setup["Database"]
node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t4 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
node1.query("create table SqliteODBC (x Int32, y String, z String) engine = ODBC('DSN={}', '', 't4')".format(sqlite_setup["DSN"]))
assert node1.query("select * from SqliteODBC") == "1\t2\t3\n"
assert node1.query("select y from SqliteODBC") == "2\n"
assert node1.query("select z from SqliteODBC") == "3\n"
assert node1.query("select x from SqliteODBC") == "1\n"
assert node1.query("select x, y from SqliteODBC") == "1\t2\n"
assert node1.query("select z, x, y from SqliteODBC") == "3\t1\t2\n"
assert node1.query("select count(), sum(x) from SqliteODBC group by x") == "1\t1\n"
def test_sqlite_odbc_hashed_dictionary(started_cluster):
sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]
node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t2 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))") == "3\n"
assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))") == "1\n" # default
time.sleep(5) # first reload
node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t2 values(200, 2, 7);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
# No reload because of invalidate query
time.sleep(5)
assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))") == "3\n"
assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))") == "1\n" # still default
node1.exec_in_container(["bash", "-c", "echo 'REPLACE INTO t2 values(1, 2, 5);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
# waiting for reload
time.sleep(5)
assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))") == "5\n"
assert node1.query("select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))") == "7\n" # new value
def test_sqlite_odbc_cached_dictionary(started_cluster):
sqlite_db = node1.odbc_drivers["SQLite3"]["Database"]
node1.exec_in_container(["bash", "-c", "echo 'INSERT INTO t3 values(1, 2, 3);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))") == "3\n"
# Allow insert
node1.exec_in_container(["bash", "-c", "chmod a+rw /tmp"], privileged=True, user='root')
node1.exec_in_container(["bash", "-c", "chmod a+rw {}".format(sqlite_db)], privileged=True, user='root')
node1.query("insert into table function odbc('DSN={};', '', 't3') values (200, 2, 7)".format(node1.odbc_drivers["SQLite3"]["DSN"]))
assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(200))") == "7\n" # new value
node1.exec_in_container(["bash", "-c", "echo 'REPLACE INTO t3 values(1, 2, 12);' | sqlite3 {}".format(sqlite_db)], privileged=True, user='root')
time.sleep(5)
assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))") == "12\n"
def test_postgres_odbc_hached_dictionary_with_schema(started_cluster):
conn = get_postgres_conn()
cursor = conn.cursor()
cursor.execute("insert into clickhouse.test_table values(1, 'hello'),(2, 'world')")
time.sleep(5)
assert node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))") == "hello\n"
assert node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))") == "world\n"
def test_postgres_odbc_hached_dictionary_no_tty_pipe_overflow(started_cluster):
conn = get_postgres_conn()
cursor = conn.cursor()
cursor.execute("insert into clickhouse.test_table values(3, 'xxx')")
    for i in range(100):
try:
node1.query("system reload dictionary postgres_odbc_hashed", timeout=5)
except Exception as ex:
assert False, "Exception occured -- odbc-bridge hangs: " + str(ex)
assert node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(3))") == "xxx\n"
def test_postgres_insert(started_cluster):
conn = get_postgres_conn()
conn.cursor().execute("truncate table clickhouse.test_table")
# Also test with Servername containing '.' and '-' symbols (defined in
# postgres .yml file). This is needed to check parsing, validation and
# reconstruction of connection string.
node1.query("create table pg_insert (column1 UInt8, column2 String) engine=ODBC('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')")
node1.query("insert into pg_insert values (1, 'hello'), (2, 'world')")
assert node1.query("select * from pg_insert") == '1\thello\n2\tworld\n'
node1.query("insert into table function odbc('DSN=postgresql_odbc;', 'clickhouse', 'test_table') format CSV 3,test")
node1.query("insert into table function odbc('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table') select number, 's' || toString(number) from numbers (4, 7)")
assert node1.query("select sum(column1), count(column1) from pg_insert") == "55\t10\n"
assert node1.query("select sum(n), count(n) from (select (*,).1 as n from (select * from odbc('DSN=postgresql_odbc;', 'clickhouse', 'test_table')))") == "55\t10\n"
def test_bridge_dies_with_parent(started_cluster):
node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))")
clickhouse_pid = node1.get_process_pid("clickhouse server")
bridge_pid = node1.get_process_pid("odbc-bridge")
assert clickhouse_pid is not None
assert bridge_pid is not None
while clickhouse_pid is not None:
try:
node1.exec_in_container(["bash", "-c", "kill {}".format(clickhouse_pid)], privileged=True, user='root')
except:
pass
clickhouse_pid = node1.get_process_pid("clickhouse server")
time.sleep(1)
time.sleep(1) # just for sure, that odbc-bridge caught signal
bridge_pid = node1.get_process_pid("odbc-bridge")
if bridge_pid:
out = node1.exec_in_container(["gdb", "-p", str(bridge_pid), "--ex", "thread apply all bt", "--ex", "q"], privileged=True, user='root')
print("Bridge is running, gdb output:")
print(out)
assert clickhouse_pid is None
assert bridge_pid is None
``` |
{
"source": "jiasudemotuohe/deep_learning",
"score": 3
} |
#### File: deep_learning/eunite/eunite_data.py
```python
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
EUNITE_PATH = "dataset/eunite.xlsx"
PARSE_TABLE_NAME = "mainData"
def load_eunite_data():
"""
    Return the generated load data, including all the features we handle.
"""
data = open_file()
X, Y = generate_features(data)
return X.values, Y.values
def load_eunite_train_data():
X, Y = load_eunite_data()
trains_test_rate = int(len(X) * 0.7)
train_x = X[0: trains_test_rate]
train_y = Y[0: trains_test_rate]
test_x = X[trains_test_rate:]
test_y = Y[trains_test_rate:]
return train_x, train_y, test_x, test_y
def generate_features(df):
"""
    Parse the data; we need to transfer the class numbers to one-hot encodings for our later calculations.
"""
months = df["Month"]
days = df["Day"]
one_hot_months = cast_to_one_hot(months, n_classes=12)
days = cast_to_one_hot(days, n_classes=31)
one_hot_months = pd.DataFrame(one_hot_months)
days = pd.DataFrame(days)
df = pd.merge(left=df, right=one_hot_months, left_index=True, right_index=True)
df = pd.merge(left=df, right=days, left_index=True, right_index=True)
y = df['Max Load']
    # note: maybe we need to normalize the temperature data
temperature = normalization(df['Temp'].values)
temperature = pd.DataFrame(temperature)
df = pd.merge(left=df, right=temperature, left_index=True, right_index=True)
drop_columns = ["ID", "Month", "Day", "Year", "Max Load", "Temp"]
df.drop(drop_columns, axis=1, inplace=True)
print(df[0:10], "\n", y[0])
return df, y
def normalization(data):
return (data - np.mean(data)) / np.max(np.abs(data))
def cast_to_one_hot(data, n_classes):
"""
cast the classifier data to one hot
"""
one_hot_months = np.eye(N=n_classes)[[data - 1]]
return one_hot_months
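# Quick illustration (added note, not from the original source): np.eye(n) is
# the n x n identity matrix, so row-indexing it with (data - 1) picks the
# one-hot row for each 1-based class label, e.g. labels [1, 3] with n_classes=3
# yield [[1., 0., 0.], [0., 0., 1.]].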
def show_month_temperature_load_image(df):
plt.title("relation of temperature and load")
max_load = df["Max Load"]
temp = df['Temp'] * 15
plt.plot(max_load)
plt.plot(temp)
plt.xlabel('time')
plt.annotate('temperature', xy=[200, 200], xytext=(300, 200))
plt.annotate('load', xy=[200, 600], xytext=(200, 800))
plt.show()
def open_file():
"""
open the eunite load excel file to return
"""
xlsx_file = pd.ExcelFile(EUNITE_PATH)
return xlsx_file.parse(PARSE_TABLE_NAME)
if __name__ == '__main__':
df = open_file()
show_month_temperature_load_image(df)
x, y = load_eunite_data()
print(x.shape)
```
#### File: deep_learning/manual_bpnn/bpnn_manual.py
```python
import numpy as np
import random
import data_loader
import math
'''
30:20:1
'''
MINIMUM_NUMBER = 0.0000001
class BPNNeuralClassification:
def __init__(self, sizes):
self.num_layers = len(sizes)
self.bias = [np.random.randn(n, 1) for n in sizes[1:]] # bias
self.weights = [np.random.randn(c, r) for c, r in zip(sizes[1:], sizes[:-1])] # weight
def train(self, x_batch, y_batch, learning_rate=0.5, max_step=5000):
self.n_samples = len(x_batch)
self.learning_rate = learning_rate
for i in range(max_step):
delta_w_batch = [np.zeros(w.shape) for w in self.weights]
delta_b_batch = [np.zeros(b.shape) for b in self.bias]
loss_sum = 0
for x, y in zip(x_batch, y_batch):
delta_w, delta_b, loss = self.back_propagation(x, y)
delta_b_batch = [bb + dbb for bb, dbb in zip(delta_b_batch, delta_b)]
delta_w_batch = [bw + dbw for bw, dbw in zip(delta_w_batch, delta_w)]
loss_sum += loss
self.weights = [w - dw/self.n_samples * learning_rate for w, dw in zip(self.weights, delta_w_batch)]
self.bias = [b - db/self.n_samples * learning_rate for b, db in zip(self.bias, delta_b_batch)]
if i % 500 == 0:
print("this is %s epoch loss is %s" % (i, loss_sum))
def back_propagation(self, a, y):
a = a.reshape((-1, 1))
activations = [a]
zs = []
for w, b in zip(self.weights, self.bias):
z = np.dot(w, a) + b
a = self.sigmoid(z)
zs.append(z)
activations.append(a)
# back propagation, to calculate the loss function, and use the loss to calculate the delta_w, delta_b
delta_w = [np.zeros(w.shape) for w in self.weights]
delta_b = [np.zeros(b.shape) for b in self.bias]
loss = self.loss(y, activations[-1])
loss_delta = self.loss_equation_derivative(y, activations[-1])
for i in range(1, self.num_layers): # -1, -2
if i == 1: # back_calculate the delta_w, delta_b, i==1 calculate the last layer's delta
delta_b[-i] = loss_delta * self.sigmoid_derivative(zs[-i])
delta_w[-i] = np.dot(delta_b[-i], activations[-i-1].T)
else:
delta_b[-i] = np.dot(self.weights[-i+1].T, delta_b[-i+1]) * self.sigmoid_derivative(zs[-i])
delta_w[-i] = np.dot(delta_b[-i], activations[-i-1].T)
return delta_w, delta_b, loss
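    # Reference (standard backpropagation equations that the loop above implements):
    #   output layer:  delta_L = dLoss/da_L * sigmoid'(z_L)
    #   hidden layers: delta_l = (W_{l+1}^T @ delta_{l+1}) * sigmoid'(z_l)
    #   gradients:     dLoss/dW_l = delta_l @ a_{l-1}^T,  dLoss/db_l = delta_l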
def sigmoid_derivative(self, z):
return self.sigmoid(z) * (1 - self.sigmoid(z))
def loss_equation_derivative(self, y, y_pred):
return -(y / y_pred + (y - 1) / (1-y_pred))
# return y * (1 / y_pred) + (1 - y) * (1 / (1-y_pred))
def loss(self, y, y_pred):
        loss = y * np.log(y_pred + MINIMUM_NUMBER) + (1-y) * np.log(1-y_pred + MINIMUM_NUMBER)
return -loss
def sigmoid(self, z):
return 1/(1 + np.exp(-z))
def predict(self, x, y):
z1 = np.dot(self.weights[0], x.T) + self.bias[0]
a1 = self.sigmoid(z1)
z2 = np.dot(self.weights[1], a1) + self.bias[1]
a2 = self.sigmoid(z2)
print(a2)
print(y)
def run():
x, target, feature_names, target_names = data_loader.load_cancer_data()
size = [30, 10, 1]
model = BPNNeuralClassification(size)
model.train(x, target)
# model.predict(x, target)
if __name__ == "__main__":
# test()
run()
```
#### File: deep_learning/manual_bpnn/logist_regression_04.py
```python
import numpy as np
import random
import math
extreme_min_number = 1e-12  # small epsilon so log() is never evaluated at exactly 0
def main():
x_list, y_list = generate_data() # generate the data
train(x_list, y_list)
def train(x, y):
learning_rate = 0.1
max_step = 10000
w = np.array([random.random(), random.random()])
bias = np.array([random.random()])
for i in range(max_step):
y_pred = inference(x, w, bias)
dw, db = gradient(x, y, y_pred)
w += -learning_rate * dw
bias += -learning_rate * db
loss = eval_loss(y, y_pred)
if i % 100 == 0:
print("i = %s dw=%s db=%s loss = %s" % (i, dw, db, loss))
print("the final w=%s bias=%s" % (w, bias))
def gradient(x, y, y_pred):
    # use (y_pred - y) so dw/db are gradients of the cross-entropy loss and the
    # descent update in train() (w += -learning_rate * dw) actually moves downhill
    y_diff = (y_pred - y).reshape((-1, 1))
dw = x * y_diff
db = 1 * y_diff
dw_avg = dw.mean(axis=0)
db_avg = db.mean(axis=0)
return dw_avg, db_avg
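# Derivation note (standard logistic-regression result, added for clarity):
# with p = sigmoid(w.x + b) and cross-entropy loss L = -[y*log(p) + (1-y)*log(1-p)],
# dL/dw = (p - y) * x and dL/db = (p - y), which is what gradient() returns
# (averaged over the batch) for the descent update in train().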
def eval_loss(y_true, y_pred):
loss = 0
for i in range(len(y_true)):
if y_pred[i] == 0:
loss += y_true[i] * math.log(y_pred[i] + extreme_min_number) + (1.0 - y_true[i]) * math.log(1.0 - y_pred[i])
elif y_pred[i] == 1:
loss += y_true[i] * math.log(y_pred[i]) + (1.0 - y_true[i]) * math.log(1.0 - y_pred[i] + extreme_min_number)
else:
loss += y_true[i] * math.log(y_pred[i]) + (1.0 - y_true[i]) * math.log(1.0 - y_pred[i])
return loss
def inference(x, w, bias):
y_value = np.dot(x, w.T) + bias
return sigmoid_function(y_value, label=0)
def generate_data():
theta1 = 2
theta2 = 3
bias_1 = 1
x_list = []
y_list = []
n_samples = 300
for i in range(n_samples):
x_1 = random.randint(-1, 1)
x_2 = random.randint(-1, 1)
y_value = theta1 * x_1 + theta2 * x_2 + bias_1
y_label = sigmoid_function(y_value, label=1)
x_list.append([x_1, x_2])
y_list.append(y_label)
return np.array(x_list), np.array(y_list)
def sigmoid_function(y_value, label):
if label == 1:
return 1 if 1 / (1 + math.exp(-y_value)) >= 0.5 else 0
else:
y_proba = []
for i in range(len(y_value)):
try:
temp = 1 / (1 + math.exp(-y_value[i] + extreme_min_number))
y_proba.append(temp)
except Exception as ex:
print(-y_value[i] + extreme_min_number, ex)
return y_proba
if __name__ == "__main__":
# test()
main()
```
#### File: deep_learning/tensorflow/keras_residual_net.py
```python
from tensorflow import keras
import data_utils
import tensorflow_docs as tf_docs
from matplotlib import pyplot
import tensorflow_docs.plots
import tensorflow as tf
"""
use the Keras functional API to implement a CNN residual network
"""
def identity_block(x, filters, kernel_sizes, strides, paddings, block):
"""
    Identity block: two convolutions with batch norm and a shortcut (skip) connection added before the final activation.
"""
short_cut_x = x
x = keras.layers.Conv2D(filters=filters[0], kernel_size=kernel_sizes[0], strides=strides[0], padding=paddings[0],
data_format='channels_last', activation=keras.activations.relu, name='main_route'+block +
'a1')(x)
x = keras.layers.BatchNormalization(axis=3)(x)
conv2d = keras.layers.Conv2D(filters=filters[1], kernel_size=kernel_sizes[1], strides=strides[1], padding=paddings[1]
, data_format='channels_last', use_bias=True, name='main_route'+block+'2a')
"""
    here we need to use short_cut_x as the shortcut connection;
    if the shape of short_cut_x does not equal the shape of x, we need a projection (w * short_cut_x) to reshape
    short_cut_x, so that we can add short_cut_x and x
"""
x = keras.layers.BatchNormalization(axis=3)(x)
x = conv2d(x) + short_cut_x
return keras.layers.Activation(keras.activations.relu)(x)
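# Sketch only (an assumption, not code from this repo): if the shortcut ever
# differed in shape from x (e.g. different channel count or stride), a 1x1
# convolution projection could be applied before the add, for example:
#   short_cut_x = keras.layers.Conv2D(filters[1], kernel_size=(1, 1),
#                                     strides=strides[1], padding='same')(short_cut_x)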
def create_residual_model():
img_inputs = keras.Input(shape=(64, 64, 3))
x = keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same', strides=(1, 1))(img_inputs)
    x = identity_block(x, filters=[64, 64], kernel_sizes=[(3, 3), (3, 3)], strides=[(1, 1), (1, 1)],
paddings=['same', 'same'], block='block_1')
x = keras.layers.MaxPool2D()(x)
x = identity_block(x, filters=[64, 64], kernel_sizes=[(3, 3), (3, 3)], strides=[(1, 1), (1, 1)],
paddings=['same', 'same'], block='block_2')
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(64)(x)
x = keras.layers.Dense(10)(x)
model = keras.Model(inputs=img_inputs, outputs=x)
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.summary()
return model
def train(model):
train_x, train_y, test_x, test_y = data_utils.load_gesture_symbol_data()
call_backs = [keras.callbacks.TensorBoard(log_dir='logs')]
history = model.fit(train_x, train_y, epochs=5, callbacks=call_backs, batch_size=128,
validation_data=(test_x, test_y))
    plotter = tf_docs.plots.HistoryPlotter()
    plotter.plot({"residual": history}, metric='accuracy')
    plotter.plot({"residual": history}, metric='loss')
pyplot.show()
if __name__ == '__main__':
model = create_residual_model()
train(model)
``` |
{
"source": "jiata/nbtoolz",
"score": 2
} |
#### File: nbtoolz/nbtoolz/nbtoolz.py
```python
import glob
from itertools import chain
import nbformat
from toolz import curry, compose
import logging
import sys
import json
def _read_notebook(filename):
return nbformat.read(filename, as_version=4)
@curry
def _strip_sensitive(cell, strip_tag="stripout"):
if "tags" in cell["metadata"].keys() and strip_tag in cell["metadata"]["tags"]:
cell["outputs"] = []
return cell
@curry
def _replace_strings(find_str, replace_str, cell):
cell["source"] = cell["source"].replace(find_str, replace_str)
return cell
def _write_notebook(notebook_filename, nb):
nbformat.write(nb, notebook_filename)
def _setup_logger(debug=False):
logger = logging.getLogger(__name__)
level = logging.DEBUG if debug else logging.ERROR
logger.setLevel(level)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logger.addHandler(sh)
def _expand_path(notebook_path):
return glob.glob(notebook_path)
class NBToolz(object):
def __init__(self, debug=False):
super().__init__()
_setup_logger(debug=debug)
self._logger = logging.getLogger(__name__)
self._functions_list = []
def read(self, *notebooks):
""" Read from the files specified
Accepts wildcard arguments such as *.ipynb
Args:
*notebooks: One or more notebooks
Returns:
NBToolz
"""
self._notebooks = list(chain(*map(_expand_path, notebooks)))
return self
def read_stdin(self):
""" Set it to read from stdin
Mainly meant to be used with git filters
Returns:
NBToolz
"""
self._notebooks = [sys.stdin]
return self
def strip_output(self, strip_tag="stripout"):
"""Strip the output of tagged cells
Args:
strip_tag: Tag to search for
Returns:
NBToolz
"""
self._logger.debug(f"Adding function: Strip out tag {strip_tag}")
self._functions_list.append(_strip_sensitive(strip_tag=strip_tag))
return self
def replace(self, find_str, replace_str):
"""Replace strings in the notebook cells
Args:
find_str: String to search for
replace_str: String to replace with
Returns:
NBToolz
"""
self._logger.debug(f"Adding function: Replace {find_str} with {replace_str}")
self._functions_list.append(_replace_strings(find_str, replace_str))
return self
def _execute(self):
self._logger.info("Executing...")
functions = compose(*reversed(self._functions_list))
for nb_filename in self._notebooks:
self._logger.info(f"Reading from {nb_filename}")
nb = _read_notebook(nb_filename)
nb.cells = list(map(functions, nb.cells))
yield nb
def print(self):
"""Print output of transformation
Returns:
None
"""
for nb in self._execute():
print(json.dumps(nb, indent=4))
def write(self, *output):
"""Write output to notebook file
Args:
output: One or more files to write the output to
Returns:
None
"""
for nb, out in zip(self._execute(), output):
self._logger.info(f"Writing to {out}")
_write_notebook(out, nb)
def overwrite(self):
"""Overwrite the input notebook
Returns:
None
"""
self.write(*self._notebooks)
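# Illustrative usage of the fluent API above (file names are hypothetical):
#   NBToolz().read("notebooks/*.ipynb").strip_output().replace("SECRET", "<redacted>").overwrite()
# The same chain of methods is exposed on the command line through python-fire below.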
if __name__ == "__main__":
import fire
fire.Fire(NBToolz)
``` |
{
"source": "JiatengTao/speaker-verification-api",
"score": 2
} |
#### File: django-doubtfire-api/endpoint/views.py
```python
from django.shortcuts import render, redirect
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, DataError, connection
from django_redis import get_redis_connection
from endpoint.tasks import celery_enroll_user, celery_validate_user
from django.conf import settings
import json
import urllib.error
# Create your views here.
"""
{
"id": 123456789,
"recording_link": "https://speaker-ver-api-td.s3-ap-southeast-2.amazonaws.com/enrollment.flac"
}
"""
@csrf_exempt
def enroll_user(request):
if request.method == "POST":
response_data = {"success": False}
json_data = json.loads(request.body)
try:
user_id = json_data["id"]
recording_link = json_data["recording_link"]
celery_enroll_user.delay(user_id, recording_link)
response_data["success"] = True
except ValueError:
response_data["error"] = "Field ID is invalid. Expected an interger."
except DataError:
response_data["error"] = "Integer for ID is out of range."
except IntegrityError:
response_data["error"] = "User is already enrolled"
except urllib.error.HTTPError as exception:
response_data["error"] = f"HTTPError - {exception}"
except KeyError:
response_data["error"] = "Malformed Data"
return JsonResponse(response_data)
@csrf_exempt
def validate_recording(request):
if request.method == "POST":
response_data = {"success": False}
json_data = json.loads(request.body)
try:
user_id = json_data["id"]
recording_link = json_data["recording_link"]
celery_async_result = celery_validate_user.delay(user_id, recording_link)
score = celery_async_result.get()
response_data["success"] = True
response_data["data"] = {"score": score}
except KeyError:
response_data["error"] = "Malformed Data"
except urllib.error.HTTPError as exception:
response_data["error"] = f"HTTPError - {exception}"
except ObjectDoesNotExist:
response_data["error"] = "User does not exist"
return JsonResponse(response_data)
def check_redis_health(request):
get_redis_connection().ping()
connection.ensure_connection()
return HttpResponse("Redis is connected successfully")
def redirect_flower_dashboard(request):
FLOWER_URL = settings.FLOWER_URL
return redirect(FLOWER_URL)
``` |
{
"source": "JiatianWu/tf-monodepth2",
"score": 3
} |
#### File: tf-monodepth2/densify/flow_color.py
```python
import sys
import numpy as np
def makeColorwheel():
# color encoding scheme
# adapted from the color circle idea described at
# http://members.shaw.ca/quadibloc/other/colint.htm
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3]) # r g b
col = 0
#RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0, RY, 1)/RY)
col += RY
#YG
    colorwheel[col:YG+col, 0] = 255 - np.floor(255*np.arange(0, YG, 1)/YG)
    colorwheel[col:YG+col, 1] = 255
    col += YG
    #GC
    colorwheel[col:GC+col, 1] = 255
    colorwheel[col:GC+col, 2] = np.floor(255*np.arange(0, GC, 1)/GC)
    col += GC
    #CB
    colorwheel[col:CB+col, 1] = 255 - np.floor(255*np.arange(0, CB, 1)/CB)
    colorwheel[col:CB+col, 2] = 255
    col += CB
    #BM
    colorwheel[col:BM+col, 2] = 255
    colorwheel[col:BM+col, 0] = np.floor(255*np.arange(0, BM, 1)/BM)
    col += BM
    #MR
    colorwheel[col:MR+col, 2] = 255 - np.floor(255*np.arange(0, MR, 1)/MR)
    colorwheel[col:MR+col, 0] = 255
return colorwheel
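# Note derived from the constants above: the wheel holds RY + YG + GC + CB + BM
# + MR = 55 colors; computeColor() below maps flow direction to a position on
# this wheel (hue) and flow magnitude to saturation.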
def computeColor(u, v):
    colorwheel = makeColorwheel()
nan_u = np.isnan(u)
nan_v = np.isnan(v)
nan_u = np.where(nan_u)
nan_v = np.where(nan_v)
u[nan_u] = 0
u[nan_v] = 0
v[nan_u] = 0
v[nan_v] = 0
ncols = colorwheel.shape[0]
radius = np.sqrt(u**2 + v**2)
a = np.arctan2(-v, -u) / np.pi
    fk = (a+1) / 2 * (ncols-1)  # -1~1 mapped to 0~ncols-1
    k0 = fk.astype(np.uint8)  # 0, 1, ..., ncols-1
    k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
img = np.empty([k1.shape[0], k1.shape[1],3])
ncolors = colorwheel.shape[1]
for i in range(ncolors):
tmp = colorwheel[:,i]
col0 = tmp[k0]/255
col1 = tmp[k1]/255
col = (1-f)*col0 + f*col1
idx = radius <= 1
col[idx] = 1 - radius[idx]*(1-col[idx]) # increase saturation with radius
col[~idx] *= 0.75 # out of range
img[:,:,2-i] = np.floor(255*col).astype(np.uint8)
return img.astype(np.uint8)
def computeImg(flow):
eps = sys.float_info.epsilon
UNKNOWN_FLOW_THRESH = 1e9
UNKNOWN_FLOW = 1e10
u = flow[: , : , 0]
v = flow[: , : , 1]
maxu = -999
maxv = -999
minu = 999
minv = 999
maxrad = -1
#fix unknown flow
greater_u = np.where(u > UNKNOWN_FLOW_THRESH)
greater_v = np.where(v > UNKNOWN_FLOW_THRESH)
u[greater_u] = 0
u[greater_v] = 0
v[greater_u] = 0
v[greater_v] = 0
maxu = max([maxu, np.amax(u)])
minu = min([minu, np.amin(u)])
maxv = max([maxv, np.amax(v)])
minv = min([minv, np.amin(v)])
rad = np.sqrt(np.multiply(u,u)+np.multiply(v,v))
maxrad = max([maxrad, np.amax(rad)])
u = u/(maxrad+eps)
v = v/(maxrad+eps)
img = computeColor(u, v)
return img
```
#### File: tf-monodepth2/kinect_tools/kinect_depth_process.py
```python
import argparse
import open3d as o3d
import os
import json
import glob
import sys
import pickle
from PIL import Image
import numpy as np
class KinectDataSource:
def __init__(self, input, output):
self.input = input
self.output = output
self.crop_corner = (160, 0) # (x, y)
self.crop_size = (960, 720) # (width, height)
self.output_size = (640, 480) # (width, height)
self.zoom_x = float(self.output_size[0] / self.crop_size[0])
self.zoom_y = float(self.output_size[1] / self.crop_size[1])
self.reader = o3d.io.AzureKinectMKVReader()
self.reader.open(self.input)
if not self.reader.is_opened():
raise RuntimeError("Unable to open file {}".format(args.input))
def concat_image_seq(self, seq):
for i, image in enumerate(seq):
if i == 0:
res = image
else:
res = np.hstack((res, image))
return res
def load_intrinsics_raw(self, metadata_dict):
intrinsic_matrix = metadata_dict['intrinsic_matrix']
P = np.transpose(np.reshape(intrinsic_matrix, (3, 3)))
return P
def crop_intrinsics(self, mat):
out = np.copy(mat)
out[0,2] -= self.crop_corner[0]
out[1,2] -= self.crop_corner[1]
return out
def scale_intrinsics(self, mat, sx, sy):
out = np.copy(mat)
out[0,0] *= sx
out[0,2] *= sx
out[1,1] *= sy
out[1,2] *= sy
return out
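    # Background (standard pinhole-camera bookkeeping, matching the two helpers
    # above): cropping at corner (x0, y0) only shifts the principal point,
    #   cx' = cx - x0, cy' = cy - y0,
    # while resizing by (sx, sy) scales focal lengths and principal point,
    #   fx' = fx * sx, cx' = cx * sx, fy' = fy * sy, cy' = cy * sy.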
def write_intrinsics(self, intrinsics, path):
fx = intrinsics[0, 0]
fy = intrinsics[1, 1]
cx = intrinsics[0, 2]
cy = intrinsics[1, 2]
with open(path, 'w') as f:
f.write('%f,0.,%f,0.,%f,%f,0.,0.,1.' % (fx, cx, fy, cy))
def generate_train_txt(self):
with open(self.output + '/train.txt', 'w') as tf:
train_data_dir = self.output + '/train_data'
frame_ids = os.listdir(train_data_dir)
for frame in frame_ids:
if '.jpg' in frame:
tf.write('%s %s\n' % ('train_data', frame[:-4]))
def preprocess_image(self, image):
color_image = Image.fromarray(image)
color_image = color_image.crop(box=(self.crop_corner[0],
self.crop_corner[1],
self.crop_corner[0] + self.crop_size[0],
self.crop_corner[1] + self.crop_size[1]))
color_image = color_image.resize(self.output_size)
return color_image
def run_rgb_only(self):
idx = 0
while not self.reader.is_eof():
rgbd = self.reader.next_frame()
if rgbd is None:
continue
if self.output is not None:
color_image = self.preprocess_image(np.asarray(rgbd.color))
color_filename = '{0}/train_data/{1:06d}.jpg'.format(
self.output, idx)
print('Writing to {}'.format(color_filename))
color_image.save(color_filename)
idx += 1
self.reader.close()
def run(self):
if self.output is not None:
abspath = os.path.abspath(self.output)
metadata = self.reader.get_metadata()
o3d.io.write_azure_kinect_mkv_metadata(
'{}/intrinsic.json'.format(abspath), metadata)
with open('{}/intrinsic.json'.format(abspath)) as json_file:
metadata_dict = json.load(json_file)
intrinsics_raw = self.load_intrinsics_raw(metadata_dict)
intrinsics_crop = self.crop_intrinsics(intrinsics_raw)
intrinsics_scale = self.scale_intrinsics(intrinsics_crop, self.zoom_x, self.zoom_y)
idx = 0
image_seq = []
depth_seq = []
while not self.reader.is_eof():
rgbd = self.reader.next_frame()
if rgbd is None:
continue
if self.output is not None:
color_image = self.preprocess_image(np.asarray(rgbd.color))
depth_image = self.preprocess_image(np.asarray(rgbd.depth))
if len(image_seq) < 2:
image_seq.append(np.array(color_image))
depth_seq.append(np.array(depth_image))
else:
color_filename = '{0}/train_data/{1:06d}.jpg'.format(
self.output, idx)
print('Writing to {}'.format(color_filename))
image_seq.append(np.array(color_image))
tosave_image_seq = self.concat_image_seq(image_seq)
Image.fromarray(tosave_image_seq).save(color_filename)
depth_filename = '{0}/depth_data/{1:06d}.pkl'.format(
self.output, idx)
print('Writing to {}'.format(depth_filename))
depth_seq.append(np.array(depth_image))
tosave_depth_seq = self.concat_image_seq(depth_seq)
data_dict = {'depth_gt': tosave_depth_seq}
output = open(depth_filename, 'wb')
pickle.dump(data_dict, output)
output.close()
intrinsics_filename = '{0}/train_data/{1:06d}_cam.txt'.format(self.output, idx)
self.write_intrinsics(intrinsics_scale, intrinsics_filename)
idx += 1
image_seq.pop(0)
depth_seq.pop(0)
self.reader.close()
self.generate_train_txt()
if __name__ == '__main__':
import time
parser = argparse.ArgumentParser(description='Azure kinect mkv reader.')
parser.add_argument('--input',
type=str,
required=True,
help='input mkv file')
parser.add_argument('--output',
type=str,
help='output path to store color/ and depth/ images')
args = parser.parse_args()
if args.input is None:
parser.print_help()
exit()
    if args.output is None:
        print('No output path, only play mkv')
    else:
        # append the timestamp only when an output path was actually given
        args.output += time.ctime()
try:
os.mkdir(args.output)
os.mkdir('{}/train_data'.format(args.output))
os.mkdir('{}/depth_data'.format(args.output))
except (PermissionError, FileExistsError):
print('Unable to mkdir {}, only play mkv'.format(args.output))
args.output = None
reader = KinectDataSource(args.input, args.output)
reader.run()
```
#### File: tf-monodepth2/model_new/monodepth2_learner.py
```python
from __future__ import division
import os
import time
import math
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from dataloader.data_loader import DataLoader
from model.net import Net
from utils.tools import *
import matplotlib as mpl
import matplotlib.cm as cm
from tensorflow.python.ops import control_flow_ops
class MonoDepth2Learner(object):
def __init__(self, **config):
self.config = config
self.preprocess = self.config['dataset']['preprocess']
self.min_depth = np.float(self.config['dataset']['min_depth'])
self.max_depth = np.float(self.config['dataset']['max_depth'])
self.root_dir = self.config['model']['root_dir']
self.pose_type = self.config['model']['pose_type']
self.resize_bilinear = False
def preprocess_image(self, image):
image = (image - 0.45) / 0.225
return image
def compute_reprojection_loss(self, reproj_image, tgt_image):
l1_loss = tf.reduce_mean(
tf.abs(reproj_image-tgt_image), axis=3, keepdims=True)
ssim_loss = tf.reduce_mean(
self.SSIM(reproj_image, tgt_image), axis=3, keepdims=True)
loss = self.ssim_ratio * ssim_loss + (1 - self.ssim_ratio) * l1_loss
#loss = l1_loss
return loss
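    # The photometric error above follows the Monodepth2 convention:
    #   pe(a, b) = alpha * (1 - SSIM(a, b)) / 2 + (1 - alpha) * |a - b|_1,
    # with alpha = self.ssim_ratio (0.85 in the original paper).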
def SSIM(self, x, y):
C1 = 0.01 ** 2
C2 = 0.03 ** 2
x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
y = tf.pad(y, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')
sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2
sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2
sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)
SSIM = SSIM_n / SSIM_d
return tf.clip_by_value((1 - SSIM) / 2, 0, 1)
def get_smooth_loss(self, disp, img):
norm_disp = disp / (tf.reduce_mean(disp, [1, 2], keepdims=True) + 1e-7)
grad_disp_x = tf.abs(norm_disp[:, :-1, :, :] - norm_disp[:, 1:, :, :])
grad_disp_y = tf.abs(norm_disp[:, :, :-1, :] - norm_disp[:, :, 1:, :])
grad_img_x = tf.abs(img[:, :-1, :, :] - img[:, 1:, :, :])
grad_img_y = tf.abs(img[:, :, :-1, :] - img[:, :, 1:, :])
weight_x = tf.exp(-tf.reduce_mean(grad_img_x, 3, keepdims=True))
weight_y = tf.exp(-tf.reduce_mean(grad_img_y, 3, keepdims=True))
smoothness_x = grad_disp_x * weight_x
smoothness_y = grad_disp_y * weight_y
return tf.reduce_mean(smoothness_x) + tf.reduce_mean(smoothness_y)
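    # Edge-aware smoothness as in Monodepth/Monodepth2:
    #   L_smooth = mean(|dx(d*)| * exp(-|dx(I)|) + |dy(d*)| * exp(-|dy(I)|)),
    # where d* = d / mean(d) is the mean-normalized disparity, so disparity
    # gradients are penalized less across strong image edges.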
def build_test(self, build_type='both'):
self.loader = DataLoader(trainable=False, **self.config)
self.num_scales = self.loader.num_scales
self.num_source = self.loader.num_source
with tf.name_scope('data_loading'):
self.tgt_image_uint8 = tf.placeholder(tf.uint8, [self.loader.batch_size,
self.loader.img_height, self.loader.img_width, 3])
self.tgt_image = tf.image.convert_image_dtype(
self.tgt_image_uint8, dtype=tf.float32)
tgt_image_net = self.preprocess_image(self.tgt_image)
if build_type != 'depth':
self.src_image_stack_uint8 = tf.placeholder(tf.uint8, [self.loader.batch_size,
self.loader.img_height, self.loader.img_width, 3 * self.num_source])
self.src_image_stack = tf.image.convert_image_dtype(
self.src_image_stack_uint8, dtype=tf.float32)
src_image_stack_net = self.preprocess_image(
self.src_image_stack)
with tf.variable_scope('monodepth2_model', reuse=tf.AUTO_REUSE) as scope:
net_builder = Net(False, **self.config)
res18_tc, skips_tc = net_builder.build_resnet18(tgt_image_net)
pred_disp = net_builder.build_disp_net(res18_tc, skips_tc)
pred_disp_rawscale = [tf.image.resize_nearest_neighbor(pred_disp[i], [self.loader.img_height, self.loader.img_width]) for i in
range(self.num_scales)]
pred_depth_rawscale = disp_to_depth(
pred_disp_rawscale, self.min_depth, self.max_depth)
self.pred_depth = pred_depth_rawscale[0]
self.pred_disp = pred_disp_rawscale[0]
if build_type != 'depth':
num_source = np.int(
src_image_stack_net.get_shape().as_list()[-1] // 3)
assert num_source == 2
if self.pose_type == 'seperate':
res18_ctp, _ = net_builder.build_resnet18(
tf.concat([src_image_stack_net[:, :, :, :3],
tgt_image_net], axis=3),
prefix='pose_'
)
res18_ctn, _ = net_builder.build_resnet18(
tf.concat(
[tgt_image_net, src_image_stack_net[:, :, :, 3:]], axis=3),
prefix='pose_'
)
elif self.pose_type == 'shared':
res18_tp, _ = net_builder.build_resnet18(
src_image_stack_net[:, :, :, :3])
res18_tn, _ = net_builder.build_resnet18(
src_image_stack_net[:, :, :, 3:])
res18_ctp = tf.concat([res18_tc, res18_tp], axis=3)
res18_ctn = tf.concat([res18_tc, res18_tn], axis=3)
else:
raise NotImplementedError
pred_pose_ctp = net_builder.build_pose_net2(res18_ctp)
pred_pose_ctn = net_builder.build_pose_net2(res18_ctn)
pred_poses = tf.concat([pred_pose_ctp, pred_pose_ctn], axis=1)
self.pred_poses = pred_poses
def build_train(self):
self.ssim_ratio = np.float(self.config['model']['reproj_alpha'])
self.smoothness_ratio = np.float(self.config['model']['smooth_alpha'])
self.start_learning_rate = np.float(
self.config['model']['learning_rate'])
self.total_epoch = np.int(self.config['model']['epoch'])
self.beta1 = np.float(self.config['model']['beta1'])
self.continue_ckpt = self.config['model']['continue_ckpt']
self.torch_res18_ckpt = self.config['model']['torch_res18_ckpt']
self.summary_freq = self.config['model']['summary_freq']
self.auto_mask = self.config['model']['auto_mask']
loader = DataLoader(trainable=True, **self.config)
self.num_scales = loader.num_scales
self.num_source = loader.num_source
with tf.name_scope('data_loading'):
tgt_image, src_image_stack, tgt_image_aug, src_image_stack_aug, intrinsics = loader.load_batch()
tgt_image = tf.image.convert_image_dtype(
tgt_image, dtype=tf.float32)
src_image_stack = tf.image.convert_image_dtype(
src_image_stack, dtype=tf.float32)
tgt_image_aug = tf.image.convert_image_dtype(
tgt_image_aug, dtype=tf.float32)
src_image_stack_aug = tf.image.convert_image_dtype(
src_image_stack_aug, dtype=tf.float32)
if self.preprocess:
tgt_image_net = self.preprocess_image(tgt_image_aug)
src_image_stack_net = self.preprocess_image(
src_image_stack_aug)
else:
tgt_image_net = tgt_image_aug
src_image_stack_net = src_image_stack_aug
with tf.variable_scope('monodepth2_model', reuse=tf.AUTO_REUSE) as scope:
net_builder = Net(True, **self.config)
num_source = np.int(
src_image_stack_net.get_shape().as_list()[-1] // 3)
assert num_source == 2
res18_tc, skips_tc = net_builder.build_resnet18(tgt_image_net)
if self.pose_type == 'seperate':
res18_ctp, _ = net_builder.build_resnet18(
tf.concat([src_image_stack_net[:, :, :, :3],
tgt_image_net], axis=3),
prefix='pose_'
)
res18_ctn, _ = net_builder.build_resnet18(
tf.concat(
[tgt_image_net, src_image_stack_net[:, :, :, 3:]], axis=3),
prefix='pose_'
)
elif self.pose_type == 'shared':
res18_tp, _ = net_builder.build_resnet18(
src_image_stack_net[:, :, :, :3])
res18_tn, _ = net_builder.build_resnet18(
src_image_stack_net[:, :, :, 3:])
res18_ctp = tf.concat([res18_tp, res18_tc], axis=3)
res18_ctn = tf.concat([res18_tc, res18_tn], axis=3)
else:
raise NotImplementedError
pred_pose_ctp = net_builder.build_pose_net2(res18_ctp)
pred_pose_ctn = net_builder.build_pose_net2(res18_ctn)
pred_poses = tf.concat([pred_pose_ctp, pred_pose_ctn], axis=1)
# res18_tp, _ = net_builder.build_resnet18(src_image_stack_net[:,:,:,:3])
# res18_tn, _= net_builder.build_resnet18(src_image_stack_net[:,:,:,3:])
#
# pred_poses = net_builder.build_pose_net(res18_tp, res18_tc, res18_tn)
if self.resize_bilinear:
pred_disp = net_builder.build_disp_net_bilinear(res18_tc, skips_tc)
else:
pred_disp = net_builder.build_disp_net(res18_tc, skips_tc)
H = tgt_image.get_shape().as_list()[1]
W = tgt_image.get_shape().as_list()[2]
pred_disp_rawscale = [tf.image.resize_nearest_neighbor(
pred_disp[i], [loader.img_height, loader.img_width]) for i in range(self.num_scales)]
pred_depth_rawscale = disp_to_depth(
pred_disp_rawscale, self.min_depth, self.max_depth)
tgt_image_pyramid = [tf.image.resize_nearest_neighbor(
tgt_image, [int(H // (2 ** s)), int(W // (2 ** s))]) for s in range(self.num_scales)]
with tf.name_scope('compute_loss'):
tgt_image_stack_all = []
src_image_stack_all = []
proj_image_stack_all = []
proj_error_stack_all = []
pixel_losses = 0.
smooth_losses = 0.
total_loss = 0.
if self.auto_mask:
# pred_auto_masks1 = []
# pred_auto_masks2 = []
pred_auto_masks = []
for s in range(loader.num_scales):
reprojection_losses = []
for i in range(num_source):
curr_proj_image = projective_inverse_warp(src_image_stack[:, :, :, 3*i:3*(i+1)],
tf.squeeze(
pred_depth_rawscale[s], axis=3),
pred_poses[:,
i, :],
intrinsics=intrinsics[:, 0, :, :], invert=True if i == 0 else False)
curr_proj_error = tf.abs(curr_proj_image - tgt_image)
reprojection_losses.append(
self.compute_reprojection_loss(curr_proj_image, tgt_image))
if i == 0:
proj_image_stack = curr_proj_image
proj_error_stack = curr_proj_error
else:
proj_image_stack = tf.concat(
[proj_image_stack, curr_proj_image], axis=3)
proj_error_stack = tf.concat(
[proj_error_stack, curr_proj_error], axis=3)
reprojection_losses = tf.concat(reprojection_losses, axis=3)
combined = reprojection_losses
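# Auto-masking (monodepth2): also evaluate the "identity" reprojection loss of each
# unwarped source frame against the target; pixels whose identity loss is already lower
# (static scenes, objects moving with the camera) lose the min below and are effectively
# masked out. The tiny noise term breaks ties in favour of the warped reprojections.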
if self.auto_mask:
identity_reprojection_losses = []
for i in range(num_source):
identity_reprojection_losses.append(self.compute_reprojection_loss(
src_image_stack[:, :, :, 3*i:3*(i+1)], tgt_image))
identity_reprojection_losses = tf.concat(
identity_reprojection_losses, axis=3)
identity_reprojection_losses += (tf.random_normal(
identity_reprojection_losses.get_shape()) * 1e-5)
combined = tf.concat(
[identity_reprojection_losses, reprojection_losses], axis=3)
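# Channel layout of `combined` is [identity_src0, identity_src1, reproj_src0, reproj_src1],
# so argmin > 1 means a warped reprojection wins and the pixel contributes to the loss;
# the resulting mask is kept only for the TensorBoard summaries.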
pred_auto_masks.append(tf.expand_dims(
tf.cast(tf.argmin(combined, axis=3) > 1, tf.float32) * 255, -1))
# pred_auto_masks1.append(tf.expand_dims(tf.cast(tf.argmin(tf.concat([combined[:,:,:,1:2],combined[:,:,:,3:4]],axis=3), axis=3), tf.float32) * 255,-1))
# pred_auto_masks2.append(tf.expand_dims(tf.cast(
# tf.argmin(combined, axis=3) > 1,
# tf.float32) * 255, -1))
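# Per-pixel minimum over all candidate losses (monodepth2's minimum reprojection loss),
# which is robust to occlusions/disocclusions between the two source frames.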
reprojection_loss = tf.reduce_mean(
tf.reduce_min(combined, axis=3))
pixel_losses += reprojection_loss
smooth_loss = self.get_smooth_loss(
pred_disp[s], tgt_image_pyramid[s])
smooth_losses += smooth_loss
smooth_loss /= (2 ** s)
scale_total_loss = reprojection_loss + self.smoothness_ratio * smooth_loss
total_loss += scale_total_loss
tgt_image_stack_all.append(tgt_image)
src_image_stack_all.append(src_image_stack_aug)
proj_image_stack_all.append(proj_image_stack)
proj_error_stack_all.append(proj_error_stack)
total_loss /= loader.num_scales
pixel_losses /= loader.num_scales
smooth_losses /= loader.num_scales
with tf.name_scope('train_op'):
self.total_step = self.total_epoch * loader.steps_per_epoch
self.global_step = tf.Variable(
0, name='global_step', trainable=False)
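# Step-decay schedule: constant learning rate for the first 3/4 of training, then a 10x
# drop for the remainder, mirroring the monodepth2 schedule.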
learning_rates = [self.start_learning_rate,
self.start_learning_rate / 10]
boundaries = [int(self.total_step * 3 / 4)]
self.learning_rate = tf.train.piecewise_constant(
self.global_step, boundaries, learning_rates)
optimizer = tf.train.AdamOptimizer(self.learning_rate, self.beta1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_op = optimizer.minimize(
total_loss, global_step=self.global_step)
self.incr_global_step = tf.assign(
self.global_step, self.global_step + 1)
# Collect tensors that are useful later (e.g. tf summary)
self.pred_depth = pred_depth_rawscale
self.pred_disp = pred_disp
self.pred_poses = pred_poses
self.steps_per_epoch = loader.steps_per_epoch
self.total_loss = total_loss
self.pixel_loss = pixel_losses
self.smooth_loss = smooth_losses
self.tgt_image_all = tgt_image_stack_all
self.src_image_stack_all = src_image_stack_all
self.proj_image_stack_all = proj_image_stack_all
self.proj_error_stack_all = proj_error_stack_all
if self.auto_mask:
self.pred_auto_masks = pred_auto_masks
def collect_summaries(self):
tf.summary.scalar("total_loss", self.total_loss)
tf.summary.scalar("pixel_loss", self.pixel_loss)
tf.summary.scalar("smooth_loss", self.smooth_loss)
tf.summary.image('tgt_image', self.tgt_image_all[0])
for s in range(self.num_scales):
tf.summary.image('scale{}_disparity_color_image'.format(
s), colorize(self.pred_disp[s], cmap='plasma'))
tf.summary.image('scale{}_disparity_gray_image'.format(
s), normalize_image(self.pred_disp[s]))
if self.auto_mask:
tf.summary.image('scale{}_automask_image'.format(
s), self.pred_auto_masks[s])
# tf.summary.image('scale{}_automask2_image'.format(s), self.pred_auto_masks2[s])
for i in range(self.num_source):
tf.summary.image('scale{}_projected_image_{}'.format(s, i),
self.proj_image_stack_all[s][:, :, :, i * 3:(i + 1) * 3])
tf.summary.image('scale{}_proj_error_{}'.format(s, i),
self.proj_error_stack_all[s][:, :, :, i * 3:(i + 1) * 3])
tf.summary.histogram("tx", self.pred_poses[:, :, 0])
tf.summary.histogram("ty", self.pred_poses[:, :, 1])
tf.summary.histogram("tz", self.pred_poses[:, :, 2])
tf.summary.histogram("rx", self.pred_poses[:, :, 3])
tf.summary.histogram("ry", self.pred_poses[:, :, 4])
tf.summary.histogram("rz", self.pred_poses[:, :, 5])
def train(self, ckpt_dir):
self.build_train()
init = tf.global_variables_initializer()
self.collect_summaries()
# load weights from pytorch resnet 18 model
if self.torch_res18_ckpt != '':
assign_ops = load_resnet18_from_file(self.torch_res18_ckpt)
with tf.name_scope("parameter_count"):
parameter_count = tf.reduce_sum(
[tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])
var_list = [var for var in tf.global_variables()
if "moving" in var.name]
var_list += tf.trainable_variables()
self.saver = tf.train.Saver(
var_list + [self.global_step], max_to_keep=10)
sv = tf.train.Supervisor(
logdir=ckpt_dir, save_summaries_secs=0, saver=None)
# print('/n/n/nCollections=====================',tf.get_collection(tf.GraphKeys.UPDATE_OPS))
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with sv.managed_session(config=config) as sess:
# print('Trainable variables: ')
# for var in var_list:
# print(var.name)
#
# print('\n\n==========================================')
# print('Model variables:')
# for var in tf.model_variables():
# print(var.name)
#
# print('\n\n==========================================')
# print('Global variables:')
# for var in tf.global_variables():
# print(var.name)
print("parameter_count =", sess.run(parameter_count))
sess.run(init)
if self.continue_ckpt != '':
print("Resume training from previous checkpoint: %s" %
self.continue_ckpt)
# ckpt = tf.train.latest_checkpoint('{}/{}'.format(self.root_dir,self.continue_ckpt))
self.saver.restore(sess, self.continue_ckpt)
elif self.torch_res18_ckpt != '':
sess.run(assign_ops)
start_time = time.time()
try:
for step in range(0, self.total_step):
fetches = {
"train": self.train_op,
"global_step": self.global_step,
"incr_global_step": self.incr_global_step
}
if step % self.summary_freq == 0:
fetches["loss"] = self.total_loss
fetches["pixel_loss"] = self.pixel_loss
fetches["smooth_loss"] = self.smooth_loss
fetches["summary"] = sv.summary_op
fetches["lr"] = self.learning_rate
results = sess.run(fetches)
gs = results["global_step"]
if step % self.summary_freq == 0:
sv.summary_writer.add_summary(results["summary"], gs)
train_epoch = math.ceil(gs / self.steps_per_epoch)
train_step = gs - (train_epoch - 1) * \
self.steps_per_epoch
print("Epoch: [{}] | [{}/{}] | time: {:.4f} s/it | loss: {:.4f} pixel_loss: {:.4f} smooth_loss: {:.4f} | lr: {:.5f}".format
(train_epoch, train_step, self.steps_per_epoch,
(time.time() - start_time) /
self.summary_freq,
results["loss"], results["pixel_loss"], results["smooth_loss"], results["lr"]))
start_time = time.time()
if step != 0 and step % (self.steps_per_epoch * 2) == 0:
self.save(sess, ckpt_dir, gs)
except:
self.save(sess, ckpt_dir, 'latest')
self.save(sess, ckpt_dir, 'latest')
def eval_depth(self, sess, ckpt_name):
with open('data/kitti/test_files_eigen.txt', 'r') as f:
test_files = f.readlines()
test_files = [self.config['dataset']['root_dir'] + t[:-1]
for t in test_files]
if not os.path.exists(self.config['output_dir']):
os.makedirs(self.config['output_dir'])
if not os.path.exists('{}/depth'.format(self.config['output_dir'])):
os.makedirs('{}/depth'.format(self.config['output_dir']))
basename = os.path.basename(ckpt_name)
output_file = self.config['output_dir'] + '/depth/' + basename
print('[MSG] save path: {}'.format(output_file))
pred_all = []
import cv2
print(len(test_files))
for t in range(len(test_files)//self.loader.batch_size + 1):
tgt_image_np_batch = []
for d in range(self.loader.batch_size):
image = cv2.imread(
test_files[(t * self.loader.batch_size + d) % len(test_files)])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(
image, (self.loader.img_width, self.loader.img_height), interpolation=cv2.INTER_AREA)
image = np.expand_dims(image, axis=0)
if d == 0:
tgt_image_np_batch = image
else:
tgt_image_np_batch = np.vstack((tgt_image_np_batch, image))
fetches = {
'depth': self.pred_depth,
'disp': self.pred_disp
}
results = sess.run(fetches, feed_dict={
self.tgt_image_uint8: tgt_image_np_batch})
pred_depth = np.squeeze(results['depth'])
if len(pred_depth.shape) == 2:
pred_depth = np.expand_dims(pred_depth, axis=0)
if t == 0:
pred_all = pred_depth
else:
pred_all = np.vstack((pred_all, pred_depth))
# pred_all.append(pred_depth)
cv2.waitKey()
pred_all = pred_all[:len(test_files)]
print(np.array(pred_all).shape, '--------------------------')
np.save(output_file, pred_all)
def eval_pose(self, sess, ckpt_name):
raise NotImplementedError
def eval(self, ckpt_name, eval_type):
self.build_test(build_type=eval_type)
var_list = [var for var in tf.global_variables()
if "moving" in var.name]
var_list += tf.trainable_variables()
self.saver = tf.train.Saver(var_list, max_to_keep=10)
# for var in var_list:
# print(var)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
if ckpt_name == '':
print('No pretrained model provided, exit...')
raise ValueError
print('load pretrained model from: {}'.format(ckpt_name))
latest_ckpt = tf.train.latest_checkpoint('{}'.format(ckpt_name))
self.saver.restore(sess, latest_ckpt)
if eval_type == 'depth':
self.eval_depth(sess, ckpt_name)
elif eval_type == 'pose':
self.eval_pose(sess, ckpt_name)
else:
raise ValueError
def test(self, ckpt_dir):
self.build_test()
var_list = [var for var in tf.global_variables()
if "moving" in var.name]
var_list += tf.trainable_variables()
self.saver = tf.train.Saver(var_list, max_to_keep=10)
# for var in tf.model_variables():
# print(var)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
if ckpt_dir == '':
print('No pretrained model provided, exit. ')
raise ValueError
print("load trained model")
print('load pretrained model from: {}'.format(ckpt_dir))
latest_ckpt = tf.train.latest_checkpoint('{}'.format(ckpt_dir))
self.saver.restore(sess, latest_ckpt)
file_list = self.loader.format_file_list(
self.loader.dataset_dir, 'val')
image_lists = file_list['image_file_list']
import cv2
# vid = cv2.VideoWriter('./result/depth.mp4', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), float(30), (640,576), True)
for step in range(1, len(image_lists)):
image = cv2.imread(image_lists[step])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
former_image_np = image[:, :self.loader.img_width, :]
tgt_image_np = image[:,
self.loader.img_width: self.loader.img_width*2, :]
next_image_np = image[:, self.loader.img_width*2:, :]
src_image_stack = np.concatenate(
(former_image_np, next_image_np), axis=2)
tgt_image_np = np.expand_dims(tgt_image_np, axis=0)
src_image_stack = np.expand_dims(src_image_stack, axis=0)
fetches = {
'depth': self.pred_depth,
'disp': self.pred_disp,
'poses': self.pred_poses
}
results = sess.run(fetches, feed_dict={
self.tgt_image_uint8: tgt_image_np, self.src_image_stack_uint8: src_image_stack})
disp_resized_np = np.squeeze(results['disp'])
vmax = np.percentile(disp_resized_np, 95)
normalizer = mpl.colors.Normalize(
vmin=disp_resized_np.min(), vmax=vmax)
mapper = mpl.cm.ScalarMappable(norm=normalizer, cmap='plasma')
colormapped_im = (mapper.to_rgba(disp_resized_np)[
:, :, :3][:, :, ::-1] * 255).astype(np.uint8)
disp_rgb = cv2.cvtColor(disp_resized_np, cv2.COLOR_GRAY2RGB)
disp_rgb_int = (disp_rgb * 255.).astype(np.uint8)
tgt_image_np = np.squeeze(tgt_image_np)
tgt_image_np = cv2.cvtColor(tgt_image_np, cv2.COLOR_BGR2RGB)
toshow_image = np.vstack(
(tgt_image_np, colormapped_im, disp_rgb_int))
print(toshow_image.shape)
cv2.imshow('depth', toshow_image)
# vid.write(toshow_image)
#cv2.imwrite('./result/predicted_depth/depth_{}.png'.format(step), toshow_image)
cv2.waitKey(30)
def save(self, sess, checkpoint_dir, step):
model_name = 'model'
print(" [*] Saving checkpoint to {}...".format(checkpoint_dir))
if step == 'latest':
self.saver.save(sess, os.path.join(
checkpoint_dir, model_name + '.latest'))
else:
self.saver.save(sess, os.path.join(
checkpoint_dir, model_name), global_step=step)
```
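The learner above is driven entirely by a nested config dictionary (`config['model'][...]`, `config['dataset'][...]`, `config['output_dir']`). A minimal driver sketch is shown below for orientation; the `MonoDepth2Learner` class name, the `monodepth2_learner` module, the YAML file name and the `mode` switch are assumptions for illustration and may not match the repository's actual entry point.
```python
# Hypothetical driver sketch -- module, class and config names are assumptions,
# not the repository's documented API.
import yaml
def main(config_path='config/kitti.yml', mode='train', ckpt_dir='checkpoints/monodepth2'):
    with open(config_path) as f:
        config = yaml.safe_load(f)  # nested dict: config['model'], config['dataset'], ...
    from monodepth2_learner import MonoDepth2Learner  # assumed module/class name
    learner = MonoDepth2Learner(**config)
    if mode == 'train':
        learner.train(ckpt_dir)                    # builds the graph, runs the Supervisor loop
    elif mode == 'eval_depth':
        learner.eval(ckpt_dir, eval_type='depth')  # dumps predicted depths to output_dir/depth
    else:
        learner.test(ckpt_dir)                     # live disparity visualization with OpenCV
if __name__ == '__main__':
    main()
```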
#### File: tf-monodepth2/utils/process_data.py
```python
import os
import pdb
import h5py
import pickle
import numpy as np
from scipy.io import loadmat
import cv2
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import csv
import bisect
import matplotlib as mpl
import matplotlib.cm as cm
import tensorflow as tf
# from bilateral_filter import bilateral_filter
from tools import *
def resave_imu_data():
dataset = '/freezer/nyudepthV2_raw'
seqs = os.listdir(dataset)
for seq in seqs:
seq_dir = dataset + '/' + seq
for data in os.listdir(seq_dir):
if data[0] == 'a':
imu_data_path = seq_dir + '/' + data
resave_imu_data_path = seq_dir + '/' + data[:-4] + '.txt'
call_resave_imu(imu_data_path, resave_imu_data_path)
def call_resave_imu(orig_path, resave_path):
command = './resave_imu ' + orig_path + ' ' + resave_path
os.system(command)
def collect_acc_data(folder):
data_list = []
for file in os.listdir(folder):
if file[0] == 'a' and file[-1] == 't':
data_list.append(folder + '/' + file)
return sorted(data_list)
def get_acc_timestamp(path):
return float(path.split('-')[1])
def read_acc_data(file_path):
timestamp = get_acc_timestamp(file_path)
file = open(file_path, 'r')
data = file.read().split(',')
for i in range(len(data)):
data[i] = float(data[i])
data.insert(0, timestamp)
return data
def plot_acc_data(folder):
acc_path = collect_acc_data(folder)
x = []
y = []
z = []
for path in acc_path:
data = read_acc_data(path)
x.append(data[1])
y.append(data[2])
z.append(data[3])
plt.plot(x)
plt.plot(y)
plt.plot(z)
plt.show()
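# compute_acc_vel_pos below does simple dead reckoning: trapezoidal integration of two
# consecutive accelerometer samples to propagate velocity and position. This assumes
# gravity-compensated acceleration in a fixed frame; with raw accelerometer data the
# integrated trajectory drifts quickly.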
def compute_acc_vel_pos(acc_data_1, acc_data_2, v_1, p_1):
t1 = acc_data_1[0]
t2 = acc_data_2[0]
t_delta = t2 - t1
acc_xyz_1 = np.array(acc_data_1[1:4])
acc_xyz_2 = np.array(acc_data_2[1:4])
a_avg = (acc_xyz_1 + acc_xyz_2) / 2.
v_2 = v_1 + a_avg * t_delta
p_2 = p_1 + v_1 * t_delta + a_avg * t_delta * t_delta / 2.
# pdb.set_trace()
return v_2, p_2
def plot_imu_traj(folder):
acc_path = collect_acc_data(folder)
p_x = []
p_y = []
p_z = []
v_cur = np.array([0., 0., 0.])
p_cur = np.array([0., 0., 0.])
N = len(acc_path)
for idx in range(N-1):
p_x.append(p_cur[0])
p_y.append(p_cur[1])
p_z.append(p_cur[2])
acc_1 = read_acc_data(acc_path[idx])
acc_2 = read_acc_data(acc_path[idx + 1])
v_cur, p_cur = compute_acc_vel_pos(acc_1, acc_2, v_cur, p_cur)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax.scatter(p_x[:], p_y[:], p_z[:0])
ax.plot(p_x[:-1], p_y[:-1], p_z[:-1])
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
# plt.plot(p_x)
# plt.plot(p_y)
# plt.plot(p_z)
# plt.show()
def plot_trajectory(data_file_name):
data = open(data_file_name,"rb")
poses_log = pickle.load(data)
poses_mat_log = []
import torch
for i in range(len(poses_log.keys())):
pose = poses_log[i]
pose = np.expand_dims(pose, axis=0)
pose = np.expand_dims(pose, axis=0)
pose_mat = transformation_from_parameters(torch.tensor(pose[:, :, :3]).float(), torch.tensor(pose[:, :, 3:]).float(), False)
poses_mat_log.append(pose_mat.numpy())
xyzs = np.array(dump_xyz(poses_mat_log))
xs = []
ys = []
zs = []
for i in range(xyzs.shape[0]):
xs.append(xyzs[i][0])
ys.append(xyzs[i][1])
zs.append(xyzs[i][2])
plt.plot(xs, ys)
plt.savefig(
'/home/jiatian/dataset/rs_eval/pose/' + str(i).zfill(6) + '.jpg')
def delete_folder(folder, num):
dirlist = sorted(os.listdir(folder))
num_total = len(dirlist)
for idx in range(num):
datapath = folder + '/' + dirlist[idx]
os.remove(datapath)
for idx in range(num_total - num, num_total):
datapath = folder + '/' + dirlist[idx]
os.remove(datapath)
def vis_rgb_pose_image(folder_1, folder_2):
dirlist_1 = sorted(os.listdir(folder_1))
dirlist_2 = sorted(os.listdir(folder_2))
for idx in range(len(dirlist_1)):
data_1 = Image.open(folder_1 + '/' + dirlist_1[idx]).convert('RGB')
data_2 = Image.open(folder_2 + '/' + dirlist_2[idx]).convert('RGB')
image_show = np.hstack((np.array(data_1), np.array(data_2)))
img = Image.fromarray(image_show)
img.save('/home/jiatian/dataset/rs_eval/comp/' + dirlist_2[idx])
def read_csv(path, folder):
dict_gt = {}
ts = []
with open(path, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
line_count += 1
line_count += 1
dict_gt[row['#timestamp']] = row
ts.append(row['#timestamp'])
print(f'Processed {line_count} lines.')
dict_img = {}
dirlist = sorted(os.listdir(folder))
match = 0
unmatch = 0
ts_valid = []
for img_path in dirlist:
key = img_path[:-4]
ts_valid.append(key)
try:
print(dict_gt[key])
match += 1
dict_img[key] = dict_gt[key]
except:
unmatch += 1
idx = bisect.bisect_left(ts, key)
if idx == len(ts):
idx -= 1
dict_img[key] = dict_gt[ts[idx]]
print('ERROR', key)
print('UNMATCH', unmatch)
# plot_xyz(ts_valid, dict_img)
calAxisAngle(ts_valid, dict_img)
def calAxisAngle(ts_valid, dict_img):
nums = len(ts_valid)
dict_pose = {}
for i in range(nums-2):
ts_cur = ts_valid[i]
ts_next = ts_valid[i+2]
data_cur = dict_img[ts_cur]
data_next = dict_img[ts_next]
diff_tx = float(data_next[' p_RS_R_x [m]']) - float(data_cur[' p_RS_R_x [m]'])
diff_ty = float(data_next[' p_RS_R_y [m]']) - float(data_cur[' p_RS_R_y [m]'])
diff_tz = float(data_next[' p_RS_R_z [m]']) - float(data_cur[' p_RS_R_z [m]'])
diff_qw = float(data_next[' q_RS_w []']) - float(data_cur[' q_RS_w []'])
diff_qx = float(data_next[' q_RS_x []']) - float(data_cur[' q_RS_x []'])
diff_qy = float(data_next[' q_RS_y []']) - float(data_cur[' q_RS_y []'])
diff_qz = float(data_next[' q_RS_z []']) - float(data_cur[' q_RS_z []'])
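# NOTE: component-wise subtraction of quaternion components is only a rough proxy for the
# relative rotation; the exact relative orientation would be q_rel = q_next * conj(q_cur).
# The axis-angle extraction below also divides by sqrt(1 - w^2), which is ill-conditioned
# when the normalized difference has |w| close to 1.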
diff_norm = np.linalg.norm([diff_qw, diff_qx, diff_qy, diff_qz])
diff_qw = diff_qw / (diff_norm + 1e-7)
diff_qx = diff_qx / (diff_norm + 1e-7)
diff_qy = diff_qy / (diff_norm + 1e-7)
diff_qz = diff_qz / (diff_norm + 1e-7)
angle = 2 * np.arccos(diff_qw)
rx = diff_qx * angle / np.sqrt(1 - diff_qw * diff_qw)
ry = diff_qy * angle / np.sqrt(1 - diff_qw * diff_qw)
rz = diff_qz * angle / np.sqrt(1 - diff_qw * diff_qw)
dict_pose[ts_cur] = [rx, ry, rz, diff_tx, diff_ty, diff_tz]
dict_file_name = '/home/jiatian/dataset/euroc/V1_01_easy/mav0/cam0_pose.pkl'
f = open(dict_file_name, "wb")
pickle.dump(dict_pose, f)
f.close()
def plot_xyz(ts, dict):
xs = []
ys = []
zs = []
nums = len(ts)
for i in range(nums):
time = ts[i]
xs.append(float(dict[time][' p_RS_R_x [m]']))
ys.append(float(dict[time][' p_RS_R_y [m]']))
zs.append(float(dict[time][' p_RS_R_z [m]']))
# ' q_RS_w []' ' q_RS_x []' ' q_RS_y []' ' q_RS_z []'
plt.plot(xs, ys)
plt.savefig(
'/home/jiatian/dataset/euroc/V1_01_easy/mav0/cam0/traj' + str(i).zfill(6) + '.jpg')
if __name__ == "__main__":
read_csv('/home/jiatian/dataset/euroc/V1_01_easy/mav0/state_groundtruth_estimate0/data.csv', '/home/jiatian/dataset/euroc/V1_01_easy/mav0/cam0/data')
# vis_rgb_pose_image('/home/jiatian/dataset/recordvi/402-000-undistort', '/home/jiatian/dataset/rs_eval/pose')
# plot_trajectory('/home/jiatian/dataset/rs_eval/pose/poses_log.pickle')
# delete_folder('/home/jiatian/dataset/realsense/recordvi-4-02-00/402-004-undistort', 300)
```
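`calAxisAngle` above stores each relative pose as a six-vector `[rx, ry, rz, tx, ty, tz]` (axis-angle rotation plus translation), keyed by timestamp, in `cam0_pose.pkl`. A small sketch of turning those entries back into 4x4 transforms with OpenCV's Rodrigues conversion follows; the helper name and the printing loop are illustrative assumptions, only the pickle layout is taken from the code above.
```python
# Sketch: rebuild 4x4 relative transforms from the [rx, ry, rz, tx, ty, tz] pose vectors
# written by calAxisAngle(); helper name and usage are illustrative assumptions.
import pickle
import numpy as np
import cv2
def pose_vec_to_mat(pose_vec):
    rx, ry, rz, tx, ty, tz = pose_vec
    R, _ = cv2.Rodrigues(np.array([rx, ry, rz], dtype=np.float64))  # axis-angle -> 3x3 rotation
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = [tx, ty, tz]
    return T
if __name__ == '__main__':
    with open('/home/jiatian/dataset/euroc/V1_01_easy/mav0/cam0_pose.pkl', 'rb') as f:
        dict_pose = pickle.load(f)
    for ts, vec in list(dict_pose.items())[:5]:
        print(ts)
        print(pose_vec_to_mat(vec))
```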
#### File: tf-monodepth2/utils/test_open3d.py
```python
import os
import pdb
import h5py
import pickle
import numpy as np
from scipy.io import loadmat
import open3d as o3d
import cv2
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import matplotlib as mpl
import matplotlib.cm as cm
import tensorflow as tf
from bilateral_filter import bilateral_filter
from tools import *
def resave_image(path):
image = Image.open(path)
image.save(path[:-4] + '.png')
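# CameraPose and the read/write_trajectory helpers below use the Redwood/Open3D .log
# trajectory format: each entry is one metadata line (three frame indices) followed by a
# 4x4 pose matrix.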
class CameraPose:
def __init__(self, meta, mat):
self.metadata = meta
self.pose = mat
def __str__(self):
return 'Metadata : ' + ' '.join(map(str, self.metadata)) + '\n' + \
"Pose : " + "\n" + np.array_str(self.pose)
def read_trajectory(filename):
traj = []
with open(filename, 'r') as f:
metastr = f.readline()
while metastr:
metadata = list(map(int, metastr.split()))
# import pdb; pdb.set_trace()
mat = np.zeros(shape=(4, 4))
for i in range(4):
matstr = f.readline()
mat[i, :] = np.fromstring(matstr, dtype=float, sep=' \t')
traj.append(CameraPose(metadata, mat))
metastr = f.readline()
return traj
def write_trajectory(traj, filename):
with open(filename, 'w') as f:
for x in traj:
p = x.pose.tolist()
f.write(' '.join(map(str, x.metadata)) + '\n')
f.write('\n'.join(
' '.join(map('{0:.12f}'.format, p[i])) for i in range(4)))
f.write('\n')
def rgbd_odometry_default():
test_data_folder = '/home/nod/project/Open3D/examples/TestData/'
pinhole_camera_intrinsic = o3d.io.read_pinhole_camera_intrinsic(
test_data_folder + 'camera_primesense.json')
print(pinhole_camera_intrinsic.intrinsic_matrix)
source_color = o3d.io.read_image(test_data_folder + 'RGBD/color/00000.jpg')
source_depth = o3d.io.read_image(test_data_folder + 'RGBD/depth/00000.png')
target_color = o3d.io.read_image(test_data_folder + 'RGBD/color/00001.jpg')
target_depth = o3d.io.read_image(test_data_folder + 'RGBD/depth/00001.png')
source_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
source_color, source_depth)
target_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
target_color, target_depth)
target_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
target_rgbd_image, pinhole_camera_intrinsic)
option = o3d.odometry.OdometryOption()
odo_init = np.identity(4)
print(option)
# [success_color_term, trans_color_term, info] = o3d.odometry.compute_rgbd_odometry(
# source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic,
# odo_init, o3d.odometry.RGBDOdometryJacobianFromColorTerm(), option)
[success_hybrid_term, trans_hybrid_term, info] = o3d.odometry.compute_rgbd_odometry(
source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic,
odo_init, o3d.odometry.RGBDOdometryJacobianFromHybridTerm(), option)
# if success_color_term:
# print("Using RGB-D Odometry")
# print(trans_color_term)
# import pdb; pdb.set_trace()
# source_pcd_color_term = o3d.geometry.PointCloud.create_from_rgbd_image(
# source_rgbd_image, pinhole_camera_intrinsic)
# source_pcd_color_term.transform(trans_color_term)
# o3d.visualization.draw_geometries([target_pcd, source_pcd_color_term])
if success_hybrid_term:
print("Using Hybrid RGB-D Odometry")
print(trans_hybrid_term)
import pdb; pdb.set_trace()
source_pcd_hybrid_term = o3d.geometry.PointCloud.create_from_rgbd_image(
source_rgbd_image, pinhole_camera_intrinsic)
source_pcd_hybrid_term.transform(trans_hybrid_term)
o3d.visualization.draw_geometries([target_pcd, source_pcd_hybrid_term],
zoom=0.48,
front=[0.0999, -0.1787, -0.9788],
lookat=[0.0345, -0.0937, 1.8033],
up=[-0.0067, -0.9838, 0.1790])
def rgbd_odometry_nyu():
test_data_folder = '/home/nod/datasets/nyudepthV2/test_kitchen/'
pinhole_camera_intrinsic = o3d.io.read_pinhole_camera_intrinsic(
test_data_folder + 'camera_primesense.json')
print(pinhole_camera_intrinsic.intrinsic_matrix)
idx = 0
odo_log = []
cam_to_world = np.eye(4)
meta_str = str(idx) + ' ' + str(idx) + ' ' + str(idx + 1)
odo_log.append(CameraPose(meta_str, cam_to_world))
for idx in range(0, 103):
source_idx = str(idx).zfill(6)
source_color = o3d.io.read_image(test_data_folder + 'color/' + source_idx + '.jpg')
source_depth = o3d.io.read_image(test_data_folder + 'depth/' + source_idx + '.png')
np.asarray(source_depth)[np.asarray(source_depth) > np.percentile(np.asarray(source_depth), 80)] = 0
target_idx = str(idx + 1).zfill(6)
target_color = o3d.io.read_image(test_data_folder + 'color/' + target_idx + '.jpg')
target_depth = o3d.io.read_image(test_data_folder + 'depth/' + target_idx + '.png')
np.asarray(target_depth)[np.asarray(target_depth) > np.percentile(np.asarray(target_depth), 80)] = 0
source_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
source_color, source_depth)
target_rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
target_color, target_depth)
target_pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
target_rgbd_image, pinhole_camera_intrinsic)
option = o3d.odometry.OdometryOption()
odo_init = np.identity(4)
print(option)
[success_hybrid_term, trans_hybrid_term, info] = o3d.odometry.compute_rgbd_odometry(
source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic,
odo_init, o3d.odometry.RGBDOdometryJacobianFromHybridTerm(), option)
if success_hybrid_term:
print("Using Hybrid RGB-D Odometry")
print(trans_hybrid_term)
meta_str = str(idx + 1) + ' ' + str(idx + 1) + ' ' + str(idx + 2)
cam_to_world = np.dot(cam_to_world, trans_hybrid_term)
odo_log.append(CameraPose(meta_str, cam_to_world))
# source_pcd_hybrid_term = o3d.geometry.PointCloud.create_from_rgbd_image(
# source_rgbd_image, pinhole_camera_intrinsic)
# source_pcd_hybrid_term.transform(trans_hybrid_term)
# o3d.visualization.draw_geometries([target_pcd, source_pcd_hybrid_term])
else:
print("FAIL ", idx)
return
write_trajectory(odo_log, '/home/nod/datasets/nyudepthV2/test_kitchen/odometry.log')
def dump_xyz(source_to_target_transformations):
xyzs = []
cam_to_world = np.eye(4)
xyzs.append(cam_to_world[:3, 3])
for source_to_target_transformation in source_to_target_transformations:
cam_to_world = np.dot(cam_to_world, source_to_target_transformation[0, :, :])
xyzs.append(cam_to_world[:3, 3])
return xyzs
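# TSDF fusion with Open3D's ScalableTSDFVolume: voxel_length = 4.0/512 (about 7.8 mm
# voxels) and a 4 cm SDF truncation band, as in the Open3D integration example. Note the
# official example passes np.linalg.inv(camera_pose) as the extrinsic to integrate();
# whether that inverse is needed here depends on the convention used when odometry.log
# was written above.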
def tsdf():
test_data_folder = '/home/nod/datasets/nyudepthV2/test_kitchen/'
camera_intrinsics = o3d.io.read_pinhole_camera_intrinsic(
test_data_folder + 'camera_primesense.json')
camera_poses = read_trajectory(test_data_folder + 'odometry.log')
volume = o3d.integration.ScalableTSDFVolume(
voxel_length=4.0 / 512.0,
sdf_trunc=0.04,
color_type=o3d.integration.TSDFVolumeColorType.RGB8)
# volume = o3d.integration.UniformTSDFVolume(
# length=4.0,
# resolution=512,
# sdf_trunc=0.04,
# color_type=o3d.integration.TSDFVolumeColorType.RGB8,
# )
for i in range(0, 103, 1):
# for i in range(2):
print("Integrate {:d}-th image into the volume.".format(i))
color = o3d.io.read_image(
test_data_folder + 'color/{:06d}.jpg'.format(i))
depth = o3d.io.read_image(
test_data_folder + 'depth/{:06d}.png'.format(i))
# Clip far/near depth outliers before building the RGBD pair; modifying the depth buffer
# after create_from_color_and_depth() would have no effect on the already-converted copy.
np.asarray(depth)[np.asarray(depth) > np.percentile(np.asarray(depth), 80)] = 0
np.asarray(depth)[np.asarray(depth) < np.percentile(np.asarray(depth), 20)] = 0
rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
color, depth, depth_trunc=4.0, convert_rgb_to_intensity=False)
volume.integrate(
rgbd,
camera_intrinsics,
camera_poses[i].pose,
)
print("Extract triangle mesh")
mesh = volume.extract_triangle_mesh()
mesh.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh])
print("Extract voxel-aligned debugging point cloud")
voxel_pcd = volume.extract_voxel_point_cloud()
o3d.visualization.draw_geometries([voxel_pcd])
print("Extract voxel-aligned debugging voxel grid")
voxel_grid = volume.extract_voxel_grid()
o3d.visualization.draw_geometries([voxel_grid])
print("Extract point cloud")
pcd = volume.extract_point_cloud()
o3d.visualization.draw_geometries([pcd])
def overlay_pc():
print("Testing camera in open3d ...")
intrinsic = o3d.camera.PinholeCameraIntrinsic(
o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
print(intrinsic.intrinsic_matrix)
print(o3d.camera.PinholeCameraIntrinsic())
x = o3d.camera.PinholeCameraIntrinsic(640, 480, 518.8579, 519.4696, 325.5824, 253.7362)
print(x)
print(x.intrinsic_matrix)
o3d.io.write_pinhole_camera_intrinsic("test.json", x)
y = o3d.io.read_pinhole_camera_intrinsic("test.json")
print(y)
print(np.asarray(y.intrinsic_matrix))
print("Read a trajectory and combine all the RGB-D images.")
pcds = []
test_data_folder = '/home/nod/datasets/nyudepthV2/test_kitchen/'
trajectory = o3d.io.read_pinhole_camera_trajectory(
test_data_folder + 'odometry.log')
o3d.io.write_pinhole_camera_trajectory("test.json", trajectory)
print(trajectory)
print(trajectory.parameters[0].extrinsic)
print(np.asarray(trajectory.parameters[0].extrinsic))
for i in range(23, 80, 5):
color = o3d.io.read_image(
test_data_folder + 'color/{:06d}.jpg'.format(i))
depth = o3d.io.read_image(
test_data_folder + 'depth/{:06d}.png'.format(i))
np.asarray(depth)[np.asarray(depth) > np.percentile(np.asarray(depth), 50)] = 0
im = o3d.geometry.RGBDImage.create_from_color_and_depth(
color, depth, depth_trunc=4.0, convert_rgb_to_intensity=False)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
im, trajectory.parameters[i].intrinsic,
trajectory.parameters[i].extrinsic)
pcds.append(pcd)
o3d.visualization.draw_geometries(pcds)
print("")
if __name__ == "__main__":
# read_trajectory('/home/nod/project/Open3D/examples/TestData/RGBD/odometry.log')
# rgbd_odometry_nyu()
# resave_image('/home/nod/datasets/nyudepthV2/test/d-1315403270.612296-3850931981.pgm')
tsdf()
# overlay_pc()
``` |