metadata: {"source": "Jian137/mmediting-1", "score": 2}
#### File: core/evaluation/eval_hooks.py
```python
import os.path as osp
from mmcv.runner import Hook
from torch.utils.data import DataLoader
class EvalIterHook(Hook):
"""Non-Distributed evaluation hook for iteration-based runner.
    This hook will regularly perform evaluation at the given interval when
    running in a non-distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval. Default: 1.
eval_kwargs (dict): Other eval kwargs. It contains:
save_image (bool): Whether to save image.
save_path (str): The path to save image.
"""
def __init__(self, dataloader, interval=1, **eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, '
                            f'but got {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
self.save_image = self.eval_kwargs.pop('save_image', False)
self.save_path = self.eval_kwargs.pop('save_path', None)
def after_train_iter(self, runner):
"""The behavior after each train iteration.
Args:
runner (``mmcv.runner.BaseRunner``): The runner.
"""
if not self.every_n_iters(runner, self.interval):
return
runner.log_buffer.clear()
from mmedit.apis import single_gpu_test
results = single_gpu_test(
runner.model,
self.dataloader,
save_image=self.save_image,
save_path=self.save_path,
iteration=runner.iter)
self.evaluate(runner, results)
def evaluate(self, runner, results):
"""Evaluation function.
Args:
runner (``mmcv.runner.BaseRunner``): The runner.
results (dict): Model forward results.
"""
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
class DistEvalIterHook(EvalIterHook):
"""Distributed evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval. Default: 1.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
eval_kwargs (dict): Other eval kwargs. It may contain:
            save_image (bool): Whether to save image.
save_path (str): The path to save image.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
**eval_kwargs):
super().__init__(dataloader, interval, **eval_kwargs)
self.gpu_collect = gpu_collect
def after_train_iter(self, runner):
"""The behavior after each train iteration.
Args:
runner (``mmcv.runner.BaseRunner``): The runner.
"""
if not self.every_n_iters(runner, self.interval):
return
runner.log_buffer.clear()
from mmedit.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect,
save_image=self.save_image,
save_path=self.save_path,
iteration=runner.iter)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
```
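A minimal usage sketch for the hook above. It assumes a validation dataloader (`val_dataloader`) and an iteration-based `runner` already built elsewhere; the interval and save path are illustrative.
```python
# Hedged sketch: `val_dataloader` and `runner` are assumed to exist already
# (e.g. built via mmedit's build_dataloader and mmcv's IterBasedRunner).
eval_hook = EvalIterHook(
    val_dataloader,
    interval=5000,               # evaluate every 5000 iterations
    save_image=True,             # forwarded to single_gpu_test via eval_kwargs
    save_path='work_dirs/demo/val_visuals')
runner.register_hook(eval_hook)
```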
#### File: core/export/wrappers.py
```python
import os.path as osp
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn
from mmedit.models import BaseMattor, BasicRestorer, build_model
def inference_with_session(sess, io_binding, output_names, input_tensor):
device_type = input_tensor.device.type
device_id = input_tensor.device.index
device_id = 0 if device_id is None else device_id
io_binding.bind_input(
name='input',
device_type=device_type,
device_id=device_id,
element_type=np.float32,
shape=input_tensor.shape,
buffer_ptr=input_tensor.data_ptr())
for name in output_names:
io_binding.bind_output(name)
sess.run_with_iobinding(io_binding)
pred = io_binding.copy_outputs_to_cpu()
return pred
class ONNXRuntimeMattor(nn.Module):
def __init__(self, sess, io_binding, output_names, base_model):
super(ONNXRuntimeMattor, self).__init__()
self.sess = sess
self.io_binding = io_binding
self.output_names = output_names
self.base_model = base_model
def forward(self,
merged,
trimap,
meta,
test_mode=False,
save_image=False,
save_path=None,
iteration=None):
input_tensor = torch.cat((merged, trimap), 1).contiguous()
pred_alpha = inference_with_session(self.sess, self.io_binding,
self.output_names, input_tensor)[0]
pred_alpha = pred_alpha.squeeze()
pred_alpha = self.base_model.restore_shape(pred_alpha, meta)
eval_result = self.base_model.evaluate(pred_alpha, meta)
if save_image:
self.base_model.save_image(pred_alpha, meta, save_path, iteration)
return {'pred_alpha': pred_alpha, 'eval_result': eval_result}
class RestorerGenerator(nn.Module):
def __init__(self, sess, io_binding, output_names):
super(RestorerGenerator, self).__init__()
self.sess = sess
self.io_binding = io_binding
self.output_names = output_names
def forward(self, x):
pred = inference_with_session(self.sess, self.io_binding,
self.output_names, x)[0]
pred = torch.from_numpy(pred)
return pred
class ONNXRuntimeRestorer(nn.Module):
def __init__(self, sess, io_binding, output_names, base_model):
super(ONNXRuntimeRestorer, self).__init__()
self.sess = sess
self.io_binding = io_binding
self.output_names = output_names
self.base_model = base_model
restorer_generator = RestorerGenerator(self.sess, self.io_binding,
self.output_names)
base_model.generator = restorer_generator
def forward(self, lq, gt=None, test_mode=False, **kwargs):
return self.base_model(lq, gt=gt, test_mode=test_mode, **kwargs)
class ONNXRuntimeEditing(nn.Module):
def __init__(self, onnx_file, cfg, device_id):
super(ONNXRuntimeEditing, self).__init__()
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with ONNXRuntime from source.')
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_file, session_options)
providers = ['CPUExecutionProvider']
options = [{}]
is_cuda_available = ort.get_device() == 'GPU'
if is_cuda_available:
providers.insert(0, 'CUDAExecutionProvider')
options.insert(0, {'device_id': device_id})
sess.set_providers(providers, options)
self.sess = sess
self.device_id = device_id
self.io_binding = sess.io_binding()
self.output_names = [_.name for _ in sess.get_outputs()]
base_model = build_model(
cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
if isinstance(base_model, BaseMattor):
WrapperClass = ONNXRuntimeMattor
elif isinstance(base_model, BasicRestorer):
WrapperClass = ONNXRuntimeRestorer
self.wrapper = WrapperClass(self.sess, self.io_binding,
self.output_names, base_model)
def forward(self, **kwargs):
return self.wrapper(**kwargs)
```
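A hedged sketch of how the wrapper above might be instantiated; the config and `.onnx` paths are placeholders, not files guaranteed to exist in this repo.
```python
import mmcv

# Placeholder paths: swap in a real mmediting restorer config and an ONNX
# file exported from the same model.
cfg = mmcv.Config.fromfile('configs/restorers/srcnn/srcnn_config.py')
wrapped = ONNXRuntimeEditing('srcnn.onnx', cfg=cfg, device_id=0)
# `wrapped` exposes the test-time interface of the original model, e.g.
# wrapped(lq=..., test_mode=True) when the base model is a BasicRestorer.
```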
#### File: mmedit/datasets/sr_folder_gt_dataset.py
```python
from .base_sr_dataset import BaseSRDataset
from .registry import DATASETS
@DATASETS.register_module()
class SRFolderGTDataset(BaseSRDataset):
"""General ground-truth image folder dataset for image restoration.
The dataset loads gt (Ground-Truth) image only,
applies specified transforms and finally returns a dict containing paired
data and other information.
This is the "gt folder mode", which needs to specify the gt
folder path, each folder containing the corresponding images.
Image lists will be generated automatically.
For example, we have a folder with the following structure:
::
data_root
├── gt
│ ├── 0001.png
│ ├── 0002.png
then, you need to set:
.. code-block:: python
gt_folder = data_root/gt
Args:
gt_folder (str | :obj:`Path`): Path to a gt folder.
pipeline (List[dict | callable]): A sequence of data transformations.
scale (int | tuple): Upsampling scale or upsampling scale range.
test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
        filename_tmpl (str): Template for each filename. Default: '{}'.
"""
def __init__(self,
gt_folder,
pipeline,
scale,
test_mode=False,
filename_tmpl='{}'):
super().__init__(pipeline, scale, test_mode)
self.gt_folder = str(gt_folder)
self.filename_tmpl = filename_tmpl
self.data_infos = self.load_annotations()
def load_annotations(self):
"""Load annoations for SR dataset.
It loads the GT image path from folder.
Returns:
dict: Returned dict for GT.
"""
data_infos = []
gt_paths = self.scan_folder(self.gt_folder)
for gt_path in gt_paths:
data_infos.append(dict(gt_path=gt_path))
return data_infos
```
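A hedged, config-style sketch of how this dataset might be declared in an mmediting config; the folder path and pipeline below are illustrative placeholders.
```python
# Illustrative pipeline; transform names follow mmedit.datasets.pipelines.
train_pipeline = [
    dict(type='LoadImageFromFile', io_backend='disk', key='gt'),
    dict(type='RescaleToZeroOne', keys=['gt']),
    dict(type='ImageToTensor', keys=['gt']),
    dict(type='Collect', keys=['gt'], meta_keys=['gt_path'])
]
data = dict(
    train=dict(
        type='SRFolderGTDataset',
        gt_folder='data/DIV2K/DIV2K_train_HR',  # placeholder path
        pipeline=train_pipeline,
        scale=4))
```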
#### File: encoder_decoders/decoders/plain_decoder.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.utils.weight_init import xavier_init
from torch.autograd import Function
from torch.nn.modules.pooling import _MaxUnpoolNd
from torch.nn.modules.utils import _pair
from mmedit.models.registry import COMPONENTS
class MaxUnpool2dop(Function):
"""We warp the `torch.nn.functional.max_unpool2d`
with an extra `symbolic` method, which is needed while exporting to ONNX.
Users should not call this function directly.
"""
@staticmethod
def forward(ctx, input, indices, kernel_size, stride, padding,
output_size):
"""Forward function of MaxUnpool2dop.
Args:
input (Tensor): Tensor needed to upsample.
indices (Tensor): Indices output of the previous MaxPool.
kernel_size (Tuple): Size of the max pooling window.
stride (Tuple): Stride of the max pooling window.
padding (Tuple): Padding that was added to the input.
output_size (List or Tuple): The shape of output tensor.
Returns:
Tensor: Output tensor.
"""
return F.max_unpool2d(input, indices, kernel_size, stride, padding,
output_size)
@staticmethod
def symbolic(g, input, indices, kernel_size, stride, padding, output_size):
# get shape
input_shape = g.op('Shape', input)
const_0 = g.op('Constant', value_t=torch.tensor(0))
const_1 = g.op('Constant', value_t=torch.tensor(1))
batch_size = g.op('Gather', input_shape, const_0, axis_i=0)
channel = g.op('Gather', input_shape, const_1, axis_i=0)
# height = (height - 1) * stride + kernel_size
height = g.op(
'Gather',
input_shape,
g.op('Constant', value_t=torch.tensor(2)),
axis_i=0)
height = g.op('Sub', height, const_1)
height = g.op('Mul', height,
g.op('Constant', value_t=torch.tensor(stride[1])))
height = g.op('Add', height,
g.op('Constant', value_t=torch.tensor(kernel_size[1])))
# width = (width - 1) * stride + kernel_size
width = g.op(
'Gather',
input_shape,
g.op('Constant', value_t=torch.tensor(3)),
axis_i=0)
width = g.op('Sub', width, const_1)
width = g.op('Mul', width,
g.op('Constant', value_t=torch.tensor(stride[0])))
width = g.op('Add', width,
g.op('Constant', value_t=torch.tensor(kernel_size[0])))
# step of channel
channel_step = g.op('Mul', height, width)
# step of batch
batch_step = g.op('Mul', channel_step, channel)
# channel offset
range_channel = g.op('Range', const_0, channel, const_1)
range_channel = g.op(
'Reshape', range_channel,
g.op('Constant', value_t=torch.tensor([1, -1, 1, 1])))
range_channel = g.op('Mul', range_channel, channel_step)
range_channel = g.op('Cast', range_channel, to_i=7) # 7 is int64
# batch offset
range_batch = g.op('Range', const_0, batch_size, const_1)
range_batch = g.op(
'Reshape', range_batch,
g.op('Constant', value_t=torch.tensor([-1, 1, 1, 1])))
range_batch = g.op('Mul', range_batch, batch_step)
range_batch = g.op('Cast', range_batch, to_i=7) # 7 is int64
# update indices
indices = g.op('Add', indices, range_channel)
indices = g.op('Add', indices, range_batch)
return g.op(
'MaxUnpool',
input,
indices,
kernel_shape_i=kernel_size,
strides_i=stride)
class MaxUnpool2d(_MaxUnpoolNd):
"""This module is modified from Pytorch `MaxUnpool2d` module.
Args:
kernel_size (int or tuple): Size of the max pooling window.
stride (int or tuple): Stride of the max pooling window.
Default: None (It is set to `kernel_size` by default).
padding (int or tuple): Padding that is added to the input.
Default: 0.
"""
def __init__(self, kernel_size, stride=None, padding=0):
super(MaxUnpool2d, self).__init__()
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride or kernel_size)
self.padding = _pair(padding)
def forward(self, input, indices, output_size=None):
"""Forward function of MaxUnpool2d.
Args:
input (Tensor): Tensor needed to upsample.
indices (Tensor): Indices output of the previous MaxPool.
output_size (List or Tuple): The shape of output tensor.
Default: None.
Returns:
Tensor: Output tensor.
"""
return MaxUnpool2dop.apply(input, indices, self.kernel_size,
self.stride, self.padding, output_size)
@COMPONENTS.register_module()
class PlainDecoder(nn.Module):
"""Simple decoder from Deep Image Matting.
Args:
in_channels (int): Channel num of input features.
"""
def __init__(self, in_channels):
super().__init__()
self.deconv6_1 = nn.Conv2d(in_channels, 512, kernel_size=1)
self.deconv5_1 = nn.Conv2d(512, 512, kernel_size=5, padding=2)
self.deconv4_1 = nn.Conv2d(512, 256, kernel_size=5, padding=2)
self.deconv3_1 = nn.Conv2d(256, 128, kernel_size=5, padding=2)
self.deconv2_1 = nn.Conv2d(128, 64, kernel_size=5, padding=2)
self.deconv1_1 = nn.Conv2d(64, 64, kernel_size=5, padding=2)
self.deconv1 = nn.Conv2d(64, 1, kernel_size=5, padding=2)
self.relu = nn.ReLU(inplace=True)
self.max_unpool2d_for_onnx = MaxUnpool2d(kernel_size=2, stride=2)
self.max_unpool2d = nn.MaxUnpool2d(kernel_size=2, stride=2)
def init_weights(self):
"""Init weights for the module.
"""
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m)
def forward(self, inputs):
"""Forward function of PlainDecoder.
Args:
inputs (dict): Output dictionary of the VGG encoder containing:
- out (Tensor): Output of the VGG encoder.
- max_idx_1 (Tensor): Index of the first maxpooling layer in the
VGG encoder.
- max_idx_2 (Tensor): Index of the second maxpooling layer in the
VGG encoder.
- max_idx_3 (Tensor): Index of the third maxpooling layer in the
VGG encoder.
- max_idx_4 (Tensor): Index of the fourth maxpooling layer in the
VGG encoder.
- max_idx_5 (Tensor): Index of the fifth maxpooling layer in the
VGG encoder.
Returns:
Tensor: Output tensor.
"""
max_idx_1 = inputs['max_idx_1']
max_idx_2 = inputs['max_idx_2']
max_idx_3 = inputs['max_idx_3']
max_idx_4 = inputs['max_idx_4']
max_idx_5 = inputs['max_idx_5']
x = inputs['out']
max_unpool2d = self.max_unpool2d
if torch.onnx.is_in_onnx_export():
max_unpool2d = self.max_unpool2d_for_onnx
out = self.relu(self.deconv6_1(x))
out = max_unpool2d(out, max_idx_5)
out = self.relu(self.deconv5_1(out))
out = max_unpool2d(out, max_idx_4)
out = self.relu(self.deconv4_1(out))
out = max_unpool2d(out, max_idx_3)
out = self.relu(self.deconv3_1(out))
out = max_unpool2d(out, max_idx_2)
out = self.relu(self.deconv2_1(out))
out = max_unpool2d(out, max_idx_1)
out = self.relu(self.deconv1_1(out))
raw_alpha = self.deconv1(out)
return raw_alpha
```
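A small sanity-check sketch (assuming the classes above are importable and we are not in ONNX export): the custom `MaxUnpool2d` should produce the same result as `torch.nn.MaxUnpool2d` when fed indices from a matching max-pool.
```python
import torch
import torch.nn as nn

pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
unpool_ref = nn.MaxUnpool2d(kernel_size=2, stride=2)
unpool_onnx = MaxUnpool2d(kernel_size=2, stride=2)  # ONNX-exportable version

x = torch.randn(2, 4, 8, 8)
pooled, indices = pool(x)
assert torch.equal(unpool_ref(pooled, indices), unpool_onnx(pooled, indices))
```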
#### File: encoder_decoders/encoders/resnet.py
```python
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_activation_layer, build_conv_layer,
build_norm_layer, constant_init, kaiming_init)
from mmcv.runner import load_checkpoint
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmedit.utils import get_root_logger
class BasicBlock(nn.Module):
"""Basic block for ResNet."""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
act_cfg=dict(type='ReLU'),
conv_cfg=None,
norm_cfg=dict(type='BN'),
with_cp=False):
super(BasicBlock, self).__init__()
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg, planes, planes, 3, padding=1, bias=False)
self.add_module(self.norm2_name, norm2)
self.activate = build_activation_layer(act_cfg)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.activate(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.activate(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet."""
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
act_cfg=dict(type='ReLU'),
conv_cfg=None,
norm_cfg=dict(type='BN'),
with_cp=False):
super(Bottleneck, self).__init__()
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.act_cfg = act_cfg
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv1_stride = 1
self.conv2_stride = stride
self.with_cp = with_cp
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
planes,
planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.activate = build_activation_layer(act_cfg)
self.downsample = downsample
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
@property
def norm3(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm3_name)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.activate(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.activate(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.activate(out)
return out
class ResNet(nn.Module):
"""General ResNet.
    This class is adapted from
    https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/backbones/resnet.py.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
stem_channels (int): Number of stem channels. Default: 64.
base_channels (int): Number of base channels of res layer. Default: 64.
num_stages (int): Resnet stages, normally 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
        deep_stem (bool): Replace the 7x7 conv in the input stem with
            three 3x3 convs.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
act_cfg (dict): Dictionary to construct and config activation layer.
conv_cfg (dict): Dictionary to construct and config convolution layer.
norm_cfg (dict): Dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
multi_grid (Sequence[int]|None): Multi grid dilation rates of last
stage. Default: None
        contract_dilation (bool): Whether to contract the first dilation of
            each layer. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
in_channels,
stem_channels,
base_channels,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 2, 4),
deep_stem=False,
avg_down=False,
frozen_stages=-1,
act_cfg=dict(type='ReLU'),
conv_cfg=None,
norm_cfg=dict(type='BN'),
with_cp=False,
multi_grid=None,
contract_dilation=False,
zero_init_residual=True):
super(ResNet, self).__init__()
from functools import partial
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.block, stage_blocks = self.arch_settings[depth]
self.depth = depth
self.inplanes = stem_channels
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.multi_grid = multi_grid
self.contract_dilation = contract_dilation
self.zero_init_residual = zero_init_residual
self._make_stem_layer(in_channels, stem_channels)
self.layer1 = self._make_layer(
self.block, 64, stage_blocks[0], stride=strides[0])
self.layer2 = self._make_layer(
self.block, 128, stage_blocks[1], stride=strides[1])
self.layer3 = self._make_layer(
self.block, 256, stage_blocks[2], stride=strides[2])
self.layer4 = self._make_layer(
self.block, 512, stage_blocks[3], stride=strides[3])
self.layer1.apply(partial(self._nostride_dilate, dilate=dilations[0]))
self.layer2.apply(partial(self._nostride_dilate, dilate=dilations[1]))
self.layer3.apply(partial(self._nostride_dilate, dilate=dilations[2]))
self.layer4.apply(partial(self._nostride_dilate, dilate=dilations[3]))
self._freeze_stages()
def _make_stem_layer(self, in_channels, stem_channels):
"""Make stem layer for ResNet."""
if self.deep_stem:
self.stem = nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels // 2,
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
build_activation_layer(self.act_cfg),
build_conv_layer(
self.conv_cfg,
stem_channels // 2,
stem_channels // 2,
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
build_activation_layer(self.act_cfg),
build_conv_layer(
self.conv_cfg,
stem_channels // 2,
stem_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels)[1],
build_activation_layer(self.act_cfg))
else:
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, stem_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.activate = build_activation_layer(self.act_cfg)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
@property
def norm1(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm1_name)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
self.conv_cfg,
self.inplanes,
planes * block.expansion,
stride=stride,
kernel_size=1,
dilation=dilation,
bias=False),
build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample=downsample,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
conv_cfg=self.conv_cfg))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
conv_cfg=self.conv_cfg))
return nn.Sequential(*layers)
def _nostride_dilate(self, m, dilate):
classname = m.__class__.__name__
if classname.find('Conv') != -1 and dilate > 1:
# the convolution with stride
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate // 2, dilate // 2)
m.padding = (dilate // 2, dilate // 2)
            # other convolutions
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
def init_weights(self, pretrained=None):
"""Init weights for the model.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
def _freeze_stages(self):
"""Freeze stages param and norm stats."""
if self.frozen_stages >= 0:
if self.deep_stem:
self.stem.eval()
for param in self.stem.parameters():
param.requires_grad = False
else:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (N, C, H, W).
Returns:
Tensor: Output tensor.
"""
conv_out = [x]
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.activate(x)
conv_out.append(x)
x = self.maxpool(x)
x = self.layer1(x)
conv_out.append(x)
x = self.layer2(x)
conv_out.append(x)
x = self.layer3(x)
conv_out.append(x)
x = self.layer4(x)
conv_out.append(x)
return conv_out
```
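A minimal forward sketch, assuming the `ResNet` class above is importable; the input size is arbitrary. `forward` returns a list containing the input, the stem output, and the four stage outputs.
```python
import torch

model = ResNet(depth=18, in_channels=3, stem_channels=64, base_channels=64)
model.init_weights()      # random init (no pretrained weights)
model.eval()
with torch.no_grad():
    feats = model(torch.randn(1, 3, 64, 64))
print([tuple(f.shape) for f in feats])  # 6 feature maps: input + stem + 4 stages
```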
#### File: backbones/sr_backbones/ttsr_net.py
```python
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer
from mmcv.runner import load_checkpoint
from mmedit.models.common import (PixelShufflePack, ResidualBlockNoBN,
make_layer)
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
# Use partial to specify some default arguments
_conv3x3_layer = partial(
build_conv_layer, dict(type='Conv2d'), kernel_size=3, padding=1)
_conv1x1_layer = partial(
build_conv_layer, dict(type='Conv2d'), kernel_size=1, padding=0)
class SFE(nn.Module):
"""Structural Feature Encoder
Backbone of Texture Transformer Network for Image Super-Resolution.
Args:
in_channels (int): Number of channels in the input image
mid_channels (int): Channel number of intermediate features
num_blocks (int): Block number in the trunk network
res_scale (float): Used to scale the residual in residual block.
Default: 1.
"""
def __init__(self, in_channels, mid_channels, num_blocks, res_scale):
super().__init__()
self.num_blocks = num_blocks
self.conv_first = _conv3x3_layer(in_channels, mid_channels)
self.body = make_layer(
ResidualBlockNoBN,
num_blocks,
mid_channels=mid_channels,
res_scale=res_scale)
self.conv_last = _conv3x3_layer(mid_channels, mid_channels)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
x1 = x = F.relu(self.conv_first(x))
x = self.body(x)
x = self.conv_last(x)
x = x + x1
return x
class CSFI2(nn.Module):
"""Cross-Scale Feature Integration between 1x and 2x features.
Cross-Scale Feature Integration in Texture Transformer Network for
Image Super-Resolution.
It is cross-scale feature integration between 1x and 2x features.
For example, `conv2to1` means conv layer from 2x feature to 1x
    feature. Down-sampling is achieved by a conv layer with stride=2,
    and up-sampling is achieved by bicubic interpolation and a conv layer.
Args:
mid_channels (int): Channel number of intermediate features
"""
def __init__(self, mid_channels):
super().__init__()
self.conv1to2 = _conv1x1_layer(mid_channels, mid_channels)
self.conv2to1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv_merge1 = _conv3x3_layer(mid_channels * 2, mid_channels)
self.conv_merge2 = _conv3x3_layer(mid_channels * 2, mid_channels)
def forward(self, x1, x2):
"""Forward function.
Args:
x1 (Tensor): Input tensor with shape (n, c, h, w).
x2 (Tensor): Input tensor with shape (n, c, 2h, 2w).
Returns:
x1 (Tensor): Output tensor with shape (n, c, h, w).
x2 (Tensor): Output tensor with shape (n, c, 2h, 2w).
"""
x12 = F.interpolate(
x1, scale_factor=2, mode='bicubic', align_corners=False)
x12 = F.relu(self.conv1to2(x12))
x21 = F.relu(self.conv2to1(x2))
x1 = F.relu(self.conv_merge1(torch.cat((x1, x21), dim=1)))
x2 = F.relu(self.conv_merge2(torch.cat((x2, x12), dim=1)))
return x1, x2
class CSFI3(nn.Module):
"""Cross-Scale Feature Integration between 1x, 2x, and 4x features.
Cross-Scale Feature Integration in Texture Transformer Network for
Image Super-Resolution.
    It is cross-scale feature integration between 1x, 2x, and 4x features.
    For example, `conv2to1` means a conv layer from the 2x feature to the 1x
    feature. Down-sampling is achieved by a conv layer with stride=2,
    and up-sampling is achieved by bicubic interpolation and a conv layer.
Args:
mid_channels (int): Channel number of intermediate features
"""
def __init__(self, mid_channels):
super().__init__()
self.conv1to2 = _conv1x1_layer(mid_channels, mid_channels)
self.conv1to4 = _conv1x1_layer(mid_channels, mid_channels)
self.conv2to1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv2to4 = _conv1x1_layer(mid_channels, mid_channels)
self.conv4to1_1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv4to1_2 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv4to2 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
self.conv_merge1 = _conv3x3_layer(mid_channels * 3, mid_channels)
self.conv_merge2 = _conv3x3_layer(mid_channels * 3, mid_channels)
self.conv_merge4 = _conv3x3_layer(mid_channels * 3, mid_channels)
def forward(self, x1, x2, x4):
"""Forward function.
Args:
x1 (Tensor): Input tensor with shape (n, c, h, w).
x2 (Tensor): Input tensor with shape (n, c, 2h, 2w).
x4 (Tensor): Input tensor with shape (n, c, 4h, 4w).
Returns:
x1 (Tensor): Output tensor with shape (n, c, h, w).
x2 (Tensor): Output tensor with shape (n, c, 2h, 2w).
x4 (Tensor): Output tensor with shape (n, c, 4h, 4w).
"""
x12 = F.interpolate(
x1, scale_factor=2, mode='bicubic', align_corners=False)
x12 = F.relu(self.conv1to2(x12))
x14 = F.interpolate(
x1, scale_factor=4, mode='bicubic', align_corners=False)
x14 = F.relu(self.conv1to4(x14))
x21 = F.relu(self.conv2to1(x2))
x24 = F.interpolate(
x2, scale_factor=2, mode='bicubic', align_corners=False)
x24 = F.relu(self.conv2to4(x24))
x41 = F.relu(self.conv4to1_1(x4))
x41 = F.relu(self.conv4to1_2(x41))
x42 = F.relu(self.conv4to2(x4))
x1 = F.relu(self.conv_merge1(torch.cat((x1, x21, x41), dim=1)))
x2 = F.relu(self.conv_merge2(torch.cat((x2, x12, x42), dim=1)))
x4 = F.relu(self.conv_merge4(torch.cat((x4, x14, x24), dim=1)))
return x1, x2, x4
class MergeFeatures(nn.Module):
"""Merge Features. Merge 1x, 2x, and 4x features.
Final module of Texture Transformer Network for Image Super-Resolution.
Args:
mid_channels (int): Channel number of intermediate features
out_channels (int): Number of channels in the output image
"""
def __init__(self, mid_channels, out_channels):
super().__init__()
self.conv1to4 = _conv1x1_layer(mid_channels, mid_channels)
self.conv2to4 = _conv1x1_layer(mid_channels, mid_channels)
self.conv_merge = _conv3x3_layer(mid_channels * 3, mid_channels)
self.conv_last1 = _conv3x3_layer(mid_channels, mid_channels // 2)
self.conv_last2 = _conv1x1_layer(mid_channels // 2, out_channels)
def forward(self, x1, x2, x4):
"""Forward function.
Args:
x1 (Tensor): Input tensor with shape (n, c, h, w).
x2 (Tensor): Input tensor with shape (n, c, 2h, 2w).
x4 (Tensor): Input tensor with shape (n, c, 4h, 4w).
Returns:
x (Tensor): Output tensor with shape (n, c_out, 4h, 4w).
"""
x14 = F.interpolate(
x1, scale_factor=4, mode='bicubic', align_corners=False)
x14 = F.relu(self.conv1to4(x14))
x24 = F.interpolate(
x2, scale_factor=2, mode='bicubic', align_corners=False)
x24 = F.relu(self.conv2to4(x24))
x = F.relu(self.conv_merge(torch.cat((x4, x14, x24), dim=1)))
x = self.conv_last1(x)
x = self.conv_last2(x)
x = torch.clamp(x, -1, 1)
return x
@BACKBONES.register_module()
class TTSRNet(nn.Module):
"""TTSR network structure (main-net) for reference-based super-resolution.
Paper: Learning Texture Transformer Network for Image Super-Resolution
Adapted from 'https://github.com/researchmm/TTSR.git'
'https://github.com/researchmm/TTSR'
Copyright permission at 'https://github.com/researchmm/TTSR/issues/38'.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels in the output image
mid_channels (int): Channel number of intermediate features.
Default: 64
num_blocks (tuple[int]): Block numbers in the trunk network.
Default: (16, 16, 8, 4)
res_scale (float): Used to scale the residual in residual block.
Default: 1.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels=64,
texture_channels=64,
num_blocks=(16, 16, 8, 4),
res_scale=1.0):
super().__init__()
self.texture_channels = texture_channels
self.sfe = SFE(in_channels, mid_channels, num_blocks[0], res_scale)
# stage 1
self.conv_first1 = _conv3x3_layer(4 * texture_channels + mid_channels,
mid_channels)
self.res_block1 = make_layer(
ResidualBlockNoBN,
num_blocks[1],
mid_channels=mid_channels,
res_scale=res_scale)
self.conv_last1 = _conv3x3_layer(mid_channels, mid_channels)
# up-sampling 1 -> 2
self.up1 = PixelShufflePack(
in_channels=mid_channels,
out_channels=mid_channels,
scale_factor=2,
upsample_kernel=3)
# stage 2
self.conv_first2 = _conv3x3_layer(2 * texture_channels + mid_channels,
mid_channels)
self.csfi2 = CSFI2(mid_channels)
self.res_block2_1 = make_layer(
ResidualBlockNoBN,
num_blocks[2],
mid_channels=mid_channels,
res_scale=res_scale)
self.res_block2_2 = make_layer(
ResidualBlockNoBN,
num_blocks[2],
mid_channels=mid_channels,
res_scale=res_scale)
self.conv_last2_1 = _conv3x3_layer(mid_channels, mid_channels)
self.conv_last2_2 = _conv3x3_layer(mid_channels, mid_channels)
# up-sampling 2 -> 3
self.up2 = PixelShufflePack(
in_channels=mid_channels,
out_channels=mid_channels,
scale_factor=2,
upsample_kernel=3)
# stage 3
self.conv_first3 = _conv3x3_layer(texture_channels + mid_channels,
mid_channels)
self.csfi3 = CSFI3(mid_channels)
self.res_block3_1 = make_layer(
ResidualBlockNoBN,
num_blocks[3],
mid_channels=mid_channels,
res_scale=res_scale)
self.res_block3_2 = make_layer(
ResidualBlockNoBN,
num_blocks[3],
mid_channels=mid_channels,
res_scale=res_scale)
self.res_block3_3 = make_layer(
ResidualBlockNoBN,
num_blocks[3],
mid_channels=mid_channels,
res_scale=res_scale)
self.conv_last3_1 = _conv3x3_layer(mid_channels, mid_channels)
self.conv_last3_2 = _conv3x3_layer(mid_channels, mid_channels)
self.conv_last3_3 = _conv3x3_layer(mid_channels, mid_channels)
# end, merge features
self.merge_features = MergeFeatures(mid_channels, out_channels)
def forward(self, x, soft_attention, textures):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
soft_attention (Tensor): Soft-Attention tensor with shape
(n, 1, h, w).
textures (Tuple[Tensor]): Transferred HR texture tensors.
[(N, C, H, W), (N, C/2, 2H, 2W), ...]
Returns:
Tensor: Forward results.
"""
assert textures[-1].shape[1] == self.texture_channels
x1 = self.sfe(x)
# stage 1
x1_res = torch.cat((x1, textures[0]), dim=1)
x1_res = self.conv_first1(x1_res)
# soft-attention
x1 = x1 + x1_res * soft_attention
x1_res = self.res_block1(x1)
x1_res = self.conv_last1(x1_res)
x1 = x1 + x1_res
# stage 2
x21 = x1
x22 = self.up1(x1)
x22 = F.relu(x22)
x22_res = torch.cat((x22, textures[1]), dim=1)
x22_res = self.conv_first2(x22_res)
# soft-attention
x22_res = x22_res * F.interpolate(
soft_attention,
scale_factor=2,
mode='bicubic',
align_corners=False)
x22 = x22 + x22_res
x21_res, x22_res = self.csfi2(x21, x22)
x21_res = self.res_block2_1(x21_res)
x22_res = self.res_block2_2(x22_res)
x21_res = self.conv_last2_1(x21_res)
x22_res = self.conv_last2_2(x22_res)
x21 = x21 + x21_res
x22 = x22 + x22_res
# stage 3
x31 = x21
x32 = x22
x33 = self.up2(x22)
x33 = F.relu(x33)
x33_res = torch.cat((x33, textures[2]), dim=1)
x33_res = self.conv_first3(x33_res)
# soft-attention
x33_res = x33_res * F.interpolate(
soft_attention,
scale_factor=4,
mode='bicubic',
align_corners=False)
x33 = x33 + x33_res
x31_res, x32_res, x33_res = self.csfi3(x31, x32, x33)
x31_res = self.res_block3_1(x31_res)
x32_res = self.res_block3_2(x32_res)
x33_res = self.res_block3_3(x33_res)
x31_res = self.conv_last3_1(x31_res)
x32_res = self.conv_last3_2(x32_res)
x33_res = self.conv_last3_3(x33_res)
x31 = x31 + x31_res
x32 = x32 + x32_res
x33 = x33 + x33_res
x = self.merge_features(x31, x32, x33)
return x
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained model.
Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass # use default initialization
else:
raise TypeError('"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
```
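A shape-checking sketch for `TTSRNet`, with random tensors standing in for the LR input, the soft-attention map, and the transferred textures; all sizes and the reduced `num_blocks` are illustrative assumptions.
```python
import torch

net = TTSRNet(in_channels=3, out_channels=3, mid_channels=64,
              texture_channels=64, num_blocks=(4, 4, 2, 1))
x = torch.randn(1, 3, 24, 24)              # LR input
soft_attention = torch.rand(1, 1, 24, 24)  # relevance map, same size as x
textures = [
    torch.randn(1, 4 * 64, 24, 24),        # 1x transferred texture
    torch.randn(1, 2 * 64, 48, 48),        # 2x transferred texture
    torch.randn(1, 64, 96, 96),            # 4x transferred texture
]
with torch.no_grad():
    out = net(x, soft_attention, textures)
print(out.shape)  # torch.Size([1, 3, 96, 96]), i.e. 4x super-resolution
```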
#### File: components/refiners/mlp_refiner.py
```python
import torch.nn as nn
from mmcv.runner import load_checkpoint
from mmedit.models.registry import COMPONENTS
from mmedit.utils import get_root_logger
@COMPONENTS.register_module()
class MLPRefiner(nn.Module):
"""Multilayer perceptrons (MLPs), refiner used in LIIF.
Args:
in_dim (int): Input dimension.
out_dim (int): Output dimension.
hidden_list (list[int]): List of hidden dimensions.
"""
def __init__(self, in_dim, out_dim, hidden_list):
super().__init__()
layers = []
lastv = in_dim
for hidden in hidden_list:
layers.append(nn.Linear(lastv, hidden))
layers.append(nn.ReLU())
lastv = hidden
layers.append(nn.Linear(lastv, out_dim))
self.layers = nn.Sequential(*layers)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): The input of MLP.
Returns:
Tensor: The output of MLP.
"""
shape = x.shape[:-1]
x = self.layers(x.view(-1, x.shape[-1]))
return x.view(*shape, -1)
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained model.
Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass
else:
raise TypeError(f'"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
```
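A minimal sketch of the refiner in isolation; the input dimension is a made-up stand-in for whatever feature size the LIIF encoder produces.
```python
import torch

refiner = MLPRefiner(in_dim=580, out_dim=3, hidden_list=[256, 256, 256, 256])
coords_feat = torch.randn(4, 1024, 580)  # (batch, queried coordinates, in_dim)
rgb = refiner(coords_feat)
print(rgb.shape)  # torch.Size([4, 1024, 3])
```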
#### File: components/stylegan2/modules.py
```python
import math
from copy import deepcopy
from functools import partial
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks.activation import build_activation_layer
from mmcv.cnn.utils import normal_init
from mmcv.ops.fused_bias_leakyrelu import (FusedBiasLeakyReLU,
fused_bias_leakyrelu)
from mmcv.ops.upfirdn2d import upfirdn2d
from torch.nn.init import _calculate_correct_fan
def pixel_norm(x, eps=1e-6):
"""Pixel Normalization.
This normalization is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Args:
x (torch.Tensor): Tensor to be normalized.
        eps (float, optional): Epsilon to avoid dividing by zero.
            Defaults to 1e-6.
Returns:
torch.Tensor: Normalized tensor.
"""
if torch.__version__ >= '1.7.0':
norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)
# support older pytorch version
else:
norm = torch.norm(x, p=2, dim=1, keepdim=True)
norm = norm / torch.sqrt(torch.tensor(x.shape[1]).to(x))
return x / (norm + eps)
class PixelNorm(nn.Module):
"""Pixel Normalization.
This module is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Args:
eps (float, optional): Epsilon value. Defaults to 1e-6.
"""
_abbr_ = 'pn'
def __init__(self, in_channels=None, eps=1e-6):
super(PixelNorm, self).__init__()
self.eps = eps
def forward(self, x):
return pixel_norm(x, self.eps)
class EqualizedLR:
r"""Equalized Learning Rate.
This trick is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
    The general idea is to dynamically rescale the weight at training time
    instead of at initialization, so that the variance of the responses in
    each layer is guaranteed with some statistical properties.
Note that this function is always combined with a convolution module which
is initialized with :math:`\mathcal{N}(0, 1)`.
Args:
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
"""
def __init__(self, name='weight', gain=2**0.5, mode='fan_in', lr_mul=1.0):
self.name = name
self.mode = mode
self.gain = gain
self.lr_mul = lr_mul
def compute_weight(self, module):
"""Compute weight with equalized learning rate.
Args:
module (nn.Module): A module that is wrapped with equalized lr.
Returns:
torch.Tensor: Updated weight.
"""
weight = getattr(module, self.name + '_orig')
if weight.ndim == 5:
# weight in shape of [b, out, in, k, k]
fan = _calculate_correct_fan(weight[0], self.mode)
else:
assert weight.ndim <= 4
fan = _calculate_correct_fan(weight, self.mode)
weight = weight * torch.tensor(
self.gain, device=weight.device) * torch.sqrt(
torch.tensor(1. / fan, device=weight.device)) * self.lr_mul
return weight
def __call__(self, module, inputs):
"""Standard interface for forward pre hooks."""
setattr(module, self.name, self.compute_weight(module))
@staticmethod
def apply(module, name, gain=2**0.5, mode='fan_in', lr_mul=1.):
"""Apply function.
This function is to register an equalized learning rate hook in an
``nn.Module``.
Args:
module (nn.Module): Module to be wrapped.
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
Returns:
nn.Module: Module that is registered with equalized lr hook.
"""
# sanity check for duplicated hooks.
for _, hook in module._forward_pre_hooks.items():
if isinstance(hook, EqualizedLR):
raise RuntimeError(
'Cannot register two equalized_lr hooks on the same '
f'parameter {name} in {module} module.')
fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul)
weight = module._parameters[name]
delattr(module, name)
module.register_parameter(name + '_orig', weight)
# We still need to assign weight back as fn.name because all sorts of
# things may assume that it exists, e.g., when initializing weights.
# However, we can't directly assign as it could be an nn.Parameter and
# gets added as a parameter. Instead, we register weight.data as a
# plain attribute.
setattr(module, name, weight.data)
module.register_forward_pre_hook(fn)
# TODO: register load state dict hook
return fn
def equalized_lr(module, name='weight', gain=2**0.5, mode='fan_in', lr_mul=1.):
r"""Equalized Learning Rate.
This trick is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
    The general idea is to dynamically rescale the weight at training time
    instead of at initialization, so that the variance of the responses in
    each layer is guaranteed with some statistical properties.
Note that this function is always combined with a convolution module which
is initialized with :math:`\mathcal{N}(0, 1)`.
Args:
module (nn.Module): Module to be wrapped.
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
Returns:
nn.Module: Module that is registered with equalized lr hook.
"""
EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul)
return module
class EqualizedLRConvModule(ConvModule):
r"""Equalized LR ConvModule.
In this module, we inherit default ``mmcv.cnn.ConvModule`` and adopt
equalized lr in convolution. The equalized learning rate is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Note that, the initialization of ``self.conv`` will be overwritten as
:math:`\mathcal{N}(0, 1)`.
Args:
equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``.
If ``None``, equalized learning rate is ignored. Defaults to
dict(mode='fan_in').
"""
def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs):
super(EqualizedLRConvModule, self).__init__(*args, **kwargs)
self.with_equlized_lr = equalized_lr_cfg is not None
if self.with_equlized_lr:
self.conv = equalized_lr(self.conv, **equalized_lr_cfg)
# initialize the conv weight with standard Gaussian noise.
self._init_conv_weights()
def _init_conv_weights(self):
"""Initialize conv weights as described in PGGAN."""
normal_init(self.conv)
class EqualizedLRLinearModule(nn.Linear):
r"""Equalized LR LinearModule.
In this module, we adopt equalized lr in ``nn.Linear``. The equalized
learning rate is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Note that, the initialization of ``self.weight`` will be overwritten as
:math:`\mathcal{N}(0, 1)`.
Args:
equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``.
If ``None``, equalized learning rate is ignored. Defaults to
dict(mode='fan_in').
"""
def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs):
super(EqualizedLRLinearModule, self).__init__(*args, **kwargs)
self.with_equlized_lr = equalized_lr_cfg is not None
if self.with_equlized_lr:
self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.)
else:
# In fact, lr_mul will only be used in EqualizedLR for
# initialization
self.lr_mul = 1.
if self.with_equlized_lr:
equalized_lr(self, **equalized_lr_cfg)
self._init_linear_weights()
def _init_linear_weights(self):
"""Initialize linear weights as described in PGGAN."""
nn.init.normal_(self.weight, 0, 1. / self.lr_mul)
if self.bias is not None:
nn.init.constant_(self.bias, 0.)
class EqualLinearActModule(nn.Module):
"""Equalized LR Linear Module with Activation Layer.
Args:
        equalized_lr_cfg (dict, optional): Config for ``EqualizedLR``.
            Defaults to ``dict(gain=1., lr_mul=1.)``.
        bias (bool, optional): Whether to use a learnable bias parameter.
            Defaults to True.
        bias_init (float, optional): Initial value of the bias.
            Defaults to 0.
        act_cfg (dict | None, optional): Config for the activation layer.
            Defaults to None.
"""
def __init__(self,
*args,
equalized_lr_cfg=dict(gain=1., lr_mul=1.),
bias=True,
bias_init=0.,
act_cfg=None,
**kwargs):
super(EqualLinearActModule, self).__init__()
self.with_activation = act_cfg is not None
# w/o bias in linear layer
self.linear = EqualizedLRLinearModule(
*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs)
if equalized_lr_cfg is not None:
self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.)
else:
self.lr_mul = 1.
# define bias outside linear layer
if bias:
self.bias = nn.Parameter(
torch.zeros(self.linear.out_features).fill_(bias_init))
else:
self.bias = None
if self.with_activation:
act_cfg = deepcopy(act_cfg)
if act_cfg['type'] == 'fused_bias':
self.act_type = act_cfg.pop('type')
assert self.bias is not None
self.activate = partial(fused_bias_leakyrelu, **act_cfg)
else:
self.act_type = 'normal'
self.activate = build_activation_layer(act_cfg)
else:
self.act_type = None
def forward(self, x):
if x.ndim >= 3:
x = x.reshape(x.size(0), -1)
x = self.linear(x)
if self.with_activation and self.act_type == 'fused_bias':
x = self.activate(x, self.bias * self.lr_mul)
elif self.bias is not None and self.with_activation:
x = self.activate(x + self.bias * self.lr_mul)
elif self.bias is not None:
x = x + self.bias * self.lr_mul
elif self.with_activation:
x = self.activate(x)
return x
def _make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
class UpsampleUpFIRDn(nn.Module):
def __init__(self, kernel, factor=2):
super(UpsampleUpFIRDn, self).__init__()
self.factor = factor
kernel = _make_kernel(kernel) * (factor**2)
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = (pad0, pad1)
def forward(self, x):
out = upfirdn2d(x, self.kernel, up=self.factor, down=1, pad=self.pad)
return out
class DonwsampleUpFIRDn(nn.Module):
def __init__(self, kernel, factor=2):
super(DonwsampleUpFIRDn, self).__init__()
self.factor = factor
kernel = _make_kernel(kernel)
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2
pad1 = p // 2
self.pad = (pad0, pad1)
def forward(self, input):
out = upfirdn2d(
input, self.kernel, up=1, down=self.factor, pad=self.pad)
return out
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super(Blur, self).__init__()
kernel = _make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * (upsample_factor**2)
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, x):
return upfirdn2d(x, self.kernel, pad=self.pad)
class ModulatedConv2d(nn.Module):
r"""Modulated Conv2d in StyleGANv2.
Attention:
#. ``style_bias`` is provided to check the difference between official TF
       implementation and other PyTorch implementations.
       In TF, Tero explicitly adds the ``1.`` after the style code, while
       unofficial implementations adopt bias initialization with ``1.``.
Details can be found in:
https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py#L214
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py#L99
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
style_channels,
demodulate=True,
upsample=False,
downsample=False,
blur_kernel=[1, 3, 3, 1],
equalized_lr_cfg=dict(mode='fan_in', lr_mul=1., gain=1.),
style_mod_cfg=dict(bias_init=1.),
style_bias=0.,
eps=1e-8):
super(ModulatedConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.style_channels = style_channels
self.demodulate = demodulate
# sanity check for kernel size
assert isinstance(self.kernel_size,
int) and (self.kernel_size >= 1
and self.kernel_size % 2 == 1)
self.upsample = upsample
self.downsample = downsample
self.style_bias = style_bias
self.eps = eps
# build style modulation module
style_mod_cfg = dict() if style_mod_cfg is None else style_mod_cfg
self.style_modulation = EqualLinearActModule(style_channels,
in_channels,
**style_mod_cfg)
# set lr_mul for conv weight
lr_mul_ = 1.
if equalized_lr_cfg is not None:
lr_mul_ = equalized_lr_cfg.get('lr_mul', 1.)
self.weight = nn.Parameter(
torch.randn(1, out_channels, in_channels, kernel_size,
kernel_size).div_(lr_mul_))
# build blurry layer for upsampling
if upsample:
factor = 2
p = (len(blur_kernel) - factor) - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, (pad0, pad1), upsample_factor=factor)
# build blurry layer for downsampling
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
# add equalized_lr hook for conv weight
if equalized_lr_cfg is not None:
equalized_lr(self, **equalized_lr_cfg)
self.padding = kernel_size // 2
def forward(self, x, style):
n, c, h, w = x.shape
# process style code
style = self.style_modulation(style).view(n, 1, c, 1,
1) + self.style_bias
# combine weight and style
weight = self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
weight = weight * demod.view(n, self.out_channels, 1, 1, 1)
weight = weight.view(n * self.out_channels, c, self.kernel_size,
self.kernel_size)
if self.upsample:
x = x.reshape(1, n * c, h, w)
weight = weight.view(n, self.out_channels, c, self.kernel_size,
self.kernel_size)
weight = weight.transpose(1, 2).reshape(n * c, self.out_channels,
self.kernel_size,
self.kernel_size)
x = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=n)
x = x.reshape(n, self.out_channels, *x.shape[-2:])
x = self.blur(x)
elif self.downsample:
x = self.blur(x)
x = x.view(1, n * self.in_channels, *x.shape[-2:])
x = F.conv2d(x, weight, stride=2, padding=0, groups=n)
x = x.view(n, self.out_channels, *x.shape[-2:])
else:
x = x.view(1, n * c, h, w)
x = F.conv2d(x, weight, stride=1, padding=self.padding, groups=n)
x = x.view(n, self.out_channels, *x.shape[-2:])
return x
class NoiseInjection(nn.Module):
def __init__(self, noise_weight_init=0.):
super(NoiseInjection, self).__init__()
self.weight = nn.Parameter(torch.zeros(1).fill_(noise_weight_init))
def forward(self, image, noise=None, return_noise=False):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
if return_noise:
return image + self.weight * noise, noise
return image + self.weight * noise
class ConstantInput(nn.Module):
def __init__(self, channel, size=4):
super().__init__()
if isinstance(size, int):
size = [size, size]
elif mmcv.is_seq_of(size, int):
assert len(
size
) == 2, f'The length of size should be 2 but got {len(size)}'
else:
raise ValueError(f'Got invalid value in size, {size}')
self.input = nn.Parameter(torch.randn(1, channel, *size))
def forward(self, x):
batch = x.shape[0]
out = self.input.repeat(batch, 1, 1, 1)
return out
class ModulatedPEConv2d(nn.Module):
r"""Modulated Conv2d in StyleGANv2.
Attention:
#. ``style_bias`` is provided to check the difference between official TF
       implementation and other PyTorch implementations.
       In TF, Tero explicitly adds the ``1.`` after the style code, while
       unofficial implementations adopt bias initialization with ``1.``.
Details can be found in:
https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py#L214
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py#L99
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
style_channels,
demodulate=True,
upsample=False,
downsample=False,
blur_kernel=[1, 3, 3, 1],
equalized_lr_cfg=dict(mode='fan_in', lr_mul=1., gain=1.),
style_mod_cfg=dict(bias_init=1.),
style_bias=0.,
eps=1e-8,
no_pad=False,
deconv2conv=False,
interp_pad=None,
up_config=dict(scale_factor=2, mode='nearest'),
up_after_conv=False):
super(ModulatedPEConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.style_channels = style_channels
self.demodulate = demodulate
# sanity check for kernel size
assert isinstance(self.kernel_size,
int) and (self.kernel_size >= 1
and self.kernel_size % 2 == 1)
self.upsample = upsample
self.downsample = downsample
self.style_bias = style_bias
self.eps = eps
self.no_pad = no_pad
self.deconv2conv = deconv2conv
self.interp_pad = interp_pad
self.with_interp_pad = interp_pad is not None
self.up_config = deepcopy(up_config)
self.up_after_conv = up_after_conv
# build style modulation module
style_mod_cfg = dict() if style_mod_cfg is None else style_mod_cfg
self.style_modulation = EqualLinearActModule(style_channels,
in_channels,
**style_mod_cfg)
# set lr_mul for conv weight
lr_mul_ = 1.
if equalized_lr_cfg is not None:
lr_mul_ = equalized_lr_cfg.get('lr_mul', 1.)
self.weight = nn.Parameter(
torch.randn(1, out_channels, in_channels, kernel_size,
kernel_size).div_(lr_mul_))
# build blurry layer for upsampling
if upsample and not self.deconv2conv:
factor = 2
p = (len(blur_kernel) - factor) - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, (pad0, pad1), upsample_factor=factor)
# build blurry layer for downsampling
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
# add equalized_lr hook for conv weight
if equalized_lr_cfg is not None:
equalized_lr(self, **equalized_lr_cfg)
# if `no_pad`, remove all of the padding in conv
self.padding = kernel_size // 2 if not no_pad else 0
def forward(self, x, style):
n, c, h, w = x.shape
# process style code
style = self.style_modulation(style).view(n, 1, c, 1,
1) + self.style_bias
# combine weight and style
weight = self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
weight = weight * demod.view(n, self.out_channels, 1, 1, 1)
weight = weight.view(n * self.out_channels, c, self.kernel_size,
self.kernel_size)
if self.upsample and not self.deconv2conv:
x = x.reshape(1, n * c, h, w)
weight = weight.view(n, self.out_channels, c, self.kernel_size,
self.kernel_size)
weight = weight.transpose(1, 2).reshape(n * c, self.out_channels,
self.kernel_size,
self.kernel_size)
x = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=n)
x = x.reshape(n, self.out_channels, *x.shape[-2:])
x = self.blur(x)
elif self.upsample and self.deconv2conv:
if self.up_after_conv:
x = x.reshape(1, n * c, h, w)
x = F.conv2d(x, weight, padding=self.padding, groups=n)
x = x.view(n, self.out_channels, *x.shape[2:4])
if self.with_interp_pad:
h_, w_ = x.shape[-2:]
up_cfg_ = deepcopy(self.up_config)
up_scale = up_cfg_.pop('scale_factor')
size_ = (h_ * up_scale + self.interp_pad,
w_ * up_scale + self.interp_pad)
x = F.interpolate(x, size=size_, **up_cfg_)
else:
x = F.interpolate(x, **self.up_config)
if not self.up_after_conv:
h_, w_ = x.shape[-2:]
x = x.view(1, n * c, h_, w_)
x = F.conv2d(x, weight, padding=self.padding, groups=n)
x = x.view(n, self.out_channels, *x.shape[2:4])
elif self.downsample:
x = self.blur(x)
x = x.view(1, n * self.in_channels, *x.shape[-2:])
x = F.conv2d(x, weight, stride=2, padding=0, groups=n)
x = x.view(n, self.out_channels, *x.shape[-2:])
else:
x = x.view(1, n * c, h, w)
x = F.conv2d(x, weight, stride=1, padding=self.padding, groups=n)
x = x.view(n, self.out_channels, *x.shape[-2:])
return x
class ModulatedStyleConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
style_channels,
upsample=False,
blur_kernel=[1, 3, 3, 1],
demodulate=True,
style_mod_cfg=dict(bias_init=1.),
style_bias=0.):
super(ModulatedStyleConv, self).__init__()
self.conv = ModulatedConv2d(
in_channels,
out_channels,
kernel_size,
style_channels,
demodulate=demodulate,
upsample=upsample,
blur_kernel=blur_kernel,
style_mod_cfg=style_mod_cfg,
style_bias=style_bias)
self.noise_injector = NoiseInjection()
self.activate = FusedBiasLeakyReLU(out_channels)
def forward(self, x, style, noise=None, return_noise=False):
out = self.conv(x, style)
if return_noise:
out, noise = self.noise_injector(
out, noise=noise, return_noise=return_noise)
else:
out = self.noise_injector(
out, noise=noise, return_noise=return_noise)
out = self.activate(out)
if return_noise:
return out, noise
else:
return out
class ModulatedPEStyleConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
style_channels,
upsample=False,
blur_kernel=[1, 3, 3, 1],
demodulate=True,
style_mod_cfg=dict(bias_init=1.),
style_bias=0.,
**kwargs):
super(ModulatedPEStyleConv, self).__init__()
self.conv = ModulatedPEConv2d(
in_channels,
out_channels,
kernel_size,
style_channels,
demodulate=demodulate,
upsample=upsample,
blur_kernel=blur_kernel,
style_mod_cfg=style_mod_cfg,
style_bias=style_bias,
**kwargs)
self.noise_injector = NoiseInjection()
self.activate = FusedBiasLeakyReLU(out_channels)
def forward(self, x, style, noise=None, return_noise=False):
out = self.conv(x, style)
if return_noise:
out, noise = self.noise_injector(
out, noise=noise, return_noise=return_noise)
else:
out = self.noise_injector(
out, noise=noise, return_noise=return_noise)
out = self.activate(out)
if return_noise:
return out, noise
else:
return out
class ModulatedToRGB(nn.Module):
def __init__(self,
in_channels,
style_channels,
out_channels=3,
upsample=True,
blur_kernel=[1, 3, 3, 1],
style_mod_cfg=dict(bias_init=1.),
style_bias=0.):
super(ModulatedToRGB, self).__init__()
if upsample:
self.upsample = UpsampleUpFIRDn(blur_kernel)
self.conv = ModulatedConv2d(
in_channels,
out_channels=out_channels,
kernel_size=1,
style_channels=style_channels,
demodulate=False,
style_mod_cfg=style_mod_cfg,
style_bias=style_bias)
self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
def forward(self, x, style, skip=None):
out = self.conv(x, style)
out = out + self.bias
if skip is not None:
skip = self.upsample(skip)
out = out + skip
return out
class ConvDownLayer(nn.Sequential):
def __init__(self,
in_channels,
out_channels,
kernel_size,
downsample=False,
blur_kernel=[1, 3, 3, 1],
bias=True,
act_cfg=dict(type='fused_bias')):
layers = []
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
stride = 2
self.padding = 0
else:
stride = 1
self.padding = kernel_size // 2
self.with_fused_bias = act_cfg is not None and act_cfg.get(
'type') == 'fused_bias'
if self.with_fused_bias:
conv_act_cfg = None
else:
conv_act_cfg = act_cfg
layers.append(
EqualizedLRConvModule(
in_channels,
out_channels,
kernel_size,
padding=self.padding,
stride=stride,
bias=bias and not self.with_fused_bias,
norm_cfg=None,
act_cfg=conv_act_cfg,
equalized_lr_cfg=dict(mode='fan_in', gain=1.)))
if self.with_fused_bias:
layers.append(FusedBiasLeakyReLU(out_channels))
super(ConvDownLayer, self).__init__(*layers)
class ResBlock(nn.Module):
def __init__(self, in_channels, out_channels, blur_kernel=[1, 3, 3, 1]):
super(ResBlock, self).__init__()
self.conv1 = ConvDownLayer(
in_channels, in_channels, 3, blur_kernel=blur_kernel)
self.conv2 = ConvDownLayer(
in_channels,
out_channels,
3,
downsample=True,
blur_kernel=blur_kernel)
self.skip = ConvDownLayer(
in_channels,
out_channels,
1,
downsample=True,
act_cfg=None,
bias=False,
blur_kernel=blur_kernel)
def forward(self, input):
out = self.conv1(input)
out = self.conv2(out)
skip = self.skip(input)
out = (out + skip) / math.sqrt(2)
return out
class ModMBStddevLayer(nn.Module):
"""Modified MiniBatch Stddev Layer.
This layer is modified from ``MiniBatchStddevLayer`` used in PGGAN. In
StyleGAN2, the authors add a new feature, `channel_groups`, into this
layer.
"""
def __init__(self,
group_size=4,
channel_groups=1,
sync_groups=None,
eps=1e-8):
super(ModMBStddevLayer, self).__init__()
self.group_size = group_size
self.eps = eps
self.channel_groups = channel_groups
self.sync_groups = group_size if sync_groups is None else sync_groups
def forward(self, x):
# batch size should be smaller than or equal to group size. Otherwise,
# batch size should be divisible by the group size.
assert x.shape[
0] <= self.group_size or x.shape[0] % self.group_size == 0, (
                'Batch size should be smaller than or equal '
                'to group size. Otherwise,'
                ' batch size should be divisible by the group size.'
                f' But got batch size {x.shape[0]},'
                f' group size {self.group_size}')
        assert x.shape[1] % self.channel_groups == 0, (
            'The feature channels must be divisible by "channel_groups". '
f'channel_groups: {self.channel_groups}, '
f'feature channels: {x.shape[1]}')
n, c, h, w = x.shape
group_size = min(n, self.group_size)
# [G, M, Gc, C', H, W]
y = torch.reshape(x, (group_size, -1, self.channel_groups,
c // self.channel_groups, h, w))
y = torch.var(y, dim=0, unbiased=False)
y = torch.sqrt(y + self.eps)
# [M, 1, 1, 1]
y = y.mean(dim=(2, 3, 4), keepdim=True).squeeze(2)
y = y.repeat(group_size, 1, h, w)
return torch.cat([x, y], dim=1)
```
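For reference, a minimal sketch of how `ModMBStddevLayer` changes tensor shapes, assuming the class defined above is already in scope (its import path is omitted on purpose): it appends one stddev feature map per channel group, so a `(N, C, H, W)` input becomes `(N, C + channel_groups, H, W)`.
```python
import torch

# Assumes ModMBStddevLayer (defined in the module above) is in scope.
layer = ModMBStddevLayer(group_size=4, channel_groups=1)
x = torch.randn(8, 16, 4, 4)   # batch of 8 is divisible by group_size=4
y = layer(x)
print(y.shape)                 # torch.Size([8, 17, 4, 4]): one stddev map appended
```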
#### File: models/losses/composition_loss.py
```python
import torch.nn as nn
from ..registry import LOSSES
from .pixelwise_loss import charbonnier_loss, l1_loss, mse_loss
_reduction_modes = ['none', 'mean', 'sum']
@LOSSES.register_module()
class L1CompositionLoss(nn.Module):
"""L1 composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            per-sample with 'mean', and then averages over all samples.
            Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of the original
                merged image before being normalized by ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicator matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1. - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * l1_loss(
pred_merged,
ori_merged,
weight,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class MSECompositionLoss(nn.Module):
"""MSE (L2) composition loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            per-sample with 'mean', and then averages over all samples.
            Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of the original
                merged image before being normalized by ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicator matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1. - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * mse_loss(
pred_merged,
ori_merged,
weight,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class CharbonnierCompLoss(nn.Module):
"""Charbonnier composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            per-sample with 'mean', and then averages over all samples.
            Default: False.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self,
loss_weight=1.0,
reduction='mean',
sample_wise=False,
eps=1e-12):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
self.eps = eps
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of the original
                merged image before being normalized by ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicator matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1. - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * charbonnier_loss(
pred_merged,
ori_merged,
weight,
eps=self.eps,
reduction=self.reduction,
sample_wise=self.sample_wise)
```
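A minimal usage sketch of the composition loss above (values are illustrative; it assumes `L1CompositionLoss` is in scope, e.g. via `from mmedit.models.losses import L1CompositionLoss`): the loss recomposites the image from the predicted alpha and compares it with the original merged image inside the weighted region.
```python
import torch

# Assumes L1CompositionLoss (defined above) is in scope.
loss_fn = L1CompositionLoss(loss_weight=1.0, reduction='mean')

pred_alpha = torch.rand(2, 1, 32, 32)    # predicted alpha matte in [0, 1]
fg = torch.rand(2, 3, 32, 32)            # foreground
bg = torch.rand(2, 3, 32, 32)            # background
ori_merged = torch.rand(2, 3, 32, 32)    # original (un-normalized) composite
weight = torch.ones(2, 1, 32, 32)        # e.g. 1 inside the unknown trimap region

loss = loss_fn(pred_alpha, fg, bg, ori_merged, weight)   # scalar tensor
```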
#### File: models/mattors/indexnet.py
```python
import torch
from mmcv.runner import auto_fp16
from ..builder import build_loss
from ..registry import MODELS
from .base_mattor import BaseMattor
from .utils import get_unknown_tensor
@MODELS.register_module()
class IndexNet(BaseMattor):
"""IndexNet matting model.
This implementation follows:
Indices Matter: Learning to Index for Deep Image Matting
Args:
backbone (dict): Config of backbone.
train_cfg (dict): Config of training. In 'train_cfg', 'train_backbone'
should be specified.
test_cfg (dict): Config of testing.
pretrained (str): path of pretrained model.
loss_alpha (dict): Config of the alpha prediction loss. Default: None.
loss_comp (dict): Config of the composition loss. Default: None.
"""
def __init__(self,
backbone,
train_cfg=None,
test_cfg=None,
pretrained=None,
loss_alpha=None,
loss_comp=None):
super().__init__(backbone, None, train_cfg, test_cfg, pretrained)
self.loss_alpha = (
build_loss(loss_alpha) if loss_alpha is not None else None)
self.loss_comp = (
build_loss(loss_comp) if loss_comp is not None else None)
# support fp16
self.fp16_enabled = False
def forward_dummy(self, inputs):
return self.backbone(inputs)
@auto_fp16(apply_to=('merged', 'trimap'))
def forward_train(self, merged, trimap, meta, alpha, ori_merged, fg, bg):
"""Forward function for training IndexNet model.
Args:
merged (Tensor): Input images tensor with shape (N, C, H, W).
Typically these should be mean centered and std scaled.
trimap (Tensor): Tensor of trimap with shape (N, 1, H, W).
meta (list[dict]): Meta data about the current data batch.
alpha (Tensor): Tensor of alpha with shape (N, 1, H, W).
ori_merged (Tensor): Tensor of origin merged images (not
normalized) with shape (N, C, H, W).
fg (Tensor): Tensor of foreground with shape (N, C, H, W).
bg (Tensor): Tensor of background with shape (N, C, H, W).
Returns:
dict: Contains the loss items and batch information.
"""
pred_alpha = self.backbone(torch.cat((merged, trimap), 1))
losses = dict()
weight = get_unknown_tensor(trimap, meta)
if self.loss_alpha is not None:
losses['loss_alpha'] = self.loss_alpha(pred_alpha, alpha, weight)
if self.loss_comp is not None:
losses['loss_comp'] = self.loss_comp(pred_alpha, fg, bg,
ori_merged, weight)
return {'losses': losses, 'num_samples': merged.size(0)}
def forward_test(self,
merged,
trimap,
meta,
save_image=False,
save_path=None,
iteration=None):
"""Defines the computation performed at every test call.
Args:
merged (Tensor): Image to predict alpha matte.
trimap (Tensor): Trimap of the input image.
meta (list[dict]): Meta data about the current data batch.
Currently only batch_size 1 is supported. It may contain
information needed to calculate metrics (``ori_alpha`` and
``ori_trimap``) or save predicted alpha matte
(``merged_path``).
save_image (bool, optional): Whether save predicted alpha matte.
Defaults to False.
save_path (str, optional): The directory to save predicted alpha
matte. Defaults to None.
            iteration (int, optional): If given as None, the saved alpha matte
                will have the same file name as ``merged_path`` in the meta
                dict. If given as an int, the saved alpha matte will be named
                with the postfix ``_{iteration}.png``. Defaults to None.
Returns:
dict: Contains the predicted alpha and evaluation result.
"""
pred_alpha = self.backbone(torch.cat((merged, trimap), 1))
pred_alpha = pred_alpha.cpu().numpy().squeeze()
pred_alpha = self.restore_shape(pred_alpha, meta)
eval_result = self.evaluate(pred_alpha, meta)
if save_image:
self.save_image(pred_alpha, meta, save_path, iteration)
return {'pred_alpha': pred_alpha, 'eval_result': eval_result}
```
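For orientation, a simplified sketch of the unknown-region weighting that `forward_train` relies on, assuming the common single-channel trimap convention (0 = background, 128 = unknown, 255 = foreground). The helper name below is illustrative only; the real logic, including one-hot trimaps, lives in `get_unknown_tensor`.
```python
import torch

def unknown_weight_sketch(trimap):
    """Illustrative stand-in for get_unknown_tensor (0/128/255 trimap assumed)."""
    return (trimap == 128).float()

trimap = torch.zeros(1, 1, 4, 4)
trimap[..., 1:3, 1:3] = 128      # mark a 2x2 unknown region
weight = unknown_weight_sketch(trimap)
print(weight.sum())              # tensor(4.): only unknown pixels are weighted
```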
#### File: test_data/test_pipelines/test_random_degradations.py
```python
import numpy as np
import pytest
from mmedit.datasets.pipelines import (DegradationsWithShuffle, RandomBlur,
RandomJPEGCompression, RandomNoise,
RandomResize)
def test_random_noise():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# Gaussian noise
model = RandomNoise(
params=dict(
noise_type=['gaussian'],
noise_prob=[1],
gaussian_sigma=[0, 50],
gaussian_gray_noise_prob=1),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# Poisson noise
model = RandomNoise(
params=dict(
noise_type=['poisson'],
noise_prob=[1],
poisson_scale=[0, 1],
poisson_gray_noise_prob=1),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradations with prob < 1
params = dict(
noise_type=['gaussian'],
noise_prob=[1],
gaussian_sigma=[0, 50],
gaussian_gray_noise_prob=1,
prob=0)
model = RandomNoise(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_jpeg_compression():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomJPEGCompression(params=dict(quality=[5, 50]), keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradations with prob < 1
params = dict(quality=[5, 50], prob=0)
model = RandomJPEGCompression(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_resize():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# upscale
model = RandomResize(
params=dict(
resize_mode_prob=[1, 0, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.]),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] >= 8 and results['lq'].shape[1] >= 8
# downscale
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 1, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.]),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] <= 8 and results['lq'].shape[1] <= 8
# keep size
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.]),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] == 8 and results['lq'].shape[1] == 8
# given target_size
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 16)),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (16, 16, 3)
    # skip degradation
model = RandomResize(
params=dict(
resize_mode_prob=[1, 0, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
prob=0),
keys=['lq'])
assert model(results) == results
with pytest.raises(NotImplementedError):
params = dict(
resize_mode_prob=[1],
resize_scale=[1],
resize_opt=['abc'],
resize_prob=[1])
model = RandomResize(params=params, keys=['lq'])
results = model(results)
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_blur():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# isotropic Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['iso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# anisotropic Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['aniso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# isotropic generalized Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['generalized_iso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# anisotropic generalized Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['generalized_aniso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
    # isotropic plateau kernel
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['plateau_iso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
    # anisotropic plateau kernel
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['plateau_aniso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# sinc (kernel size < 13)
model = RandomBlur(
params=dict(
kernel_size=[11],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# sinc (kernel size >= 13)
model = RandomBlur(
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# sinc (given omega)
model = RandomBlur(
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
omega=[0.1, 0.1]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradation
params = dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
prob=0)
model = RandomBlur(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_degradations_with_shuffle():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# shuffle all
model = DegradationsWithShuffle(
degradations=[
dict(
type='RandomBlur',
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
omega=[0.1, 0.1])),
dict(
type='RandomResize',
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 16))),
[
dict(
type='RandomJPEGCompression',
params=dict(quality=[5, 10])),
dict(
type='RandomJPEGCompression',
params=dict(quality=[15, 20]))
]
],
keys=['lq'],
shuffle_idx=None)
model(results)
# shuffle last 2
degradations = [
dict(
type='RandomBlur',
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
omega=[0.1, 0.1])),
dict(
type='RandomResize',
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 16))),
[
dict(type='RandomJPEGCompression', params=dict(quality=[5, 10])),
dict(type='RandomJPEGCompression', params=dict(quality=[15, 20]))
]
]
model = DegradationsWithShuffle(
degradations=degradations, keys=['lq'], shuffle_idx=(1, 2))
model(results)
assert repr(model) == model.__class__.__name__ \
+ f'(degradations={degradations}, ' \
+ "keys=['lq'], " \
+ 'shuffle_idx=(1, 2))'
```
#### File: test_backbones/test_encoder_decoders/test_deepfill_encoder.py
```python
import torch
from mmedit.models.backbones import ContextualAttentionNeck, DeepFillEncoder
from mmedit.models.common import SimpleGatedConvModule
def test_deepfill_enc():
encoder = DeepFillEncoder()
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.stride == (2, 2)
assert encoder.enc2.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_conv')
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_attention')
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 128
if torch.cuda.is_available():
encoder = DeepFillEncoder().cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.stride == (2, 2)
assert encoder.enc2.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_conv').cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_attention').cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 128
encoder = DeepFillEncoder(
conv_type='gated_conv', channel_factor=0.75).cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 96, 64, 64)
assert isinstance(encoder.enc2, SimpleGatedConvModule)
assert encoder.enc2.conv.stride == (2, 2)
assert encoder.enc2.conv.out_channels == 48 * 2
def test_deepfill_contextual_attention_neck():
# TODO: add unittest for contextual attention module
neck = ContextualAttentionNeck(in_channels=128)
x = torch.rand((2, 128, 64, 64))
mask = torch.zeros((2, 1, 64, 64))
mask[..., 20:100, 23:90] = 1.
res, offset = neck(x, mask)
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
if torch.cuda.is_available():
neck.cuda()
res, offset = neck(x.cuda(), mask.cuda())
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
neck = ContextualAttentionNeck(
in_channels=128, conv_type='gated_conv').cuda()
res, offset = neck(x.cuda(), mask.cuda())
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
assert isinstance(neck.conv1, SimpleGatedConvModule)
```
#### File: test_backbones/test_sr_backbones/test_dic_net.py
```python
import pytest
import torch
import torch.nn as nn
from mmedit.models import build_backbone
from mmedit.models.backbones.sr_backbones.dic_net import (
FeedbackBlock, FeedbackBlockCustom, FeedbackBlockHeatmapAttention)
def test_feedback_block():
x1 = torch.rand(2, 16, 32, 32)
model = FeedbackBlock(16, 3, 8)
x2 = model(x1)
assert x2.shape == x1.shape
x3 = model(x2)
assert x3.shape == x2.shape
def test_feedback_block_custom():
x1 = torch.rand(2, 3, 32, 32)
model = FeedbackBlockCustom(3, 16, 3, 8)
x2 = model(x1)
assert x2.shape == (2, 16, 32, 32)
def test_feedback_block_heatmap_attention():
x1 = torch.rand(2, 16, 32, 32)
heatmap = torch.rand(2, 5, 32, 32)
model = FeedbackBlockHeatmapAttention(16, 2, 8, 5, 2)
x2 = model(x1, heatmap)
assert x2.shape == x1.shape
x3 = model(x2, heatmap)
assert x3.shape == x2.shape
def test_dic_net():
model_cfg = dict(
type='DICNet',
in_channels=3,
out_channels=3,
mid_channels=48,
num_blocks=6,
hg_mid_channels=256,
hg_num_keypoints=68,
num_steps=4,
upscale_factor=8,
detach_attention=False)
# build model
model = build_backbone(model_cfg)
# test attributes
assert model.__class__.__name__ == 'DICNet'
# prepare data
inputs = torch.rand(1, 3, 16, 16)
targets = torch.rand(1, 3, 128, 128)
# prepare loss
loss_function = nn.L1Loss()
# prepare optimizer
optimizer = torch.optim.Adam(model.parameters())
# test on cpu
output, _ = model(inputs)
optimizer.zero_grad()
loss = loss_function(output[-1], targets)
loss.backward()
optimizer.step()
assert len(output) == 4
assert torch.is_tensor(output[-1])
assert output[-1].shape == targets.shape
# test on gpu
if torch.cuda.is_available():
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters())
inputs = inputs.cuda()
targets = targets.cuda()
output, _ = model(inputs)
optimizer.zero_grad()
loss = loss_function(output[-1], targets)
loss.backward()
optimizer.step()
assert len(output) == 4
assert torch.is_tensor(output[-1])
assert output[-1].shape == targets.shape
with pytest.raises(OSError):
model.init_weights('')
with pytest.raises(TypeError):
model.init_weights(1)
```
#### File: test_backbones/test_sr_backbones/test_tdan_net.py
```python
import pytest
import torch
from mmedit.models.backbones.sr_backbones.tdan_net import TDANNet
def test_tdan_net():
"""Test TDANNet."""
# gpu (DCN is available only on GPU)
if torch.cuda.is_available():
tdan = TDANNet().cuda()
input_tensor = torch.rand(1, 5, 3, 64, 64).cuda()
tdan.init_weights(pretrained=None)
output = tdan(input_tensor)
assert len(output) == 2 # (1) HR center + (2) aligned LRs
assert output[0].shape == (1, 3, 256, 256) # HR center frame
assert output[1].shape == (1, 5, 3, 64, 64) # aligned LRs
with pytest.raises(TypeError):
# pretrained should be str or None
tdan.init_weights(pretrained=[1])
```
#### File: test_models/test_extractors/test_lte.py
```python
import pytest
import torch
from mmedit.models import build_component
def test_lte():
model_cfg = dict(
type='LTE',
requires_grad=False,
pixel_range=1.,
pretrained=None,
load_pretrained_vgg=False)
lte = build_component(model_cfg)
assert lte.__class__.__name__ == 'LTE'
x = torch.rand(2, 3, 64, 64)
x_level3, x_level2, x_level1 = lte(x)
assert x_level1.shape == (2, 64, 64, 64)
assert x_level2.shape == (2, 128, 32, 32)
assert x_level3.shape == (2, 256, 16, 16)
lte.init_weights(None)
with pytest.raises(IOError):
model_cfg['pretrained'] = ''
lte = build_component(model_cfg)
x_level3, x_level2, x_level1 = lte(x)
lte.init_weights('')
with pytest.raises(TypeError):
lte.init_weights(1)
```
#### File: test_models/test_inpaintors/test_pconv_inpaintor.py
```python
import os
import tempfile
from unittest.mock import patch
import pytest
import torch
from mmcv import Config
from mmedit.models import build_model
from mmedit.models.losses import PerceptualVGG
@patch.object(PerceptualVGG, 'init_weights')
def test_pconv_inpaintor(init_weights):
cfg = Config.fromfile(
'tests/data/inpaintor_config/pconv_inpaintor_test.py')
if torch.cuda.is_available():
pconv_inpaintor = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
assert pconv_inpaintor.__class__.__name__ == 'PConvInpaintor'
pconv_inpaintor.cuda()
gt_img = torch.randn((1, 3, 256, 256)).cuda()
mask = torch.zeros_like(gt_img)
mask[..., 50:160, 100:210] = 1.
masked_img = gt_img * (1. - mask)
data_batch = dict(gt_img=gt_img, mask=mask, masked_img=masked_img)
optim_g = torch.optim.SGD(
pconv_inpaintor.generator.parameters(), lr=0.1)
optim_dict = dict(generator=optim_g)
outputs = pconv_inpaintor.train_step(data_batch, optim_dict)
assert outputs['results']['fake_res'].shape == (1, 3, 256, 256)
assert outputs['results']['final_mask'].shape == (1, 3, 256, 256)
assert 'loss_l1_hole' in outputs['log_vars']
assert 'loss_l1_valid' in outputs['log_vars']
assert 'loss_tv' in outputs['log_vars']
# test forward dummy
res = pconv_inpaintor.forward_dummy(
torch.cat([masked_img, mask], dim=1))
assert res.shape == (1, 3, 256, 256)
# test forward test w/o save image
outputs = pconv_inpaintor.forward_test(
masked_img[0:1], mask[0:1], gt_img=gt_img[0:1, ...])
assert 'eval_result' in outputs
assert outputs['eval_result']['l1'] > 0
assert outputs['eval_result']['psnr'] > 0
assert outputs['eval_result']['ssim'] > 0
# test forward test w/o eval metrics
pconv_inpaintor.test_cfg = dict()
pconv_inpaintor.eval_with_metrics = False
outputs = pconv_inpaintor.forward_test(masked_img[0:1], mask[0:1])
for key in ['fake_res', 'fake_img']:
assert outputs[key].size() == (1, 3, 256, 256)
# test forward test w/ save image
with tempfile.TemporaryDirectory() as tmpdir:
outputs = pconv_inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')])
assert os.path.exists(os.path.join(tmpdir, 'igccc_4396.png'))
# test forward test w/ save image w/ gt_img
with tempfile.TemporaryDirectory() as tmpdir:
outputs = pconv_inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
assert os.path.exists(os.path.join(tmpdir, 'igccc.png'))
with pytest.raises(AssertionError):
outputs = pconv_inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
gt_img=gt_img[0:1, ...])
with pytest.raises(AssertionError):
outputs = pconv_inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=None,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
# reset mock to clear some memory usage
init_weights.reset_mock()
```
#### File: mmediting-1/tools/deploy_test.py
```python
import argparse
import warnings
from typing import Any
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel
from torch import nn
from mmedit.apis import single_gpu_test
from mmedit.core.export import ONNXRuntimeEditing
from mmedit.datasets import build_dataloader, build_dataset
from mmedit.models import BasicRestorer, build_model
class TensorRTRestorerGenerator(nn.Module):
"""Inner class for tensorrt restorer model inference
Args:
trt_file (str): The path to the tensorrt file.
device_id (int): Which device to place the model.
"""
def __init__(self, trt_file: str, device_id: int):
super().__init__()
from mmcv.tensorrt import TRTWrapper, load_tensorrt_plugin
try:
load_tensorrt_plugin()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with TensorRT from source.')
model = TRTWrapper(
trt_file, input_names=['input'], output_names=['output'])
self.device_id = device_id
self.model = model
def forward(self, x):
with torch.cuda.device(self.device_id), torch.no_grad():
seg_pred = self.model({'input': x})['output']
seg_pred = seg_pred.detach().cpu()
return seg_pred
class TensorRTRestorer(nn.Module):
    """A wrapper class for the TensorRT restorer.
Args:
base_model (Any): The base model build from config.
trt_file (str): The path to the tensorrt file.
device_id (int): Which device to place the model.
"""
def __init__(self, base_model: Any, trt_file: str, device_id: int):
super().__init__()
self.base_model = base_model
restorer_generator = TensorRTRestorerGenerator(
trt_file=trt_file, device_id=device_id)
base_model.generator = restorer_generator
def forward(self, lq, gt=None, test_mode=False, **kwargs):
return self.base_model(lq, gt=gt, test_mode=test_mode, **kwargs)
class TensorRTEditing(nn.Module):
"""A class for testing tensorrt deployment
Args:
trt_file (str): The path to the tensorrt file.
cfg (Any): The configuration of the testing, \
decided by the config file.
device_id (int): Which device to place the model.
"""
def __init__(self, trt_file: str, cfg: Any, device_id: int):
super().__init__()
base_model = build_model(
cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
if isinstance(base_model, BasicRestorer):
WrapperClass = TensorRTRestorer
self.wrapper = WrapperClass(base_model, trt_file, device_id)
def forward(self, **kwargs):
return self.wrapper(**kwargs)
def parse_args():
parser = argparse.ArgumentParser(description='mmediting tester')
parser.add_argument('config', help='test config file path')
parser.add_argument('model', help='input model file')
parser.add_argument(
'backend',
help='backend of the model.',
choices=['onnxruntime', 'tensorrt'])
parser.add_argument('--out', help='output result pickle file')
parser.add_argument(
'--save-path',
default=None,
type=str,
help='path to store images and if not given, will not save image')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# init distributed env first, since logger depends on the dist info.
distributed = False
# build the dataloader
dataset = build_dataset(cfg.data.test)
loader_cfg = {
**dict((k, cfg.data[k]) for k in ['workers_per_gpu'] if k in cfg.data),
**dict(
samples_per_gpu=1,
drop_last=False,
shuffle=False,
dist=distributed),
**cfg.data.get('test_dataloader', {})
}
data_loader = build_dataloader(dataset, **loader_cfg)
# build the model
if args.backend == 'onnxruntime':
model = ONNXRuntimeEditing(args.model, cfg=cfg, device_id=0)
elif args.backend == 'tensorrt':
model = TensorRTEditing(args.model, cfg=cfg, device_id=0)
args.save_image = args.save_path is not None
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(
model,
data_loader,
save_path=args.save_path,
save_image=args.save_image)
print()
# print metrics
stats = dataset.evaluate(outputs)
for stat in stats:
print('Eval-{}: {}'.format(stat, stats[stat]))
# save result pickle
if args.out:
print('writing results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
if __name__ == '__main__':
main()
```
#### File: mmediting-1/tools/evaluate_comp1k.py
```python
import argparse
import os.path as osp
import re
import mmcv
import numpy as np
from mmedit.core.evaluation import connectivity, gradient_error, mse, sad
def evaluate_one(args):
"""Function to evaluate one sample of data.
Args:
args (tuple): Information needed to evaluate one sample of data.
Returns:
dict: The evaluation results including sad, mse, gradient error and
connectivity error.
"""
pred_alpha_path, alpha_path, trimap_path = args
pred_alpha = mmcv.imread(pred_alpha_path, flag='grayscale')
alpha = mmcv.imread(alpha_path, flag='grayscale')
if trimap_path is None:
trimap = np.ones_like(alpha)
else:
trimap = mmcv.imread(trimap_path, flag='grayscale')
sad_result = sad(alpha, trimap, pred_alpha)
mse_result = mse(alpha, trimap, pred_alpha)
grad_result = gradient_error(alpha, trimap, pred_alpha)
conn_result = connectivity(alpha, trimap, pred_alpha)
return (sad_result, mse_result, grad_result, conn_result)
def evaluate(pred_root, gt_root, trimap_root, verbose, nproc):
"""Evaluate test results of Adobe composition-1k dataset.
There are 50 different ground truth foregrounds and alpha mattes pairs,
    each of the foregrounds will be composited with 20 different backgrounds,
    producing 1000 images for testing. In some repos, the ground truth alpha
    matte is copied 20 times and named the same as the images. This function
    accepts both the original alpha matte folder (containing 50 ground truth
    alpha mattes) and the copied alpha matte folder (containing 1000 ground
    truth alpha mattes) for `gt_root`.
Example of copied name:
```
    alpha_matte1.png -> alpha_matte1_0.png
                        alpha_matte1_1.png
                        ...
                        alpha_matte1_19.png
```
Args:
pred_root (str): Path to the predicted alpha matte folder.
gt_root (str): Path to the ground truth alpha matte folder.
        trimap_root (str): Path to the trimap folder. If not specified, the
            metrics are computed over the full image.
        verbose (bool): Whether to print the result for each predicted alpha
            matte.
        nproc (int): Number of processes.
"""
images = sorted(mmcv.scandir(pred_root))
gt_files_num = len(list(mmcv.scandir(gt_root)))
# If ground truth alpha mattes are not copied (number of files is 50), we
# use the below pattern to recover the name of the original alpha matte.
if gt_files_num == 50:
pattern = re.compile(r'(.+)_(?:\d+)(.png)')
pairs = []
for img in images:
pred_alpha_path = osp.join(pred_root, img)
        # if ground truth alpha mattes are not copied, recover the original name
if gt_files_num == 50:
groups = pattern.match(img).groups()
alpha_path = osp.join(gt_root, ''.join(groups))
        # if ground truth alpha mattes are copied, the name should be the same
else: # gt_files_num == 1000
alpha_path = osp.join(gt_root, img)
trimap_path = (
osp.join(trimap_root, img) if trimap_root is not None else None)
pairs.append((pred_alpha_path, alpha_path, trimap_path))
results = mmcv.track_parallel_progress(evaluate_one, pairs, nproc)
if verbose:
# for sad_result, mse_result, grad_result, conn_result in results:
for i, img in enumerate(images):
sad_result, mse_result, grad_result, conn_result = results[i]
print(f'{img} SAD: {sad_result:.6g} MSE: {mse_result:.6g} '
f'GRAD: {grad_result:.6g} CONN: {conn_result:.6g}')
sad_mean, mse_mean, grad_mean, conn_mean = np.mean(results, axis=0)
print(f'MEAN: SAD: {sad_mean:.6g} MSE: {mse_mean:.6g} '
f'GRAD: {grad_mean:.6g} CONN: {conn_mean:.6g}')
def parse_args():
parser = argparse.ArgumentParser(
description='evaluate composition-1k prediction result')
parser.add_argument(
'pred_root', help='Path to the predicted alpha matte folder')
parser.add_argument(
'gt_root', help='Path to the ground truth alpha matte folder')
parser.add_argument(
'--trimap_root',
help='Path to trimap folder. If not specified, '
'results are calculated on the full image.')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Whether print result for each predicted alpha matte')
parser.add_argument(
        '--nproc', type=int, default=4, help='number of processes')
return parser.parse_args()
def main():
args = parse_args()
if not osp.exists(args.pred_root):
raise FileNotFoundError(f'pred_root {args.pred_root} not found')
if not osp.exists(args.gt_root):
raise FileNotFoundError(f'gt_root {args.gt_root} not found')
evaluate(args.pred_root, args.gt_root, args.trimap_root, args.verbose,
args.nproc)
if __name__ == '__main__':
main()
``` |
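A quick sketch of the file-name recovery used above when `gt_root` holds only the 50 original alpha mattes: the regex strips the trailing background index from a copied name (the sample name below is illustrative).
```python
import re

pattern = re.compile(r'(.+)_(?:\d+)(.png)')
groups = pattern.match('alpha_matte1_19.png').groups()
print(''.join(groups))   # alpha_matte1.png -> the original ground-truth name
```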
{
"source": "Jian137/Real-ESRGAN",
"score": 3
} |
#### File: Real-ESRGAN/dataset_mine/degradations.py
```python
import cv2
import math
import numpy as np
import random
import torch
from scipy import special
from scipy.stats import multivariate_normal
from torchvision.transforms.functional_tensor import rgb_to_grayscale
# -------------------------------------------------------------------- #
# --------------------------- blur kernels --------------------------- #
# -------------------------------------------------------------------- #
# --------------------------- util functions --------------------------- #
def sigma_matrix2(sig_x, sig_y, theta):
"""Calculate the rotated sigma matrix (two dimensional matrix).
Args:
sig_x (float):
sig_y (float):
theta (float): Radian measurement.
Returns:
ndarray: Rotated sigma matrix.
"""
d_matrix = np.array([[sig_x**2, 0], [0, sig_y**2]])
u_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
return np.dot(u_matrix, np.dot(d_matrix, u_matrix.T))
def mesh_grid(kernel_size):
"""Generate the mesh grid, centering at zero.
Args:
kernel_size (int):
Returns:
xy (ndarray): with the shape (kernel_size, kernel_size, 2)
xx (ndarray): with the shape (kernel_size, kernel_size)
yy (ndarray): with the shape (kernel_size, kernel_size)
"""
ax = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
xx, yy = np.meshgrid(ax, ax)
xy = np.hstack((xx.reshape((kernel_size * kernel_size, 1)), yy.reshape(kernel_size * kernel_size,
1))).reshape(kernel_size, kernel_size, 2)
return xy, xx, yy
def pdf2(sigma_matrix, grid):
"""Calculate PDF of the bivariate Gaussian distribution.
Args:
sigma_matrix (ndarray): with the shape (2, 2)
grid (ndarray): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size.
Returns:
        kernel (ndarray): un-normalized kernel.
"""
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
return kernel
def cdf2(d_matrix, grid):
"""Calculate the CDF of the standard bivariate Gaussian distribution.
Used in skewed Gaussian distribution.
Args:
        d_matrix (ndarray): skew matrix.
grid (ndarray): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size.
Returns:
cdf (ndarray): skewed cdf.
"""
rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
grid = np.dot(grid, d_matrix)
cdf = rv.cdf(grid)
return cdf
def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
"""Generate a bivariate isotropic or anisotropic Gaussian kernel.
    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
Args:
kernel_size (int):
sig_x (float):
sig_y (float):
theta (float): Radian measurement.
grid (ndarray, optional): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size. Default: None
isotropic (bool):
Returns:
kernel (ndarray): normalized kernel.
"""
if grid is None:
grid, _, _ = mesh_grid(kernel_size)
if isotropic:
sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
else:
sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
kernel = pdf2(sigma_matrix, grid)
kernel = kernel / np.sum(kernel)
return kernel
def bivariate_generalized_Gaussian(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
"""Generate a bivariate generalized Gaussian kernel.
Described in `Parameter Estimation For Multivariate Generalized
Gaussian Distributions`_
    by Pascal et al. (2013).
    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
Args:
kernel_size (int):
sig_x (float):
sig_y (float):
theta (float): Radian measurement.
beta (float): shape parameter, beta = 1 is the normal distribution.
grid (ndarray, optional): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size. Default: None
Returns:
kernel (ndarray): normalized kernel.
.. _Parameter Estimation For Multivariate Generalized Gaussian
Distributions: https://arxiv.org/abs/1302.6498
"""
if grid is None:
grid, _, _ = mesh_grid(kernel_size)
if isotropic:
sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
else:
sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.exp(-0.5 * np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta))
kernel = kernel / np.sum(kernel)
return kernel
def bivariate_plateau(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
    """Generate a plateau-like isotropic or anisotropic kernel.
1 / (1+x^(beta))
Ref: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution
    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
Args:
kernel_size (int):
sig_x (float):
sig_y (float):
theta (float): Radian measurement.
beta (float): shape parameter, beta = 1 is the normal distribution.
grid (ndarray, optional): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size. Default: None
Returns:
kernel (ndarray): normalized kernel.
"""
if grid is None:
grid, _, _ = mesh_grid(kernel_size)
if isotropic:
sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
else:
sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.reciprocal(np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta) + 1)
kernel = kernel / np.sum(kernel)
return kernel
def random_bivariate_Gaussian(kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
noise_range=None,
isotropic=True):
"""Randomly generate bivariate isotropic or anisotropic Gaussian kernels.
    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
Args:
kernel_size (int):
sigma_x_range (tuple): [0.6, 5]
sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
noise_range(tuple, optional): multiplicative kernel noise,
[0.75, 1.25]. Default: None
Returns:
kernel (ndarray):
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
if isotropic is False:
assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
rotation = np.random.uniform(rotation_range[0], rotation_range[1])
else:
sigma_y = sigma_x
rotation = 0
kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)
# add multiplicative noise
if noise_range is not None:
assert noise_range[0] < noise_range[1], 'Wrong noise range.'
noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
kernel = kernel * noise
kernel = kernel / np.sum(kernel)
return kernel
def random_bivariate_generalized_Gaussian(kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_range,
noise_range=None,
isotropic=True):
"""Randomly generate bivariate generalized Gaussian kernels.
    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
Args:
kernel_size (int):
sigma_x_range (tuple): [0.6, 5]
sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
beta_range (tuple): [0.5, 8]
noise_range(tuple, optional): multiplicative kernel noise,
[0.75, 1.25]. Default: None
Returns:
kernel (ndarray):
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
if isotropic is False:
assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
rotation = np.random.uniform(rotation_range[0], rotation_range[1])
else:
sigma_y = sigma_x
rotation = 0
# assume beta_range[0] < 1 < beta_range[1]
if np.random.uniform() < 0.5:
beta = np.random.uniform(beta_range[0], 1)
else:
beta = np.random.uniform(1, beta_range[1])
kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
# add multiplicative noise
if noise_range is not None:
assert noise_range[0] < noise_range[1], 'Wrong noise range.'
noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
kernel = kernel * noise
kernel = kernel / np.sum(kernel)
return kernel
def random_bivariate_plateau(kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_range,
noise_range=None,
isotropic=True):
"""Randomly generate bivariate plateau kernels.
    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
Args:
kernel_size (int):
sigma_x_range (tuple): [0.6, 5]
sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi/2, math.pi/2]
beta_range (tuple): [1, 4]
noise_range(tuple, optional): multiplicative kernel noise,
[0.75, 1.25]. Default: None
Returns:
kernel (ndarray):
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
if isotropic is False:
assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
rotation = np.random.uniform(rotation_range[0], rotation_range[1])
else:
sigma_y = sigma_x
rotation = 0
# TODO: this may be not proper
if np.random.uniform() < 0.5:
beta = np.random.uniform(beta_range[0], 1)
else:
beta = np.random.uniform(1, beta_range[1])
kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
# add multiplicative noise
if noise_range is not None:
assert noise_range[0] < noise_range[1], 'Wrong noise range.'
noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
kernel = kernel * noise
kernel = kernel / np.sum(kernel)
return kernel
def random_mixed_kernels(kernel_list,
kernel_prob,
kernel_size=21,
sigma_x_range=(0.6, 5),
sigma_y_range=(0.6, 5),
rotation_range=(-math.pi, math.pi),
betag_range=(0.5, 8),
betap_range=(0.5, 8),
noise_range=None):
"""Randomly generate mixed kernels.
Args:
        kernel_list (tuple): a list of kernel type names,
            support ['iso', 'aniso', 'generalized_iso', 'generalized_aniso',
            'plateau_iso', 'plateau_aniso']
kernel_prob (tuple): corresponding kernel probability for each
kernel type
kernel_size (int):
sigma_x_range (tuple): [0.6, 5]
sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
        betag_range (tuple): [0.5, 8]
        betap_range (tuple): [0.5, 8]
noise_range(tuple, optional): multiplicative kernel noise,
[0.75, 1.25]. Default: None
Returns:
kernel (ndarray):
"""
kernel_type = random.choices(kernel_list, kernel_prob)[0]
if kernel_type == 'iso':
kernel = random_bivariate_Gaussian(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True)
elif kernel_type == 'aniso':
kernel = random_bivariate_Gaussian(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False)
elif kernel_type == 'generalized_iso':
kernel = random_bivariate_generalized_Gaussian(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
betag_range,
noise_range=noise_range,
isotropic=True)
elif kernel_type == 'generalized_aniso':
kernel = random_bivariate_generalized_Gaussian(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
betag_range,
noise_range=noise_range,
isotropic=False)
elif kernel_type == 'plateau_iso':
kernel = random_bivariate_plateau(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True)
elif kernel_type == 'plateau_aniso':
kernel = random_bivariate_plateau(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False)
return kernel
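# Illustrative usage sketch (the values below are assumptions, not defaults from
# this repo): draw a random 21x21 kernel where isotropic and anisotropic
# Gaussians are equally likely, then apply it with OpenCV:
#     kernel = random_mixed_kernels(['iso', 'aniso'], [0.5, 0.5], kernel_size=21)
#     blurred = cv2.filter2D(img, -1, kernel)
# The returned kernel is normalized to sum to 1.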
np.seterr(divide='ignore', invalid='ignore')
def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
"""2D sinc filter, ref: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter
Args:
cutoff (float): cutoff frequency in radians (pi is max)
kernel_size (int): horizontal and vertical size, must be odd.
pad_to (int): pad kernel size to desired size, must be odd or zero.
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
kernel = np.fromfunction(
lambda x, y: cutoff * special.j1(cutoff * np.sqrt(
(x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(
(x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)), [kernel_size, kernel_size])
kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)
kernel = kernel / np.sum(kernel)
if pad_to > kernel_size:
pad_size = (pad_to - kernel_size) // 2
kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
return kernel
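# Illustrative example (the cutoff value is an assumption): a 21x21 sinc kernel
#     sinc_kernel = circular_lowpass_kernel(np.pi / 3, 21)
# approximates an ideal circular low-pass filter and is normalized to sum to 1.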
# ------------------------------------------------------------- #
# --------------------------- noise --------------------------- #
# ------------------------------------------------------------- #
# ----------------------- Gaussian Noise ----------------------- #
def generate_gaussian_noise(img, sigma=10, gray_noise=False):
"""Generate Gaussian noise.
Args:
img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
sigma (float): Noise scale (measured in range 255). Default: 10.
Returns:
(Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
float32.
"""
if gray_noise:
noise = np.float32(np.random.randn(*(img.shape[0:2]))) * sigma / 255.
noise = np.expand_dims(noise, axis=2).repeat(3, axis=2)
else:
noise = np.float32(np.random.randn(*(img.shape))) * sigma / 255.
return noise
def add_gaussian_noise(img, sigma=10, clip=True, rounds=False, gray_noise=False):
"""Add Gaussian noise.
Args:
img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        sigma (float): Noise scale (measured in range 255). Default: 10.
        clip (bool): Whether to clip the result to [0, 1]. Default: True.
        rounds (bool): Whether to round the result to the 255-level grid.
            Default: False.
        gray_noise (bool): Whether to add grayscale noise. Default: False.
    Returns:
        (Numpy array): Returned noisy image, shape (h, w, c), range [0, 1],
            float32.
"""
noise = generate_gaussian_noise(img, sigma, gray_noise)
out = img + noise
if clip and rounds:
out = np.clip((out * 255.0).round(), 0, 255) / 255.
elif clip:
out = np.clip(out, 0, 1)
elif rounds:
out = (out * 255.0).round() / 255.
return out
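# Illustrative usage (not from the original file): add noise with sigma=20
# (measured on the 0-255 scale) to a float32 image `img` in [0, 1].
#   noisy = add_gaussian_noise(img, sigma=20, clip=True, rounds=False, gray_noise=False)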
def generate_gaussian_noise_pt(img, sigma=10, gray_noise=0):
"""Add Gaussian noise (PyTorch version).
Args:
img (Tensor): Shape (b, c, h, w), range[0, 1], float32.
scale (float | Tensor): Noise scale. Default: 1.0.
Returns:
(Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
float32.
"""
b, _, h, w = img.size()
if not isinstance(sigma, (float, int)):
sigma = sigma.view(img.size(0), 1, 1, 1)
if isinstance(gray_noise, (float, int)):
cal_gray_noise = gray_noise > 0
else:
gray_noise = gray_noise.view(b, 1, 1, 1)
cal_gray_noise = torch.sum(gray_noise) > 0
if cal_gray_noise:
noise_gray = torch.randn(*img.size()[2:4], dtype=img.dtype, device=img.device) * sigma / 255.
noise_gray = noise_gray.view(b, 1, h, w)
# always calculate color noise
noise = torch.randn(*img.size(), dtype=img.dtype, device=img.device) * sigma / 255.
if cal_gray_noise:
noise = noise * (1 - gray_noise) + noise_gray * gray_noise
return noise
def add_gaussian_noise_pt(img, sigma=10, gray_noise=0, clip=True, rounds=False):
"""Add Gaussian noise (PyTorch version).
Args:
img (Tensor): Shape (b, c, h, w), range[0, 1], float32.
        sigma (float | Tensor): Noise scale (measured in range 255). A number
            or a Tensor with shape (b). Default: 10.
        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b);
            0 for False, 1 for True. Default: 0.
        clip (bool): Whether to clip the result to [0, 1]. Default: True.
        rounds (bool): Whether to round the result to the 255-level grid.
            Default: False.
    Returns:
        (Tensor): Returned noisy image, shape (b, c, h, w), range [0, 1],
            float32.
"""
noise = generate_gaussian_noise_pt(img, sigma, gray_noise)
out = img + noise
if clip and rounds:
out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
elif clip:
out = torch.clamp(out, 0, 1)
elif rounds:
out = (out * 255.0).round() / 255.
return out
# ----------------------- Random Gaussian Noise ----------------------- #
def random_generate_gaussian_noise(img, sigma_range=(0, 10), gray_prob=0):
sigma = np.random.uniform(sigma_range[0], sigma_range[1])
if np.random.uniform() < gray_prob:
gray_noise = True
else:
gray_noise = False
return generate_gaussian_noise(img, sigma, gray_noise)
def random_add_gaussian_noise(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
noise = random_generate_gaussian_noise(img, sigma_range, gray_prob)
out = img + noise
if clip and rounds:
out = np.clip((out * 255.0).round(), 0, 255) / 255.
elif clip:
out = np.clip(out, 0, 1)
elif rounds:
out = (out * 255.0).round() / 255.
return out
def random_generate_gaussian_noise_pt(img, sigma_range=(0, 10), gray_prob=0):
sigma = torch.rand(
img.size(0), dtype=img.dtype, device=img.device) * (sigma_range[1] - sigma_range[0]) + sigma_range[0]
gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
gray_noise = (gray_noise < gray_prob).float()
return generate_gaussian_noise_pt(img, sigma, gray_noise)
def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
noise = random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)
out = img + noise
if clip and rounds:
out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
elif clip:
out = torch.clamp(out, 0, 1)
# out = out/255.0
elif rounds:
out = (out * 255.0).round() / 255.
return out
# ----------------------- Poisson (Shot) Noise ----------------------- #
def generate_poisson_noise(img, scale=1.0, gray_noise=False):
"""Generate poisson noise.
Ref: https://github.com/scikit-image/scikit-image/blob/main/skimage/util/noise.py#L37-L219
Args:
img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        scale (float): Noise scale. Default: 1.0.
        gray_noise (bool): Whether to generate grayscale noise. Default: False.
    Returns:
        (Numpy array): The generated Poisson noise, with the same shape as
            ``img``, float32.
"""
if gray_noise:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# round and clip image for counting vals correctly
img = np.clip((img * 255.0).round(), 0, 255) / 255.
vals = len(np.unique(img))
vals = 2**np.ceil(np.log2(vals))
out = np.float32(np.random.poisson(img * vals) / float(vals))
noise = out - img
if gray_noise:
noise = np.repeat(noise[:, :, np.newaxis], 3, axis=2)
return noise * scale
def add_poisson_noise(img, scale=1.0, clip=True, rounds=False, gray_noise=False):
"""Add poisson noise.
Args:
img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        scale (float): Noise scale. Default: 1.0.
        clip (bool): Whether to clip the result to [0, 1]. Default: True.
        rounds (bool): Whether to round the result to the 255-level grid.
            Default: False.
        gray_noise (bool): Whether to add grayscale noise. Default: False.
Returns:
(Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
float32.
"""
noise = generate_poisson_noise(img, scale, gray_noise)
out = img + noise
if clip and rounds:
out = np.clip((out * 255.0).round(), 0, 255) / 255.
elif clip:
out = np.clip(out, 0, 1)
elif rounds:
out = (out * 255.0).round() / 255.
return out
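# Illustrative usage (not from the original file): Poisson (shot) noise scales
# with image intensity, so bright regions become noisier than dark ones.
#   noisy = add_poisson_noise(img, scale=1.0, clip=True, gray_noise=False)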
def generate_poisson_noise_pt(img, scale=1.0, gray_noise=0):
"""Generate a batch of poisson noise (PyTorch version)
Args:
img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
Default: 1.0.
gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
0 for False, 1 for True. Default: 0.
Returns:
        (Tensor): The generated Poisson noise, shape (b, c, h, w), float32.
"""
b, _, h, w = img.size()
if isinstance(gray_noise, (float, int)):
cal_gray_noise = gray_noise > 0
else:
gray_noise = gray_noise.view(b, 1, 1, 1)
cal_gray_noise = torch.sum(gray_noise) > 0
if cal_gray_noise:
img_gray = rgb_to_grayscale(img, num_output_channels=1)
# round and clip image for counting vals correctly
img_gray = torch.clamp((img_gray * 255.0).round(), 0, 255) / 255.
# use for-loop to get the unique values for each sample
vals_list = [len(torch.unique(img_gray[i, :, :, :])) for i in range(b)]
vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
vals = img_gray.new_tensor(vals_list).view(b, 1, 1, 1)
out = torch.poisson(img_gray * vals) / vals
noise_gray = out - img_gray
noise_gray = noise_gray.expand(b, 3, h, w)
# always calculate color noise
# round and clip image for counting vals correctly
img = torch.clamp((img * 255.0).round(), 0, 255) / 255.
# use for-loop to get the unique values for each sample
vals_list = [len(torch.unique(img[i, :, :, :])) for i in range(b)]
vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
vals = img.new_tensor(vals_list).view(b, 1, 1, 1)
out = torch.poisson(img * vals) / vals
noise = out - img
if cal_gray_noise:
noise = noise * (1 - gray_noise) + noise_gray * gray_noise
if not isinstance(scale, (float, int)):
scale = scale.view(b, 1, 1, 1)
return noise * scale
def add_poisson_noise_pt(img, scale=1.0, clip=True, rounds=False, gray_noise=0):
"""Add poisson noise to a batch of images (PyTorch version).
Args:
img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
        scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
            Default: 1.0.
        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b);
            0 for False, 1 for True. Default: 0.
        clip (bool): Whether to clip the result to [0, 1]. Default: True.
        rounds (bool): Whether to round the result to the 255-level grid.
            Default: False.
Returns:
(Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
float32.
"""
noise = generate_poisson_noise_pt(img, scale, gray_noise)
out = img + noise
if clip and rounds:
out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
elif clip:
out = torch.clamp(out, 0, 1)
elif rounds:
out = (out * 255.0).round() / 255.
return out
# ----------------------- Random Poisson (Shot) Noise ----------------------- #
def random_generate_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0):
scale = np.random.uniform(scale_range[0], scale_range[1])
if np.random.uniform() < gray_prob:
gray_noise = True
else:
gray_noise = False
return generate_poisson_noise(img, scale, gray_noise)
def random_add_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
noise = random_generate_poisson_noise(img, scale_range, gray_prob)
out = img + noise
if clip and rounds:
out = np.clip((out * 255.0).round(), 0, 255) / 255.
elif clip:
out = np.clip(out, 0, 1)
elif rounds:
out = (out * 255.0).round() / 255.
return out
def random_generate_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0):
scale = torch.rand(
img.size(0), dtype=img.dtype, device=img.device) * (scale_range[1] - scale_range[0]) + scale_range[0]
gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
gray_noise = (gray_noise < gray_prob).float()
return generate_poisson_noise_pt(img, scale, gray_noise)
def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob)
out = img + noise
if clip and rounds:
out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
elif clip:
out = torch.clamp(out, 0, 1)
# out = out/255.0
elif rounds:
out = (out * 255.0).round() / 255.
return out
# ------------------------------------------------------------------------ #
# --------------------------- JPEG compression --------------------------- #
# ------------------------------------------------------------------------ #
def add_jpg_compression(img, quality=90):
"""Add JPG compression artifacts.
Args:
img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
quality (float): JPG compression quality. 0 for lowest quality, 100 for
best quality. Default: 90.
Returns:
(Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
float32.
"""
img = np.clip(img, 0, 1)
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
_, encimg = cv2.imencode('.jpg', img * 255., encode_param)
img = np.float32(cv2.imdecode(encimg, 1)) / 255.
return img
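# Illustrative usage (not from the original file): round-trip a float32 image
# `img` in [0, 1] through JPEG at quality 50 to introduce compression artifacts.
#   degraded = add_jpg_compression(img, quality=50)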
def random_add_jpg_compression(img, quality_range=(90, 100)):
"""Randomly add JPG compression artifacts.
Args:
img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
quality_range (tuple[float] | list[float]): JPG compression quality
range. 0 for lowest quality, 100 for best quality.
Default: (90, 100).
Returns:
(Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
float32.
"""
quality = np.random.uniform(quality_range[0], quality_range[1])
return add_jpg_compression(img, quality)
``` |
{
"source": "jian17/models",
"score": 2
} |
#### File: models/data_util/extract_dataset.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import tensorflow as tf
import glob
import shutil
flags = tf.compat.v1.app.flags
flags.DEFINE_string('image_path','','Path to dataset')
flags.DEFINE_string('sorted_image_path','','Path to sorted images')
FLAGS = flags.FLAGS
def remove_unnecessary_files(image_path):
    regex = [r'\**\*.csv', r'\**\*depth.*']  # Windows-style recursive glob patterns
for r in regex:
filelist = glob.glob(image_path + r, recursive=True)
for filepath in filelist:
if os.path.exists(filepath):
os.remove(filepath)
def copy_files_to_destination(image_path, destination_path):
i = 1
gesture_labels = ['G' + str(i+1) for i in range(10)]
for g in gesture_labels:
        filelist = glob.glob(image_path + r'\**\{}\*.*'.format(g), recursive=True)
for f in filelist:
            shutil.copy(f, destination_path + r'\{}.jpg'.format(i))
i+=1
def copy_file(image_path):
    r = r'\**\G1\*.*'
filelist = glob.glob(image_path + r, recursive=True)
print(filelist)
def main(_):
path = FLAGS.image_path
remove_unnecessary_files(path)
    destination_path = path + r'\images'
if not os.path.exists(destination_path):
os.makedirs(destination_path)
copy_files_to_destination(path, destination_path)
if __name__ == '__main__':
tf.compat.v1.app.run()
``` |
{
"source": "jianafeng55/Recommender_System_Movies-",
"score": 3
} |
#### File: jianafeng55/Recommender_System_Movies-/most_popular.py
```python
import random
import math
from collections import defaultdict
from operator import itemgetter
import similarity
import utils
class MostPopular:
"""
    Recommend the most popular movies.
Top-N recommendation.
"""
def __init__(self, n_rec_movie=10, save_model=True):
"""
Init MostPopular with n_rec_movie.
:return: None
"""
print("MostPopular start...\n")
self.n_rec_movie = n_rec_movie
self.trainset = None
self.save_model = save_model
def fit(self, trainset):
"""
Fit the trainset via count movies.
:param trainset: train dataset
:return: None
"""
model_manager = utils.ModelManager()
try:
self.movie_popular = model_manager.load_model('movie_popular')
self.movie_count = model_manager.load_model('movie_count')
self.trainset = model_manager.load_model('trainset')
self.total_movies = model_manager.load_model('total_movies')
self.movie_popular_sort = model_manager.load_model('movie_popular_sort')
print('MostPopular model has saved before.\nLoad model success...\n')
except OSError:
print('No model saved before.\nTrain a new model...')
self.trainset = trainset
self.movie_popular, self.movie_count = similarity.calculate_movie_popular(trainset)
self.total_movies = list(self.movie_popular.keys())
self.movie_popular_sort = sorted(self.movie_popular.items(), key=itemgetter(1), reverse=True)
print('Train a new model success.')
if self.save_model:
model_manager.save_model(self.movie_popular, 'movie_popular')
model_manager.save_model(self.movie_count, 'movie_count')
model_manager.save_model(self.total_movies, 'total_movies')
model_manager.save_model(self.movie_popular_sort, 'movie_popular_sort')
print('The new model has saved success.\n')
def recommend(self, user):
"""
        Recommend the N most popular movies that the user has not watched.
        :param user: The user we recommend movies to.
        :return: the N most popular movies not yet watched by the user
"""
if not self.n_rec_movie or not self.trainset or not self.movie_popular \
or not self.movie_count or not self.movie_popular_sort:
            raise NotImplementedError('MostPopular has not been initialized or fit() has not been called yet.')
N = self.n_rec_movie
# Recommend N most popular movies for the user.
predict_movies = list()
watched_movies = self.trainset[user]
        for movie, _ in self.movie_popular_sort:
            if len(predict_movies) >= N:
                break
            if movie not in watched_movies:
                predict_movies.append(movie)
return predict_movies
def test(self, testset):
"""
Test the recommendation system by recommending scores to all users in testset.
:param testset: test dataset
:return:
"""
if not self.n_rec_movie or not self.trainset or not self.movie_popular \
or not self.movie_count or not self.movie_popular_sort:
            raise ValueError('MostPopular has not been initialized or fit() has not been called yet.')
self.testset = testset
print('Test recommendation system start...')
N = self.n_rec_movie
        # variables for precision and recall
hit = 0
rec_count = 0
test_count = 0
        # variables for coverage
all_rec_movies = set()
        # variables for popularity
popular_sum = 0
        # record the time spent on evaluation.
test_time = utils.LogTime(print_step=1000)
for i, user in enumerate(self.trainset):
test_movies = self.testset.get(user, {})
rec_movies = self.recommend(user) # type:list
for movie in rec_movies:
if movie in test_movies:
hit += 1
all_rec_movies.add(movie)
popular_sum += math.log(1 + self.movie_popular[movie])
# log steps and times.
rec_count += N
test_count += len(test_movies)
            # progress is logged every 1000 iterations (see print_step above).
test_time.count_time()
precision = hit / (1.0 * rec_count)
recall = hit / (1.0 * test_count)
coverage = len(all_rec_movies) / (1.0 * self.movie_count)
popularity = popular_sum / (1.0 * rec_count)
print('Test recommendation system success.')
test_time.finish()
print('precision=%.4f\trecall=%.4f\tcoverage=%.4f\tpopularity=%.4f\n' %
(precision, recall, coverage, popularity))
def predict(self, testset):
"""
Recommend movies to all users in testset.
:param testset: test dataset
:return: `dict` : recommend list for each user.
"""
movies_recommend = defaultdict(list)
print('Predict scores start...')
    # record the time spent on prediction.
predict_time = utils.LogTime(print_step=500)
for i, user in enumerate(testset):
rec_movies = self.recommend(user) # type:list
movies_recommend[user].append(rec_movies)
# log steps and times.
predict_time.count_time()
print('Predict scores success.')
predict_time.finish()
return movies_recommend
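# ------------------------------------------------------------------ #
# Illustrative usage sketch (not part of the original module). It assumes
# `trainset`/`testset` are dicts of the form {user: {movie: rating}}, which is
# the shape recommend() and test() index into above; how they are built
# (e.g. via helpers in `utils`) is outside this file.
#   model = MostPopular(n_rec_movie=10, save_model=False)
#   model.fit(trainset)
#   print(model.recommend(user_id))
#   model.test(testset)
# ------------------------------------------------------------------ #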
``` |
{
"source": "jianajavier/Quiet-Text",
"score": 2
} |
#### File: Quiet-Text/src/quiet_syntax_highlighting.py
```python
import tkinter as tk
import math
import yaml
import tkinter.font as tk_font
from pygments import lex
from pygments.lexers import PythonLexer, CLexer, JavascriptLexer
from quiet_zutilityfuncs import load_settings_data, store_settings_data
class SyntaxHighlighting():
def __init__(self, parent, text_widget, initial_content):
self.settings = load_settings_data()
self.parent = parent
self.text = text_widget
self.font_family = self.parent.font_family
self.font_size = self.parent.font_size
self.previousContent = initial_content
self.lexer = PythonLexer
self.comment_tokens = [
"Token.Comment.Single",
]
self.string_tokens = [
"Token.Name.Function",
"Token.Name.Class",
"Token.String",
"Token.Literal.String.Single",
"Token.Literal.String.Double"
]
self.object_tokens = [
"Token.Name.Class",
"Token.Name.Function",
]
self.number_tokens = [
"Token.Keyword.Constant",
"Token.Literal.String.Interpol",
"Token.Literal.Number.Integer",
"Token.Literal.Number.Float",
"Token.Name.Decorator",
]
self.keyword_tokens = [
"Token.Operator",
"Token.Operator.Word",
"Token.Keyword.Namespace",
]
self.function_tokens = [
"Token.Keyword",
"Token.Name.Builtin",
"Token.Literal.String.Affix",
"Token.Name.Function.Magic",
]
self.class_tokens = [
"Token.Name.Builtin.Pseudo",
]
self.variable_tokens = [
"Token.Name.Namespace",
]
self.comment_color = '#928374'
self.string_color = '#b8bb26'
self.number_color = '#d3869b'
self.keyword_color = '#fe8019'
self.function_color = '#8ec87c'
self.class_color = '#d3869b'
self.object_color = '#b8bb26'
def default_highlight(self):
row = float(self.text.index(tk.INSERT))
row = str(math.trunc(row))
content = self.text.get("1.0", tk.END)
lines = content.split("\n")
if (self.previousContent != content):
self.text.mark_set("range_start", row + ".0")
data = self.text.get(row + ".0", row + "." + str(len(lines[int(row) - 1])))
for token, content in lex(data, self.lexer()):
self.text.mark_set("range_end", "range_start + %dc" % len(content))
self.text.tag_add(str(token), "range_start", "range_end")
self.text.mark_set("range_start", "range_end")
self.previousContent = self.text.get("1.0", tk.END)
def syntax_theme_configuration(self):
for token in self.comment_tokens:
self.text.tag_configure(token, foreground=self.comment_color)
for token in self.string_tokens:
self.text.tag_configure(token, foreground=self.string_color)
for token in self.number_tokens:
self.text.tag_configure(token, foreground=self.number_color)
for token in self.keyword_tokens:
self.text.tag_configure(token, foreground=self.keyword_color)
for token in self.function_tokens:
self.text.tag_configure(token, foreground=self.function_color)
for token in self.class_tokens:
            self.text.tag_configure(token, foreground=self.class_color, font=self.parent.italics)
for token in self.object_tokens:
self.text.tag_configure(token, foreground=self.object_color)
def initial_highlight(self, *args):
content = self.text.get("1.0", tk.END)
self.text.mark_set("range_start", "1.0")
data = self.text.get("1.0", tk.END)
        for token, content in lex(data, self.lexer()):
self.text.mark_set("range_end", "range_start + %dc" % len(content))
self.text.tag_add(str(token), "range_start", "range_end")
self.text.mark_set("range_start", "range_end")
self.previousContent = self.text.get("1.0", tk.END)
self.syntax_theme_configuration()
def load_new_theme(self, path):
with open(path) as new_theme_config:
new_config = yaml.load(new_theme_config, Loader=yaml.FullLoader)
self.comment_color = new_config['comment_color']
self.string_color = new_config['string_color']
self.number_color = new_config['number_color']
self.keyword_color = new_config['keyword_color']
self.function_color = new_config['function_color']
self.class_color = new_config['class_color']
self.object_color = new_config['object_color']
settings = load_settings_data()
settings['menu_fg'] = new_config['comment_color']
settings['menu_bg'] = new_config['bg_color']
settings['font_color'] = new_config['font_color']
settings['textarea_background_color'] = new_config['bg_color']
settings['menubar_active_bg'] = new_config['menu_bg_active']
settings['menubar_active_fg'] = new_config['menu_fg_active']
settings['menu_active_bg'] = new_config['menu_bg_active']
settings['menu_active_fg'] = new_config['menu_fg_active']
store_settings_data(settings)
self.parent.reconfigure_settings()
self.syntax_theme_configuration()
self.initial_highlight()
``` |
{
"source": "jianan06/MGH_ECG",
"score": 3
} |
#### File: jianan06/MGH_ECG/feature_extract.py
```python
import mne
import neurokit2 as nk
import numpy as np
from scipy.stats import linregress
from biosppy.signals import ecg
def extract_ecg_features(ecg_epoch, fs):
    '''
    Extract HRV-based ECG features from ``ecg_epoch``.
    :param ecg_epoch: 1-D ECG signal for one epoch
    :param fs: sampling frequency in Hz
    :return: (features, feature_names)
    '''
#detect R peaks
# Find peaks
peaks, info = nk.ecg_peaks(ecg_epoch, sampling_rate=fs)
R_peaks = info['ECG_R_Peaks']
# Compute HRV indices
hrv = nk.hrv(peaks, sampling_rate=fs, show=True)
feature_names = list(hrv.columns)
features = hrv.values.flatten()
"""
# compute entropy
sampen = nk.entropy_sample(ecg_epoch)
# combine sample entropy to existing ones
features = np.r_[features, sampen]
feature_names.append('SampEn')
"""
# cardiopulmonary coupling
# ????
return features, feature_names
"""
R_peak = biosppy.signals.ecg.christove_segmenter(signal = ecg_epoch, sampling_rate = fs)
# low frequency
if sampling_rate <
return low_frequency
# high frequency
if sampling_rate >
return high_frequency
# very low frequency
if sampling_rate <
return very_low_frequency
"""
def band_power(psd, freq, low, high, return_db=True):
band_psd = psd[(freq>low) & (freq<high)]
bp = band_psd.sum()*(freq[1]-freq[0])
if return_db:
bp = 10*np.log10(bp)
return bp
def extract_abd_features(abd_epoch, fs):
    '''
    Extract respiratory features from the abdominal belt signal ``abd_epoch``.
    :param abd_epoch: 1-D abdominal respiration signal for one epoch
    :param fs: sampling frequency in Hz
    :return: (features, feature_names)
    '''
# Clean signal
cleaned = nk.rsp_clean(abd_epoch, sampling_rate=fs)
# Extract peaks
df, peaks_dict = nk.rsp_peaks(cleaned)
peaks_dict = nk.rsp_fixpeaks(peaks_dict)
rsp_rate = nk.rsp_rate(cleaned, peaks_dict, sampling_rate=fs)
rrv = nk.rsp_rrv(rsp_rate, peaks_dict, sampling_rate=fs)
feature_names = list(rrv.columns)
features = rrv.values.flatten()
peaks = peaks_dict['RSP_Peaks']
amplitude = cleaned[peaks_dict['RSP_Peaks']]
# compute the following features
# group 1: features not based on spectrum (temporal features, time domain)
# respiratory rate variability (RRV), or peak-to-peak interval standard deviation
#rrv = np.std(np.diff(peaks)/fs)
# envelope mean
env_mean = amplitude.mean()
# envelope standard deviation
env_std = amplitude.std()
# group 2: features based on spectrum (spectral features, frequency domain)
# convert signal into spectrum
psd, freq = mne.time_frequency.psd_array_multitaper(
abd_epoch, fs, fmin=0, fmax=1,
bandwidth=0.01, normalization='full')
"""
# high band power in db
bp_high_db = band_power(psd, freq, 0.1, 1)
# low band power in db
bp_low_db = band_power(psd, freq, 0.01, 0.1)
# high/low
bp_high_low_ratio = bp_high_db / bp_low_db
# spectrogram kurtosis
spec_peakness =
"""
# spectrum slope - 1/f
# alpha is negative of the slope from lineer regression between log f and log psd
#y = np.log(psd)
#x = np.log(freq)
#alpha = -linregress(x, y).slope
"""
# group 3: complexity (nonlinear domain)
# sample entropy of the waveform
entropy = nk.entropy_sample(abd_epoch)
# poincare plot
# hurst exponent
# lypnov exponent
# detrended fluctuation analysis
"""
features = np.r_[features, env_mean, env_std]#, alpha]
feature_names = np.r_[feature_names, ['env_mean', 'env_std']]#, '1/f alpha'
return features, feature_names
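# Illustrative usage sketch (not part of the original module); it assumes
# neurokit2's signal simulators are available to fabricate test signals.
#   fs = 250
#   ecg_epoch = nk.ecg_simulate(duration=60, sampling_rate=fs)
#   ecg_feats, ecg_names = extract_ecg_features(ecg_epoch, fs)
#   abd_epoch = nk.rsp_simulate(duration=60, sampling_rate=fs)
#   abd_feats, abd_names = extract_abd_features(abd_epoch, fs)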
``` |
{
"source": "JiananGao/DP-NMF",
"score": 2
} |
#### File: JiananGao/DP-NMF/dp-nmf.py
```python
import tensorflow as tf
import numpy as np
import tensorflow.examples.tutorials.mnist.input_data as input_data
from pnmf import PNMF
import numpy.linalg as LA
def dpnmf(hidden_units= 500, prob=0.4, lambd= 0.4, alpha= 0.2, beta= 0.2):
###### Denoising Autoencoder ######
# Parameter Declaration
learning_rate = 0.001
batch_size = 50
n_epochs = 50
# Variables/Tensors Declaration
x = tf.placeholder(tf.float32, [None, 784], name='x')
dfro = tf.placeholder(tf.float32, [None, 500], name='dfro')
    x_ = tf.add(x, np.random.normal(loc=0.0, scale=prob, size=(batch_size, 784)).astype(np.float32))
ind = tf.Variable(0, tf.int32)
n_inp = 784
n_out = hidden_units
A = tf.Variable(tf.random_uniform([n_inp, n_out], -1.0 / np.sqrt(n_inp), 1.0 / np.sqrt(n_inp)) ,dtype=tf.float32 )
b = tf.Variable(tf.truncated_normal([n_out], dtype=tf.float32))
A_ = tf.Variable(tf.random_uniform([n_out, n_inp], -1.0 / np.sqrt(n_inp), 1.0 / np.sqrt(n_inp)) ,dtype=tf.float32 )
b_ = tf.Variable(tf.truncated_normal([n_inp], dtype=tf.float32))
z = tf.nn.sigmoid(tf.matmul(x_ , A) + b)
y = tf.nn.sigmoid(tf.matmul(z , A_) + b_)
cost = tf.reduce_mean(-tf.reduce_sum(x * tf.log(tf.clip_by_value(y ,1e-10,1.0))) ) #Cross Entropy Loss
# Manual Gradient Computation
lout = tf.subtract(y,x)
lh = tf.multiply(tf.multiply(tf.matmul(lout, A), z) , (tf.subtract(1.0,z)) )
lb = lh
lb_ = lout
grad_A = tf.add(tf.matmul(tf.transpose(x_) , lh), tf.matmul(tf.transpose(lout), z ))
grad_b = tf.reduce_mean(lb, axis=0)
grad_b_ = tf.reduce_mean(lb_, axis=0)
new_A = A.assign(A - learning_rate * grad_A)
new_A_ = A_.assign(tf.transpose(A))
new_b = b.assign(b - learning_rate * grad_b )
new_b_ = b_.assign(b_ - learning_rate * grad_b_ )
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
mean_img = np.mean(mnist.train.images, axis=0)
saver = tf.train.Saver()
###### Denoising Autoencoder Training ######
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# saver.restore(sess, "./weights/da.cpkt")
print "Pretraining DA..."
for epoch_i in range(n_epochs):
avg_cost = 0
batches= mnist.train.num_examples // batch_size
for batch_i in range(batches):
batch_xs, _ = mnist.train.next_batch(batch_size)
_, _, _, _, ce = sess.run([new_A, new_A_, new_b, new_b_, cost], feed_dict={x: batch_xs})
# _,ce = sess.run([optimizer, cost], feed_dict={x: train})
avg_cost += ce / batches
print(epoch_i, avg_cost)
save = saver.save(sess, "./weights/da.ckpt")
###### Finetuning and factor computation ######
# Parameter Declaration
n_iter = 50 # Number of total iterations
nd_iter = 30 # Number subiterations for Mul-update rules
rank = 10 # Rank for NMF
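    # Joint objective optimized below (illustrative summary, inferred from the
    # update rules): lambd * cross-entropy of the autoencoder on clean inputs
    # + ||f(V) - W H||_F^2, where f(V) is the hidden encoding of the training
    # images and (W, H) are the rank-10 NMF factors refined by PNMF.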
z = tf.nn.sigmoid(tf.matmul(x , A) + b) # clean input from further computations
y = tf.nn.sigmoid(tf.matmul(z , A_) + b_)
# Computing f(V), reduced input
train_xs = mnist.train.images
fV = sess.run(z, feed_dict={x: train_xs})
W = np.random.random((fV.shape[0], rank))
H = np.random.random((rank, fV.shape[1]))
np.save('encodings/mnist_'+str(hidden_units), fV)
cost = tf.reduce_mean(-tf.reduce_sum(x * tf.log(tf.clip_by_value(y ,1e-10,1.0))) )
# Computing Custom Gradients
lout = tf.subtract(y,x)
lh = tf.multiply(tf.multiply(tf.matmul(lout, A), z) , (tf.subtract(1.0,z)) )
lb = lh
lb_ = lout
frob_norm = 2*(fV - np.dot(W,H))
grad_A1 = tf.add(tf.matmul(tf.transpose(x), lh), tf.matmul(tf.transpose(lout), z ))
grad_A2 = tf.matmul(tf.transpose(x), tf.multiply(z, tf.subtract(1.0, z)))*2*tf.reduce_mean(dfro, axis=0)
grad_A = lambd*grad_A1 + grad_A2
grad_b1 = tf.reduce_mean(lb, axis=0)
grad_b2 = tf.reduce_mean(tf.multiply(z , tf.subtract(1.0, z))*dfro, axis=0)
grad_b = lambd*grad_b1 + grad_b2
grad_b_ = tf.reduce_mean(lb_, axis=0)
new_A = A.assign(A - learning_rate * grad_A)
new_A_ = A_.assign(A_ - learning_rate * tf.transpose(grad_A1) )
new_b = b.assign(b - learning_rate * grad_b )
new_b_ = b_.assign(b_ - learning_rate * grad_b_ )
print "Finetuning..."
for i in range(n_iter):
pnmf = PNMF(fV, W=W, H=H, rank=rank)
pnmf.compute_factors(max_iter= nd_iter, alpha= alpha, beta= beta)
W = pnmf.W
H= pnmf.H
avg_cost = 0
batches= mnist.train.num_examples // batch_size
for batch_i in range(batches):
batch_xs, _ = mnist.train.next_batch(batch_size)
frob_errors = frob_norm[batch_size*batch_i :batch_size*batch_i + batch_size ]
            _, _, _, _, ce = sess.run([new_A, new_A_, new_b, new_b_, cost], feed_dict={x: batch_xs, dfro: frob_errors})
frob_norm = 2*(fV - np.dot(W,H))
avg_cost += ce / batches
frob_error = LA.norm(fV-np.dot(W,H))
total_loss = lambd * avg_cost + frob_error
        print(str(i) + " : " + str(lambd) + "*" + str(avg_cost) + "+" + str(frob_error) + "=" + str(total_loss))
if __name__ == '__main__':
dpnmf()
``` |
{
"source": "JiananHe/LicenseAnalysis",
"score": 3
} |
#### File: LicenseAnalysis/ClauseAnalysis/LicenseMatcher.py
```python
from ClauseAnalysis.FilePreprocess import *
from ClauseAnalysis.SentenceExtractor import *
from ClauseAnalysis.SentenceFilter import *
from ClauseAnalysis.SentenceTokenizer import *
import re
license_rules_arrays = []
def read_license_rules():
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "LicenseRules.txt")
    with open(file_path, 'r', encoding='UTF-8') as file_open:
        content_string = file_open.read()
global license_rules_arrays
license_rules = content_string.split("\n")
    # Traverse in reverse order so that removals do not shift indices;
    # remove blank lines and comments (lines starting with '#')
# remove the blank line and comments(start with "#")
if not license_rules[index] or license_rules[index][0] == '#':
license_rules.remove(license_rules[index])
for rule in license_rules:
items = rule.split(":")
rule_name = items[0]
rule_tokenizer = items[1]
rule_tokenizer_array = rule_tokenizer.split(",")
rule_array = [rule_name, rule_tokenizer_array]
license_rules_arrays.append(rule_array)
# print(sentence)
# print(items)
def contains_license_rule(rule_tokenizer_array, result_tokenizer_array):
    """Return True if the rule's tokens appear, in order, as a subsequence of
    the tokens extracted from the analysed file."""
    result_match = 0
    result_length = len(result_tokenizer_array)
    for rule_index in range(len(rule_tokenizer_array)):
        for result_index in range(result_match, result_length):
            if rule_tokenizer_array[rule_index] == result_tokenizer_array[result_index]:
                # continue the search after the token that was just matched
                result_match = result_index + 1
                break
        else:
            # the rule token was not found in the remaining extracted tokens
            return False
    return True
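# Illustrative behaviour of the subsequence check above (not from the original
# file): the rule tokens must occur in the extracted tokens in the same order.
#   contains_license_rule(['A', 'C'], ['A', 'B', 'C'])  -> True
#   contains_license_rule(['C', 'A'], ['A', 'B', 'C'])  -> False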
def license_match_analyse(result_tokenizer_array):
for rules_array in license_rules_arrays:
rule_tokenizer_array = rules_array[1]
is_matched = contains_license_rule(rule_tokenizer_array, result_tokenizer_array)
        if is_matched:
            return rules_array[0]
    return None
class LicenseMatcher:
tokenizer_result_array = []
def __init__(self, tokenizerResultArray):
self.tokenizer_result_array = tokenizerResultArray
read_license_rules()
def execute(self):
return license_match_analyse(self.tokenizer_result_array)
def LicenseMatcherInterface(file_path):
"""
    The entry point of the license clause analysis; it chains all analysis steps.
:param file_path: the path of file needed to be analysed
:return: license_result: the license name of file
"""
filePreprocess = FilePreprocess(file_path)
input_file = filePreprocess.execute()
sentenceExtractor = SentenceExtractor(input_file)
sentenceExtractor.set_analyse_type("text")
sentences_extracted = sentenceExtractor.execute()
sentenceFilter = SentenceFilter(sentences_extracted)
good_sentences, bad_sentences = sentenceFilter.execute()
sentenceTokenizer = SentenceTokenizer(good_sentences)
tokenizer_result_arrays = sentenceTokenizer.execute()
licenseMatcher = LicenseMatcher(tokenizer_result_arrays)
license_result = licenseMatcher.execute()
return license_result
if __name__ == '__main__':
# filePreprocess = FilePreprocess("licenseTestCase1")
# input_file = filePreprocess.execute()
#
# sentenceExtractor = SentenceExtractor(input_file)
# sentences_extracted = sentenceExtractor.execute()
# file_path = os.path.join(sys.path[0], "licenseTestBSD3.txt")
# license_result = LicenseMatcherInterface(file_path)
license_result = LicenseMatcherInterface(r'C:\Users\13249\Desktop\license.txt')
print(license_result)
```
#### File: LicenseAnalysis/ClauseAnalysis/SentenceTokenizer.py
```python
from ClauseAnalysis.FilePreprocess import *
from ClauseAnalysis.SentenceExtractor import *
from ClauseAnalysis.SentenceFilter import *
import re
license_sentences_array = []
def read_license_sentences():
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "licenseSentence.txt")
    with open(file_path, 'r', encoding='UTF-8') as file_open:
        content_string = file_open.read()
global license_sentences_array
license_sentences = content_string.split("\n")
    # Traverse in reverse order so that removals do not shift indices;
    # drop blank lines and comments (lines starting with '#')
for index in list(range(len(license_sentences)-1, -1, -1)):
if not license_sentences[index] or license_sentences[index][0] == '#':
license_sentences.remove(license_sentences[index])
for sentence in license_sentences:
items = sentence.split(":")
license_sentences_array.append(items)
# print(sentence)
# print(items)
def contains_license_sentence(sentence):
tokenizer_array = []
for licenseSentenceArray in license_sentences_array:
search_result = re.search(r'' + licenseSentenceArray[3] + '', sentence, re.I)
if search_result:
search_part = re.search(r'part\d', licenseSentenceArray[0], re.I)
# If tokenizer's name contains word "Part", like "ApacheLicWherePart1" or "ApacheLicWherePart2v2",
            # we need to search for the next tokenizer in the remaining part of the sentence.
tokenizer_array.append(licenseSentenceArray[0])
if search_part:
sub_sentence = re.sub(r'' + licenseSentenceArray[3] + '', "", sentence)
sub_search_result = contains_license_sentence(sub_sentence)
if sub_search_result:
tokenizer_array += sub_search_result
            return tokenizer_array
    return None
def sentence_tokenizer_analyse(sentences):
tokenizer_array = []
for sentence in sentences:
tokenizer = contains_license_sentence(sentence)
if tokenizer:
tokenizer_array += tokenizer
return tokenizer_array
class SentenceTokenizer:
good_sentence_array = []
def __init__(self, goodSentenceArray):
self.good_sentence_array = goodSentenceArray
read_license_sentences()
def execute(self):
return sentence_tokenizer_analyse(self.good_sentence_array)
if __name__ == '__main__':
# filePreprocess = FilePreprocess("licenseTestCase1")
# input_file = filePreprocess.execute()
#
# sentenceExtractor = SentenceExtractor(input_file)
# sentences_extracted = sentenceExtractor.execute()
file_path = os.path.join(sys.path[0], "licenseTestBSD3.txt")
filePreprocess = FilePreprocess(file_path)
input_file = filePreprocess.execute()
sentenceExtractor = SentenceExtractor(input_file)
sentenceExtractor.set_analyse_type("text")
sentences_extracted = sentenceExtractor.execute()
sentenceFilter = SentenceFilter(sentences_extracted)
good_sentences, bad_sentences = sentenceFilter.execute()
sentenceTokenizer = SentenceTokenizer(good_sentences)
tokenizer_result_array = sentenceTokenizer.execute()
for g in good_sentences:
print(g)
for t in tokenizer_result_array:
print(t)
```
#### File: LicenseAnalysis/Compliance/views.py
```python
import json
import os
import sys
import time
import shutil
from django.http import HttpResponse
from django.shortcuts import render
import LicenseModel.models as LM
import Compliance.licenseExtract as LE
import ClauseAnalysis.LicenseMatcher as LCM
import Compliance.complianceAnalysis as LCA
def upload_file(myfile):
nowTime = int(time.time())
UploadFolder = sys.path[0] + os.sep + "UploadFiles"
newPath = os.path.join(UploadFolder, myfile.name + str(nowTime))
newFile = open(newPath, 'wb+')
for chunk in myfile.chunks():
newFile.write(chunk)
newFile.close()
fob = open(newPath, 'r', encoding='UTF-8')
text = fob.read()
    fob.close()
return str(text), newPath
def treeHtmlCode(fileName, layer):
code = r''
if layer == 1:
code += r'<li><span><i class="icon-folder-open"></i>' + str(fileName) + '</span><ul>'
elif layer > 1:
code += r'<li><span><i class="icon-minus-sign"></i>' + str(fileName) + '</span><ul>'
else:
code += r'</ul></li>'
return code
def upload_folder(myfolder):
# mkdir
nowTime = int(time.time())
savePath = sys.path[0] + os.sep + "UploadFiles" + os.sep + "folder" + str(nowTime)
os.mkdir(savePath)
# web tree structure html string
tree_content = r'<ul>'
# dict with all file name and its content after analysis
files_content = {}
# dict with all file name and its license name after analysis
license_names = {}
# license id list, archived for compliance detection
license_id_list = []
dir_stack = []
dir_name = ""
file_id = 0
for file in myfolder:
file_tag = "file" + str(file_id)
file_path = file.name
file_id = file_id + 1
print("each file name: " + file_path)
path_list = file_path.split('/')
# upload single file
file_name = path_list[len(path_list) - 1]
new_file_path = os.path.join(savePath, file_name)
new_file = open(new_file_path, 'wb+')
for chunk in file.chunks():
new_file.write(chunk)
new_file.close()
fob = open(new_file_path, 'r', encoding='UTF-8')
text = str(fob.read())
# call the license extraction code 1.0
licenseId, tmp = LE.generate_license_presentation(text)
# call the license extraction code 2.0 if the old algorithm get -1
if licenseId == -1:
license_abbr = LCM.LicenseMatcherInterface(new_file_path)
print("*******license analysis results with clause analysis*********" + str(license_abbr))
licenseId = LM.getLicenseId(license_abbr)
if not licenseId == -1:
license_id_list.append(licenseId)
# get license abbreviation
licenseAbbr = LM.getLicenseAbbr(licenseId)
files_content[str(file_tag)] = json.dumps(tmp)
license_name = LM.getLicenseName(licenseId)
license_name = '<a href="/license/introduction?search-text=' + \
licenseAbbr + '"><black>' + license_name + '</black></a>'
license_names[str(file_tag)] = json.dumps(license_name)
# record file directory structure
dir_layer = 0
for pt in path_list:
print(pt)
if pt == file_name:
tree_content += r'<li><span><i class="icon-leaf"></i>' + str(
file_name) + '</span><a onclick=showContent("' + str(file_tag) + '")>' + str(
licenseAbbr) + '</a></li>'
elif pt in dir_stack:
dir_layer = dir_layer + 1
continue
elif dir_layer == len(dir_stack): # push
dir_layer = dir_layer + 1
dir_stack.append(pt)
tree_content += treeHtmlCode(pt, dir_layer)
else: # pop olds then push new
while dir_layer < len(dir_stack):
tree_content += treeHtmlCode('', -1)
dir_stack.pop()
dir_layer = dir_layer + 1
dir_stack.append(pt)
tree_content += treeHtmlCode(pt, dir_layer)
tree_content += r'</ul>'
# call compliance analysis code
print("---------license_id_dict-------------")
license_id_set = set(license_id_list) # unduplicated
license_id_dict = {}
for i, license_id in enumerate(license_id_set):
license_id_dict[i] = license_id
print(license_id_dict)
compliance_detector = LCA.Compliance(license_id_dict)
compliance_license_id = compliance_detector.get_compatible_licenses_processed()
print(compliance_license_id)
compliance_result = 'The licenses for uploaded files includes: <br /><strongBlue>'
for id in license_id_set:
abbr = LM.getLicenseAbbr(id)
compliance_result += '<a href="/license/introduction?search-text=' + abbr + '">' + abbr + '</a>'
compliance_result += ', '
compliance_result += '</strongBlue><br />The licenses which are compatible with them includes:<br /><strongBlue>'
for id in compliance_license_id.values():
abbr = LM.getLicenseAbbr(id)
compliance_result += '<a href="/license/introduction?search-text=' + abbr + '">' + abbr + '</a>'
compliance_result += ', '
compliance_result += '</strongBlue>'
return files_content, tree_content, license_names, compliance_result
# Create your views here.
def index(request):
if request.POST:
# user upload a folder
myfolder = request.FILES.getlist("user_folder", None)
# user upload a file
myfile = request.FILES.get("user_file", None)
# user input license content
text = request.POST['user_input']
if myfolder:
files_content, tree_content, license_names, compliance_result = upload_folder(myfolder)
return render(request, "compliance.html", {'hidden1': "", 'hidden2': "Hidden",
'files_content': files_content,
'license_names': license_names,
'tree_content': tree_content,
'compliance_result': json.dumps(compliance_result)})
elif myfile:
text, new_file_path = upload_file(myfile)
# print("============= user file text : " + text)
# print("========== the end of text : ")
# call the license extraction code 1.0
id, result = LE.generate_license_presentation(text)
# call the license extraction code 2.0 if the old algorithm get -1
if id == -1:
license_abbr = LCM.LicenseMatcherInterface(new_file_path)
print("*******license analysis results with clause analysis*********" + str(license_abbr))
id = LM.getLicenseId(license_abbr)
license_name = LM.getLicenseName(id)
license_abbr = LM.getLicenseAbbr(id)
license_name = '<a href="/license/introduction?search-text=' + \
license_abbr + '"><black>' + license_name + '</black></a>'
return render(request, "compliance.html", {'result': json.dumps(result),
'license_name': json.dumps(license_name),
'hidden1': "Hidden",
'hidden2': ""})
elif text != "":
text = str(text)
# print("========== use input text : " + text)
# print("========== the end of text : ")
id, result = LE.generate_license_presentation(text)
# print(result)
license_name = LM.getLicenseName(id)
return render(request, "compliance.html", {'result': json.dumps(result),
'license_name': json.dumps(license_name),
'hidden1': "Hidden",
'hidden2': ""})
else:
return render(request, "compliance.html", {'hidden1': "Hidden", 'hidden2': "Hidden"})
else:
return render(request, "compliance.html", {'hidden1': "Hidden", 'hidden2': "Hidden"})
```
#### File: LicenseAnalysis/Conflict/views.py
```python
import time
from django.http import HttpResponse
from django.shortcuts import render
import os, sys
import json
import LicenseModel.models as LM
import Compliance.licenseExtract as LE
import ClauseAnalysis.LicenseMatcher as LCM
import Compliance.complianceAnalysis as LCA
def upload_file(myfolder):
for file in myfolder:
print("each file name: " + file.name)
def treeHtmlCode(fileName, layer):
code = r''
if layer == 1:
code += r'<li><span><i class="icon-folder-open"></i>' + str(fileName) + '</span><ul>'
elif layer > 1:
code += r'<li><span><i class="icon-minus-sign"></i>' + str(fileName) + '</span><ul>'
else:
code += r'</ul></li>'
return code
def upload_folder(myfolder):
# mkdir
nowTime = int(time.time())
savePath = sys.path[0] + os.sep + "UploadFiles" + os.sep + "folder" + str(nowTime)
os.mkdir(savePath)
# web tree structure html string
tree_content = r'<ul>'
# dict with all file name and its content after analysis
files_content = {}
# dict with all file name and its license name after analysis
license_names = {}
# license id list, archived for compliance detection
license_id_list = []
license_abbr_list = []
dir_stack = []
dir_name = ""
file_id = 0
for file in myfolder:
file_tag = "file" + str(file_id)
file_path = file.name
file_id = file_id + 1
print("each file name: " + file_path)
path_list = file_path.split('/')
# upload single file
file_name = path_list[len(path_list) - 1]
new_file_path = os.path.join(savePath, file_name)
new_file = open(new_file_path, 'wb+')
for chunk in file.chunks():
new_file.write(chunk)
new_file.close()
fob = open(new_file_path, 'r', encoding='UTF-8')
text = str(fob.read())
# call the license extraction code 1.0
licenseId, tmp = LE.generate_license_presentation(text)
# call the license extraction code 2.0 if the old algorithm get -1
if licenseId == -1:
license_abbr = LCM.LicenseMatcherInterface(new_file_path)
print("*******license analysis results with clause analysis*********" + str(license_abbr))
licenseId = LM.getLicenseId(license_abbr)
if not licenseId == -1:
license_id_list.append(licenseId)
# get license abbreviation
licenseAbbr = LM.getLicenseAbbr(licenseId)
if licenseAbbr != 'no license':
license_abbr_list.append(licenseAbbr)
files_content[str(file_tag)] = json.dumps(tmp)
license_name = LM.getLicenseName(licenseId)
license_name = '<a href="/license/introduction?search-text=' + \
licenseAbbr + '"><black>' + license_name + '</black></a>'
license_names[str(file_tag)] = json.dumps(license_name)
# record file directory structure
dir_layer = 0
for pt in path_list:
print(pt)
if pt == file_name:
tree_content += r'<li><span><i class="icon-leaf"></i>' + str(
file_name) + '</span><a onclick=showContent("' + str(file_tag) + '")>' + str(
licenseAbbr) + '</a></li>'
elif pt in dir_stack:
dir_layer = dir_layer + 1
continue
elif dir_layer == len(dir_stack): # push
dir_layer = dir_layer + 1
dir_stack.append(pt)
tree_content += treeHtmlCode(pt, dir_layer)
else: # pop olds then push new
while dir_layer < len(dir_stack):
tree_content += treeHtmlCode('', -1)
dir_stack.pop()
dir_layer = dir_layer + 1
dir_stack.append(pt)
tree_content += treeHtmlCode(pt, dir_layer)
tree_content += r'</ul>'
    # conflict analysis: currently a hard-coded placeholder compatibility matrix
conflict_list = [['Y', 'Y', 'N', 'Y', 'Y', 'Y', 'Y'],
['Y', 'Y', 'Y', 'N', 'Y', 'Y', 'Y'],
['N', 'Y', 'Y', 'Y', 'Y', 'Y', 'N'],
['Y', 'N', 'Y', 'Y', 'Y', 'Y', 'Y'],
['Y', 'Y', 'Y', 'Y', 'Y', 'N', 'Y'],
['Y', 'Y', 'Y', 'Y', 'N', 'Y', 'Y'],
['Y', 'Y', 'N', 'Y', 'Y', 'Y', 'Y']]
conflict_result = r'<table class="table">'
conflict_result += r'<tbody>'
conflict_result += r'<tr>'
conflict_result += r'<td> </td>'
for abbr in license_abbr_list:
conflict_result += r'<td>' + r'<a href="/license/introduction?search-text=' + abbr + r'">' + abbr + r'</a></td>'
conflict_result += r'</tr>'
for i, abbr in enumerate(license_abbr_list):
conflict_result += r'<tr>'
conflict_result += r'<td>' + r'<a href="/license/introduction?search-text=' + abbr + r'">' + abbr + r'</a></td>'
for ans in conflict_list[i]:
conflict_result += r'<td>' + ans + r'</td>'
conflict_result += r'</tr>'
conflict_result += r'</tbody>'
conflict_result += r'</table>'
return files_content, tree_content, license_names, conflict_result
# Create your views here.
def index(request):
if request.POST:
# user upload a folder
myfolder = request.FILES.getlist("user_folder", None)
if myfolder:
files_content, tree_content, license_names, conflict_result = upload_folder(myfolder)
return render(request, "conflict.html", {'hidden1': "",
'files_content': files_content,
'license_names': license_names,
'tree_content': tree_content,
'conflict_result': json.dumps(conflict_result)})
else:
return render(request, "conflict.html", {'hidden1': "Hidden", 'hidden2': "Hidden"})
else:
return render(request, "conflict.html", {'hidden1': "Hidden", 'hidden2': "Hidden"})
``` |
{
"source": "jianantian/entity_recognizer",
"score": 2
} |
#### File: jianantian/entity_recognizer/edie_dis.py
```python
import logging
import numpy as np
import pypinyin
import re
import os
from pprint import pprint
from pypinyin import lazy_pinyin
from pypinyin import pinyin
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
def get_pinyin(hans):
"""把汉字转换为对应的拼音, 声调用数字表示, 写在相应的韵母之后"""
return ''.join(lazy_pinyin(hans, style=pypinyin.NORMAL))
def check_lang(token):
"""检查token是否为英语(包含阿拉伯数字)"""
lang_pattern=r'^[A-Za-z0-9_]+$'
return re.search(lang_pattern, token)
def trans_punctuation(word):
    # Remove \xa0 from the string to avoid errors.
    # Chinese and English punctuation are mixed in the documents, so replace
    # full-width punctuation with half-width. The Chinese full stop (。) is not
    # converted so it stays distinguishable from the decimal point and can be
    # used for sentence splitting.
word = word.replace('\xa0', '')
trans_table= str.maketrans(',:“”();、',',:\"\"();&')
word = word.translate(trans_table)
return word
def levenshtein(str_1, str_2):
"""计算两个字符串之间的 Levenshtein距离"""
if not check_lang(str_1):
str_1 = get_pinyin(str_1)
if not check_lang(str_2):
str_2 = get_pinyin(str_2)
len_1 = len(str_1) + 1
len_2 = len(str_2) + 1
    tmp_mat = np.zeros((len_1, len_2), dtype=int)
for i in range(1, len_1):
tmp_mat[i, 0] = i
for j in range(1, len_2):
tmp_mat[0, j] = j
for j in range(1, len_2):
for i in range(1, len_1):
if str_1[i-1] == str_2[j-1]:
tmp_mat[i, j] = tmp_mat[i-1, j-1]
else:
tmp_mat[i, j] = min(
tmp_mat[i-1, j] + 1,
tmp_mat[i, j - 1] + 1,
tmp_mat[i-1, j-1] + 1,
)
return tmp_mat[-1, -1]
def get_dis(word, word_list):
dis_list = [levenshtein(word, stand_word)/len(stand_word) for stand_word in word_list]
dis_list_modi = [x for x in dis_list if x < 0.2]
if len(dis_list_modi):
return 1
else:
return 0
def get_seed(dic_path, dic_type):
dic_name = dic_type + '.txt'
dic = os.path.join(dic_path, dic_name)
with open(dic, 'r', encoding='utf8') as dic_fr:
word_list = [trans_punctuation(x.split()[0]) for x in dic_fr.readlines()]
return word_list
dic_path = 'C:/Users/yingying.zhu/Documents/dicts'
dic_type = 'bodypart'
words = get_seed(dic_path, dic_type)
s_1 = '肺'
s_2 = '肺"'
print(get_pinyin('肺"'))
dis = levenshtein(s_1, s_2)
rate = dis/max(len(get_pinyin(s_1)), len(get_pinyin(s_2)))
print(rate)
pprint(words)
print(get_dis(s_2, words))
```
#### File: jianantian/entity_recognizer/get_tutor.py
```python
import json
import math
import os
import re
import string
from pprint import pprint
from functools import reduce
def list_add(a, b):
return a+b
def modify_for_re(word):
"""在字符串中的(及)前面加上\, 方便转换成正则表达式"""
to_be_removed = ['(', ')', '+', '*', '^', '.', '?', '$', '|']
for signal in to_be_removed:
word = word.replace(signal, '\\' + signal)
return word
def trans_punctuation(word):
    # Remove \xa0 from the string to avoid errors.
    # Chinese and English punctuation are mixed in the documents, so replace
    # full-width punctuation with half-width. The Chinese full stop (。) is not
    # converted so it stays distinguishable from the decimal point and can be
    # used for sentence splitting.
word = word.replace('\xa0', '')
trans_table= str.maketrans(',:“”();、',',:\"\"();&')
word = word.translate(trans_table)
return word
def remove_punctuation(word):
punctuation = string.punctuation
punc_list = [ord(s) for s in punctuation]
trans_table = dict().fromkeys(punc_list, '')
return word.translate(trans_table)
def get_ordinary_word(ord_path):
"""常用词词典"""
ord_dic = []
with open(ord_path, 'r', encoding='utf8') as fr:
for line in fr.readlines():
ord_dic.append(line.split()[0])
return ord_dic
def get_seed(dic_path, dic_type):
dic_name = dic_type + '.txt'
dic = os.path.join(dic_path, dic_name)
with open(dic, 'r', encoding='utf8') as dic_fr:
word_list = [trans_punctuation(x.split()[0]) for x in dic_fr.readlines()]
return word_list
def get_neg_list(dic_path, ord_path, dic_type):
neg_list = get_ordinary_word(ord_path)
dic_list = os.listdir(dic_path)
for dic_name in dic_list:
if (dic_name.endswith('txt')) and (dic_type not in dic_name) and ('total' not in dic_name):
neg_path = os.path.join(dic_path, dic_name)
with open(neg_path, 'r', encoding='utf8') as dic_fr:
w_list = [trans_punctuation(x.split()[0]) for x in dic_fr.readlines() if len(x.strip()) > 0]
neg_list.extend(w_list)
return neg_list
def manual_filtration(word, neg_list):
"""作用在 word 上, 若该词为负例则返回 True, 否则返回 False"""
pattern_1 = r',|\.|:|;|"'
pattern_2 = r'行|示|为|较|见|天|音'
pattern_3 = r'切除|标本|摄取|存在|活检|穿刺|开口|引流|胸痛|患者|治疗|不适|受限|疼痛|基本|压缩'
pattern_4 = r'^[A-Za-z0-9_]+$'
remove_word_list = neg_list + ['病理', '癌', '炎', '占位']
tnm_pattern = r'[Tt]\S{1,2}[Nn][xX0123][Mm][01]'
word_no_punc = remove_punctuation(word)
if ((not re.search(pattern_1, word)) and (not re.search(pattern_2, word)) and (not re.search(pattern_3, word))
and (not re.search(pattern_4, word)) and len(word_no_punc) > 1 and (word_no_punc not in remove_word_list)):
if (not re.search(tnm_pattern, word)) and re.search(r'\d', word):
return True
else:
return False
else:
return True
def get_txt_file(txt_path):
""" 从整理好的文本中读取相应内容做成一个列表,
列表的每个元为一个句子. 该结果仅用于提取字典使用,
不能用于后面的任务, 因为所有病人的文本都混到了一起 """
txt_file = []
file_list = os.listdir(txt_path)
for file in file_list:
if file.endswith('json'):
with open(os.path.join(txt_path, file), 'r', encoding='utf8') as txt_fr:
txt_dic = json.load(txt_fr)
for txt_date in txt_dic.keys():
line_list = [line.strip() for line in txt_dic[txt_date].split('。') if line.strip() !='']
txt_file.extend(line_list)
return txt_file
# Generator-based alternative: it has to be regenerated on every use, and it is
# unclear whether it is faster than the list version above.
# def get_txt_file(path):
#     """ Read the cleaned text files into one list with one sentence per element.
#     The result is only used for dictionary extraction and must not be used for
#     later tasks, because the texts of all patients are mixed together. """
#     file_list = os.listdir(path)
# for file in file_list:
# if os.path.splitext(file)[-1] == '.json':
# with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
# txt_dic = json.load(txt_fr)
# for txt_date in txt_dic.keys():
# for line in (line.strip() for line in txt_dic[txt_date].split('。') if line.strip() !=''):
# yield txt_file
def find_mod(txt_file, word_list):
"""用字典中的词发现文本模式"""
word_count = {}
# 用一个字典保存, key为发现的文本模式, 键值为匹配该模式的词典中的词的数目
mod_list = []
# 文本模式以列表形式保存
word_match = {}
p = 5
q = 5
for line in txt_file:
#line = trans_punctuation(line)
if len(line) > 0:
for word in word_list:
word = modify_for_re(word)
loc_list = [w.start() for w in re.finditer(word, line)]
for loc in loc_list:
for i in range(1, (p + 1)):
for j in range(1, (q + 1)):
if loc - i >= 0 and loc + len(word) + j < len(line):
ext_word = line[loc - i: loc + len(word) + j]
ext_wd = modify_for_re(ext_word)
local_ind = ext_wd.index(word)
try:
# mod = re.compile(ext_wd[:local_ind]+'(\S{%d})'%len(word)+ext_wd[local_ind+len(word):])
mod = (ext_wd[:local_ind], ext_wd[local_ind + len(word):])
except re.error:
print(word + '\t\t' + ext_word + '\n')
if mod not in mod_list:
mod_list.append(mod)
word_match[mod] = {word}
else:
word_match[mod].add(word)
for mod in mod_list:
word_count[mod] = len(word_match[mod])
return mod_list, word_count, word_match
def find_word(txt_file, mod_list, word_list, neg_list):
"""用发现的模式去发现文本中的新词"""
mod_count = {}
# 键为发现的模式, 相应的值为匹配到的词的数目
mod_match = {}
# 键为发现的模式, 相应的值为匹配到的词的集合
mod_match_neg = {}
mod_match_unlabeled = {}
new_word = set()
# 匹配到的新词的集合
for mod in mod_list:
word_set = set()
for line in txt_file:
#line = trans_punctuation(line)
left_index = [w.end() for w in re.finditer(mod[0], line)]
right_index = [w.start() for w in re.finditer(mod[1], line)]
start = 0
i, j = 0, 0
for i in range(len(left_index)):
if start < len(right_index):
for j in range(start, len(right_index)):
if right_index[j] > left_index[i] and (i == len(left_index)-1 or right_index[j] <= left_index[i+1]):
word = line[left_index[i]: right_index[j]]
if len(word) < 15:
# print (word)
# print (file)
word_set.add(word)
start += 1
break
elif i < len(left_index) - 1 and right_index[j] > left_index[i+1]:
break
else:
start += 1
num_extract = len(word_set)
mod_count[mod] = num_extract
mod_match[mod] = word_set
unlabeled_word = word_set.difference(set(word_list))
#neg_word_type1 = unlabeled_word.intersection(set(neg_list))
#unlabeled_word = unlabeled_word.difference(neg_word_type1)
neg_word = set([word for word in unlabeled_word if manual_filtration(word, neg_list) or (remove_punctuation(word) in word_list)])
#neg_word = neg_word_type1.union(neg_word_type2)
unlabeled_word = unlabeled_word.difference(neg_word)
mod_match_neg[mod] = neg_word
mod_match_unlabeled[mod] = unlabeled_word
new_word = new_word.union(unlabeled_word)
# new_word = new_word.difference(set(word_list))
return new_word, mod_count, mod_match, mod_match_neg, mod_match_unlabeled
def score_mod(mod, word_count, mod_count, mod_match_unlabel):
"""计算模式的评分"""
p = word_count[mod]
u = len(mod_match_unlabel[mod])
t = mod_count[mod]
return (p / t) * math.log(u + 1, 2) * math.log(p + 1, 2)
def score_word(word, mod_list, word_count, mod_match):
import math
m_list = [mod for mod in mod_list if word in mod_match[mod]]
return sum([math.log(word_count[mod] + 1, 2) for mod in m_list]) / (len(m_list) + 1)
def postmodify_word(word):
"""在一些明显不合理的词前面加上适当的前缀"""
res = []
direc_list = ['左', '右', '双']
if re.search(r'^侧|叶|肺', word) and not re.search(r'[左右]', word):
for direc in direc_list:
res.append(direc + word)
else:
res.append(word)
return res
def get_user_dict(txt_path, dic_path, ord_path, dic_type, iter_times=5, inital_lenth_mod=80,
lenth_word=50, extend_rate=10, mod_threshold=0.5, word_threshold=1.0):
word_list = get_seed(dic_path, dic_type)
neg_list = get_neg_list(dic_path, ord_path, dic_type)
txt_file = get_txt_file(txt_path)
res = []
num = 0
while num < iter_times:
mod_list, word_count, word_match = find_mod(txt_file, word_list)
new_word, mod_count, mod_match, mod_match_neg, mod_match_unlabel = find_word(txt_file, mod_list, word_list, neg_list)
mod_score_list = [score_mod(mod, word_count, mod_count, mod_match_unlabel) for mod in mod_list]
mod_selected = []
lenth_mod = inital_lenth_mod + num * extend_rate
mod_score = list(filter(lambda f: f[1] > mod_threshold, sorted(zip(mod_list, mod_score_list), key=lambda x: x[1], reverse=True)))[:lenth_mod]
        # TODO: should the pattern library keep growing every round, or should
        # only the best patterns be kept each time?
mod_selected.extend([x[0] for x in mod_score])
new_word, mod_count, mod_match, mod_match_neg, mod_match_unlabel = find_word(txt_file, mod_selected, word_list, neg_list)
word_score_list = [score_word(word, mod_selected, word_count, mod_match) for word in new_word]
word_score = list(filter(lambda f: f[1] > word_threshold, sorted(zip(new_word, word_score_list), key=lambda x: x[1], reverse=True)))[:lenth_word]
add_word = [x[0] for x in word_score]
#add_word_modify = list(reduce(list_add, (postmodify_word(x) for x in add_word)))
#word_list = word_list + add_word_modify
#word_set = set(word_list)
#word_list = list(word_set)
#res = res.union(word_set.intersection(set(add_word_modify)))
word_list.extend(add_word)
res.extend(add_word)
num += 1
print("Run time: NO. %d"%num + "\t\tAdd %d words to dictionary"%len(add_word))
return res
# Add surgeries, drugs and body parts to the negative dictionary; body-part
# words can be combined with direction words.
# The main direction words are: 上下左右半前后 (up/down/left/right/half/front/back).
if __name__ == '__main__':
txt_path = 'e:/test/病例特点/'
dic_path = 'C:/Users/yingying.zhu/Documents/dicts'
ord_path = 'C:/Users/yingying.zhu/Documents/现代汉语常用词表.txt'
dic_type = 'tutor'
res = get_user_dict(txt_path, dic_path, ord_path, dic_type)
with open('C:/Users/yingying.zhu/Desktop/user_dic.txt', 'w', encoding='utf8') as fr:
fr.write('\n'.join(list(res)))
```
#### File: jianantian/entity_recognizer/mod_class.py
```python
import math
import os
import re
def frequence(word, path):
    """Count how many times word occurs in the document collection under path."""
res = 0
file_list = os.listdir(path)
for file in file_list:
with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
txt_file = some_little_modify(modified(txt_fr.read()))
res += txt_file.count(word)
return res
class Mod(object):
def __init__(self, mod_name, mod_match=None, word_match=None, mod_match_neg=None, mod_match_unlabel=None):
self.name = mod_name
self.match = mod_match
self.match_positive = word_match
self.match_negtive = mod_match_neg
self.match_unlabel = mod_match_unlabel
self.counts = len(self.match)
self.counts_positive = len(self.match_positive)
self.counts_negtive = len(mod_match_neg)
self.counts_unlabel = len(mod_match_unlabel)
    def get_score(self):
        precision = self.counts_positive / self.counts  # precision: fraction of matched words that are in the dictionary
        capacity = math.log(1 + self.counts_unlabel, 2)  # ability to discover new (unlabeled) words
        recall = math.log(1 + self.counts_positive, 2)  # degree of coverage by dictionary words
        score = precision * recall * capacity
        self.__score = score
        return score
def __str__(self):
return self.name
class Word(object):
def __init__(self, word_name, match_list=None, frequence=None):
self.name = word_name
        self.match = match_list  # list of patterns that match this word
self.counts = len(self.match)
self.frequence = frequence
def get_match(self, mod_list):
self.match = [mod for mod in mod_list if self.name in mod.match]
return self.match
def get_frequence(self, doc_path):
self.frequence = frequence(self.name, doc_path)
return self.frequence
def get_score(self):
self.score = sum([math.log(mod.counts_positive + 1, 2)
for mod in self.match])/self.counts
return self.score
class Entity(object):
def __init__(self, name, entity_type, start=None, end=None, time=None, patient=None):
self.name = name
self.type = entity_type
self.start = start
self.end = end
self.time = time
self.patient = patient
def __str__(self):
return self.name
class Tempor(object):
    def __init__(self, name, start=None, end=None, patient=None, entity_pos='after', scope=None):
        """ scope is the span of text governed by this temporal entity: entities within that span are assigned this time.
        entity_pos is the relative position of the associated entities, i.e. whether they appear before or after the time expression; its value is 'before' or 'after'. """
self.name = name
self.start = start
self.end = end
self.patient = patient
self.scope = scope
self.entity_pos = entity_pos
```
#### File: jianantian/entity_recognizer/seg_cut.py
```python
def seg_cut(text, dicts, max_lenth=5):
    """Forward maximum matching segmentation."""
    start = 0
    result = ''
    while start < len(text):
        temp = text[start:]
        end = min(max_lenth, len(temp))
        while end > 0:
            sentence = temp[: end]
            if sentence not in dicts:
                end -= 1
            else:
                result += sentence + '/'
                start += len(sentence)
                break
        else:
            # no dictionary entry matched: fall back to a single character so the scan always advances
            result += temp[0] + '/'
            start += 1
    return result
def seg_cut_2(text, dicts, max_lenth=5):
    """Backward maximum matching segmentation."""
    result = ''
    end = len(text)
    while end > 0:
        temp = text[:end]
        start = max(0, end - max_lenth)
        while start < end:
            sentence = temp[start:]
            if sentence in dicts:
                result = sentence + '/' + result
                end -= len(sentence)
                break
            else:
                start += 1
        else:
            # no dictionary entry matched: fall back to a single character so the scan always advances
            result = temp[-1] + '/' + result
            end -= 1
    return result
```
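A minimal usage sketch of the two strategies above; the toy dictionary and sentence are made up for illustration and show how forward and backward maximum matching can disagree on the same input:
```python
# Hypothetical dictionary and sentence, for illustration only.
dicts = {'研究', '研究生', '生命', '起源'}
text = '研究生命起源'
print(seg_cut(text, dicts))    # forward matching:  研究生/命/起源/
print(seg_cut_2(text, dicts))  # backward matching: 研究/生命/起源/
```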
#### File: jianantian/entity_recognizer/temp.py
```python
def find_mod(path, dic):
    """Use the dictionary words to discover textual patterns."""
    file_list = os.listdir(path)
    word_list = read_dict(dic)
    word_count = {}
    # dict keyed by discovered pattern; the value is the number of dictionary words matching that pattern
    mod_list = []
    # the discovered patterns are stored in a list
    word_match = {}
p = 5
q = 5
for file in file_list:
with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
txt_file = txt_fr.readlines()
#txt_file = modified(txt_fr.read())
for line in txt_file:
line = modified(line)
if len(line) > 0:
for word in word_list:
loc_list = [w.start() for w in re.finditer(word, line)]
for loc in loc_list:
for i in range(1, (p+1)):
for j in range(1,(q+1)):
if loc - i >= 0 and loc + len(word) + j <len(line):
ext_word = line[loc - i: loc + len(word) + j]
ext_wd = some_little_modify(ext_word)
local_ind = ext_wd.index(some_little_modify(word))
try:
#mod = re.compile(ext_wd[:local_ind]+'(\S{%d})'%len(word)+ext_wd[local_ind+len(word):])
mod = (ext_wd[:local_ind], ext_wd[local_ind+len(word):])
except re.error:
print (word + '\t\t' + ext_word + '\n')
if mod not in mod_list:
mod_list.append(mod)
word_match[mod] = {word}
else:
word_match[mod].add(word)
for mod in mod_list:
word_count[mod] = len(word_match[mod])
return mod_list, word_count, word_match
def find_word(path, mod_list, dic):
    """Use the discovered patterns to find new words in the text."""
    file_list = os.listdir(path)
    word_list = read_dict(dic)
    mod_count = {}
    # key: discovered pattern; value: number of words it matches
    mod_match = {}
    # key: discovered pattern; value: set of matched words
    new_word = set()
    # the set of newly matched words
for mod in mod_list:
word_set = set()
for file in file_list:
with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
#txt_file = modified(txt_fr.read())
txt_list = txt_fr.readlines()
for line in txt_list:
line = modified(line)
left_index = [w.end() for w in re.finditer(mod[0], line)]
right_index = [w.start() for w in re.finditer(mod[1], line)]
start = 0
i, j = 0, 0
for i in range(len(left_index)):
if start < len(right_index):
for j in range(start, len(right_index)):
if right_index[j] > left_index[i] and (i == len(left_index)-1 or right_index[j] <= left_index[i+1]):
word = line[left_index[i]: right_index[j]]
if len(word) < 10:
print (word)
print (file)
word_set.add(word)
start += 1
break
elif i < len(left_index) - 1 and right_index[j] > left_index[i+1]:
break
else:
start += 1
#wor_set = wor_set.difference(set(word_list))
num_extract = len(word_set)
mod_count[mod] = num_extract
mod_match[mod] = word_set
new_word = new_word.union(word_set)
new_word = new_word.difference(set(word_list))
return new_word, mod_count, mod_match
# def find_word(path, mod_list, dic):
# """用发现的模式去发现文本中的新词"""
# file_list = os.listdir(path)
# word_list = read_dict(dic)
# mod_count = {}
# #键为发现的模式, 相应的值为匹配到的词的数目
# mod_match = {}
# #键为发现的模式, 相应的值为匹配到的词的集合
# new_word = set()
# #匹配到的新词的集合
# for mod in mod_list:
# wor_set = set()
# for file in file_list:
# with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
# txt_file = txt_fr.read()
# wor_set = wor_set.union(set(re.findall(mod, txt_file)))
# #wor_set = wor_set.difference(set(word_list))
# num_extract = len(wor_set)
# mod_count[mod] = num_extract
# mod_match[mod] = wor_set
# new_word = new_word.union(wor_set)
# new_word = new_word.difference(set(word_list))
# return new_word, mod_count, mod_match
# def find_mod(path, dic):
# """用字典中的词发现文本模式"""
# file_list = os.listdir(path)
# word_list = read_dict(dic)
# word_count = {}
# #用一个字典保存, key为发现的文本模式, 键值为匹配该模式的词典中的词的数目
# mod_list = []
# #文本模式以列表形式保存
# word_match = {}
# for file in file_list:
# with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
# p = 5
# q = 5
# txt_file = modified(txt_fr.read())
# if len(txt_file) > 0:
# for word in word_list:
# loc_list = [w.start() for w in re.finditer(word, txt_file)]
# for loc in loc_list:
# for i in range(1, (p+1)):
# for j in range(1,(q+1)):
# if loc - i >= 0 and loc + len(word) + j <len(txt_file):
# ext_word = txt_file[loc - i: loc + len(word) + j]
# ext_wd = some_little_modify(ext_word)
# local_ind = ext_wd.index(some_little_modify(word))
# try:
# #mod = re.compile(ext_wd[:local_ind]+'(\S{%d})'%len(word)+ext_wd[local_ind+len(word):])
# mod = (ext_wd[:local_ind], ext_wd[local_ind+len(word):])
# except re.error:
# print (word + '\t\t' + ext_word + '\n')
# if mod not in mod_list:
# mod_list.append(mod)
# word_match[mod] = {word}
# else:
# word_match[mod].add(word)
# for mod in mod_list:
# word_count[mod] = len(word_match[mod])
# return mod_list, word_count, word_match
# def find_word(path, mod_list, dic):
# """用发现的模式去发现文本中的新词"""
# file_list = os.listdir(path)
# word_list = read_dict(dic)
# mod_count = {}
# #键为发现的模式, 相应的值为匹配到的词的数目
# mod_match = {}
# #键为发现的模式, 相应的值为匹配到的词的集合
# new_word = set()
# #匹配到的新词的集合
# for mod in mod_list:
# wor_set = set()
# for file in file_list:
# with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
# txt_file = modified(txt_fr.read())
# left_index = [w.start() for w in re.finditer(mod[0], txt_file)]
# right_index = [w.start() for w in re.finditer(mod[1], txt_file)]
# start = 0
# for i in range(len(left_index)):
# for j in range(start, len(right_index)):
# if right_index[j] > left_index[i] and right_index[j] <= left_index[i+1]:
# word = text_file[left_index[i], right_index[j]]
# wor_set.add(word)
# start += 1
# break
# elif right_index[j] > left_index[i+1]:
# break
# else:
# start += 1
# #wor_set = wor_set.difference(set(word_list))
# num_extract = len(wor_set)
# mod_count[mod] = num_extract
# mod_match[mod] = wor_set
# new_word = new_word.union(wor_set)
# new_word = new_word.difference(set(word_list))
# return new_word, mod_count, mod_match
``` |
{
"source": "jianantian/yolo3-pytorch",
"score": 3
} |
#### File: yolo3-pytorch/yolo/model.py
```python
import numpy as np
import torch
import torch.nn as nn
from yolo import utils
def _content_parse(value: str):
    """
    Parse a raw config value: try int, then float, otherwise keep the string.
    :param value: raw option value read from the cfg file
    :return: int, float or str
    """
try:
res = int(value)
except ValueError:
try:
res = float(value)
except ValueError:
res = value
return res
def cfg_parser(cfg_filename: str):
    """
    Parse a darknet-style cfg file into a list of blocks; each block is a dict with a 'name' key plus its options.
    :param cfg_filename: path to the cfg file
    :return: list of option blocks (the first block holds the network-level settings)
    """
with open(cfg_filename, 'r', encoding='utf8') as fr:
block_list = []
block = {}
for _line in fr.readlines():
line = _line.strip()
if not line:
continue
if line[0] == '[':
if block:
block_list.append(block)
block = {}
name = line[1:-1]
block['name'] = name
elif line[0] != '#':
try:
k, v = line.split('=')
except ValueError:
pass
else:
block[k.strip()] = _content_parse(v.strip())
if block:
block_list.append(block)
return block_list
class EmptyLayer(nn.Module):
"""
"""
def __init__(self, start=None, end=None):
super().__init__()
self.start = start
self.end = end
class DetectionLayer(nn.Module):
"""
"""
def __init__(self, block, num_class, anchors):
"""
:param block:
"""
super().__init__()
self.name = 'detection'
_mask = block['mask']
mask_tuple = tuple(int(_x.strip()) for _x in _mask.split(','))
if anchors is None:
_anchors = block['anchors']
_anchors = (_t.strip() for _t in _anchors.split(', '))
def anchor_getter(_x):
"""
:param _x:
:return:
"""
_x_list = _x.split(',')
return int(_x_list[0]), int(_x_list[1])
anchor_tuple = tuple(anchor_getter(_x) for _x in _anchors)
else:
anchor_tuple = anchors
self.mask = mask_tuple
self.anchors = anchor_tuple
self.anchor_num = block['num']
self.class_num = num_class
# self.class_num = 5
self.jitter = block['jitter']
self.ignore_thresh = block['ignore_thresh']
self.truth_thresh = block['truth_thresh']
self.random = bool(block['random'])
self.used_anchors = tuple(anchor_tuple[i] for i in mask_tuple)
def process_detection(x, grid_size, stride, anchors):
    """
    Decode the raw predictions in place: add the grid-cell offsets to the box centres,
    scale the widths/heights by the anchors, then map the boxes back to image coordinates.
    :param x: output of prepare_detection_result
    :param grid_size: number of cells along one side of the feature map
    :param stride: image width divided by the feature-map width
    :param anchors: anchors used at this detection stage
    :return: None; x is modified in place
    """
device = x.device
num_anchors = len(anchors)
grid = np.arange(grid_size)
a, b = np.meshgrid(grid, grid)
x_offset = torch.tensor(a, dtype=torch.float, device=device).reshape(-1, 1)
y_offset = torch.tensor(b, dtype=torch.float, device=device).reshape(-1, 1)
x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1, num_anchors).reshape(-1, 2).unsqueeze(0)
x[..., :2] += x_y_offset
anchors = [(_a[0] / stride, _a[1] / stride) for _a in anchors]
anchors = torch.tensor(anchors, dtype=torch.float, device=device)
anchors = anchors.repeat(grid_size * grid_size, 1).unsqueeze(0)
x[..., 2:4] = torch.exp(x[..., 2:4]) * anchors
x[..., :4] *= stride
def prepare_detection_result(x, img_width, num_class, anchors):
"""
:param x:
:param img_width:
:param num_class:
:param anchors:
:return: tensor of shape batch_size * (grid_size * grid_size * num_anchors) * (5 + num_class),
and the bbox is of shape (center_x, center_y, width, height)
"""
batch_size, _, width, _ = x.size()
stride = img_width // width
grid_size = img_width // stride
bbox_attr_num = 5 + num_class
num_anchors = len(anchors)
x = x.reshape(batch_size, bbox_attr_num * num_anchors, grid_size * grid_size)
x = x.transpose(1, 2).contiguous()
x = x.reshape(batch_size, grid_size * grid_size * num_anchors, bbox_attr_num)
# predict bbox center
x[..., 0] = torch.sigmoid(x[..., 0])
x[..., 1] = torch.sigmoid(x[..., 1])
# object confidence
x[..., 4] = torch.sigmoid(x[..., 4])
# class probability
x[..., 5:] = torch.sigmoid(x[..., 5:])
return x, grid_size, stride
class NamedLayer(nn.Sequential):
"""
"""
def __init__(self, name, start=None, end=None, bn=False):
super().__init__()
self.name = name
self.start = start
self.end = end
self.bn = bn
class UpsampleLayer(nn.Module):
"""
"""
def __init__(self, scale_factor, mode='bilinear'):
"""
:param scale_factor:
:param mode:
"""
super().__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
"""
:param x:
:return:
"""
return nn.functional.interpolate(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=True)
def get_convolutional_layer(block, prev_out_channels, index, num_class=None):
"""
:param block:
:param prev_out_channels:
:param index:
:param num_class:
:return:
"""
in_channels = prev_out_channels
if num_class is None:
out_channels = block['filters']
else:
out_channels = 3 * (num_class + 5)
kernel_size = block['size']
stride = block['stride']
padding = block['pad']
if block.get('batch_normalize'):
bias = False
batch_normalize = True
else:
bias = True
batch_normalize = False
if padding:
padding = (kernel_size - 1) // 2
model = NamedLayer('convolutional', bn=batch_normalize)
conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
    model.add_module(f'conv_{index}', conv_layer)
if batch_normalize:
bn_layer = nn.BatchNorm2d(out_channels)
model.add_module(f'bn_{index}', bn_layer)
activation = block.get('activation')
if activation == 'leaky':
activation_layer = nn.LeakyReLU(0.1, inplace=True)
model.add_module(f'activation_{index}', activation_layer)
return model, out_channels
def get_shortcut_layer(block, index):
"""
:param block:
:param index:
:return:
"""
from_index = block['from']
if from_index > 0:
from_index = from_index - index
module = NamedLayer('shortcut', start=from_index)
shortcut_layer = EmptyLayer(start=from_index)
module.add_module(f'shortcut_{index}', shortcut_layer)
return module
def get_upsample_layer(block, index):
"""
:param block:
:param index:
:return:
"""
module = NamedLayer('upsample')
stride = block['stride']
upsample_layer = UpsampleLayer(scale_factor=stride, mode='bilinear')
module.add_module(f'upsample_{index}', upsample_layer)
return module
def get_route_layer(block, index):
"""
:param block:
:param index:
:return:
"""
_layers = block['layers']
if isinstance(_layers, int):
layers = [_layers]
else:
layers = [int(_x.strip()) for _x in _layers.split(',')]
for i, x in enumerate(layers):
if x > 0:
x -= index
layers[i] = x
if len(layers) == 2:
start_index, end_index = layers
elif len(layers) == 1:
start_index = layers[0]
end_index = None
else:
raise ValueError(f'Illegal argument layers {_layers}.')
module = NamedLayer('route', start=start_index, end=end_index)
route_layer = EmptyLayer(start=start_index, end=end_index)
module.add_module(f'route_{index}', route_layer)
return module
def get_detection_layer(block, index, num_class, anchors):
"""
:param block:
:param index:
:param num_class:
:param anchors:
:return:
"""
module = NamedLayer('detection')
detection_layer = DetectionLayer(block, num_class, anchors)
module.add_module(f'detection_{index}', detection_layer)
return module, detection_layer.used_anchors
def get_model_list(block_list, num_class, anchors):
"""
"""
net_info = block_list[0]
module_dct = nn.ModuleDict()
prev_out_channels = 3
size_list = []
used_anchor_list = []
for index, block in enumerate(block_list[1:]):
name = block['name']
try:
next_block = block_list[index + 2]
except IndexError:
next_name = None
else:
next_name = next_block['name']
if name == 'convolutional':
if next_name == 'yolo':
_c_num = num_class
else:
_c_num = None
module, out_channels = get_convolutional_layer(block,
prev_out_channels=prev_out_channels,
index=index,
num_class=_c_num)
elif name == 'upsample':
module = get_upsample_layer(block, index)
out_channels = prev_out_channels
elif name == 'shortcut':
module = get_shortcut_layer(block, index)
out_channels = prev_out_channels
elif name == 'route':
module = get_route_layer(block, index)
start_index = module.start
end_index = module.end
if end_index is not None:
out_channels = size_list[start_index + index] + size_list[end_index + index]
else:
out_channels = size_list[start_index + index]
elif name == 'yolo':
module, sub_anchors = get_detection_layer(block, index, num_class, anchors)
out_channels = prev_out_channels
used_anchor_list.append(sub_anchors)
else:
raise ValueError
module_name = module.name + '_' + str(index)
module_dct[module_name] = module
size_list.append(out_channels)
prev_out_channels = out_channels
return net_info, module_dct, used_anchor_list
def read_weights_file(weight_file):
"""
:param weight_file:
:return:
"""
with open(weight_file, 'rb') as fr:
head = np.fromfile(fr, dtype=np.uint32, count=5)
weights = np.fromfile(fr, dtype=np.float32)
return head, weights
class YOLOv3(nn.Module):
"""
"""
def __init__(self, cfg_file,
name_tuple,
anchors=None,
use_cuda=torch.cuda.is_available(),
**kwargs):
"""
:param cfg_file:
:param name_tuple:
:param anchors:
:param use_cuda:
:param kwargs:
"""
super().__init__()
self.use_cuda = use_cuda
if self.use_cuda and (not torch.cuda.is_available()):
raise ValueError('The value of use_cuda is True, but no compatible device find.')
# self.use_cuda = False
self.anchors = anchors
self.name_tuple = name_tuple
self.num_class = len(name_tuple)
block_list = cfg_parser(cfg_file)
net_info, module_dct, used_anchor_list = get_model_list(block_list, self.num_class, anchors)
self.anchor_tuple = tuple(used_anchor_list)
self.__net_info = net_info
self.img_width = net_info['width']
self.img_height = net_info['height']
self.head = None
self.img_count = 0
self.coordinate_rate = kwargs.get('coordinate_rate', 5)
self.no_object_rate = kwargs.get('no_object_rate', 0.2)
self.confidence_thresh = kwargs.get('confidence_thresh', 0.8)
self.iou_thresh = kwargs.get('iou_thresh', 0.4)
for name, module in module_dct.items():
self.add_module(name, module)
self.localization_loss = nn.MSELoss(reduction='sum')
self.classification_loss = nn.BCELoss(reduction='sum')
self.confidence_loss = nn.BCELoss(reduction='sum')
self.__bn_attrs = ('bias', 'weight', 'running_mean', 'running_var')
self.__conv_attrs_without_bias = ('weight',)
self.__conv_attrs_with_bias = ('bias', 'weight')
def load_weights(self, weight_file):
"""
:param weight_file:
:return:
"""
bn_attrs = self.__bn_attrs
conv_attrs_without_bias = self.__conv_attrs_without_bias
conv_attrs_with_bias = self.__conv_attrs_with_bias
head, weights = read_weights_file(weight_file)
def _load_singlelayer_parameter(_layer, _attr_tuple, _start_index):
"""
:param _layer:
:param _attr_tuple:
:param _start_index:
:return:
"""
for _attr in _attr_tuple:
_para = getattr(_layer, _attr)
_para_count = _para.numel()
_end_index = _start_index + _para_count
_new_para = torch.from_numpy(weights[_start_index:_end_index])
_new_para = _new_para.view_as(_para)
_para.data.copy_(_new_para)
_start_index = _end_index
return _start_index
def _load_layer_parameter(_layer, _start_index):
"""
:param _layer:
:param _start_index:
:return:
"""
if _layer.name != 'convolutional':
return _start_index
_conv_layer = _layer[0]
if _layer.bn:
_bn_layer = _layer[1]
_start_index = _load_singlelayer_parameter(_bn_layer, bn_attrs, _start_index)
_start_index = _load_singlelayer_parameter(_conv_layer, conv_attrs_without_bias, _start_index)
else:
_start_index = _load_singlelayer_parameter(_conv_layer, conv_attrs_with_bias, _start_index)
return _start_index
self.head = head
self.img_count = head[3]
start_index = 0
for layer in self.children():
try:
name = layer.name
except AttributeError:
continue
if name != 'convolutional':
continue
start_index = _load_layer_parameter(layer, start_index)
def save_weights(self, filename):
"""
:param filename:
:return:
"""
bn_attrs = self.__bn_attrs
conv_attrs_without_bias = self.__conv_attrs_without_bias
conv_attrs_with_bias = self.__conv_attrs_with_bias
def _get_single_layer_parameter(_layer, _attr_tuple):
"""
:param _layer:
:param _attr_tuple:
:return:
"""
for _attr in _attr_tuple:
_para = getattr(_layer, _attr)
_res = _para.cpu().detach_().numpy().astype(np.float32)
yield _res
# _res.tofile(fr)
def _get_layer_parameter(_layer):
"""
:param _layer:
:return:
"""
_conv_layer = _layer[0]
if _layer.bn:
_bn_layer = _layer[1]
# _get_single_layer_parameter(_bn_layer, bn_attrs)
# _get_single_layer_parameter(_conv_layer, conv_attrs_without_bias)
yield from _get_single_layer_parameter(_bn_layer, bn_attrs)
yield from _get_single_layer_parameter(_conv_layer, conv_attrs_without_bias)
else:
# _get_single_layer_parameter(_conv_layer, conv_attrs_with_bias)
yield from _get_single_layer_parameter(_conv_layer, conv_attrs_with_bias)
head = self.head
head[3] = self.img_count
head = np.array(head, dtype=np.uint32)
with open(filename, 'wb') as fr:
head.tofile(fr)
for layer in self.children():
try:
name = layer.name
except AttributeError:
continue
if name == 'convolutional':
# _get_layer_parameter(layer)
for para in _get_layer_parameter(layer):
para.tofile(fr)
def forward(self, x, label=None):
"""
:param x:
:param label:
:return:
"""
is_train = self.training
# store output from each layer
output_list = []
# store detection result from different stage
prediction_list = []
loss_list = []
num_class = self.num_class
for index, module in enumerate(self.children()):
try:
name = module.name
except AttributeError:
continue
if name == 'convolutional' or name == 'upsample':
x = module(x)
elif name == 'shortcut':
start_index = module.start
x = output_list[index + start_index] + output_list[index - 1]
elif name == 'route':
start_index = module.start
end_index = module.end
if end_index is None:
x = output_list[index + start_index]
else:
x_1 = output_list[index + start_index]
x_2 = output_list[index + end_index]
x = torch.cat((x_1, x_2), dim=1)
elif name == 'detection':
detection_layer = module[0]
img_width = self.img_width
num_class = detection_layer.class_num
anchors = detection_layer.used_anchors
x, grid_size, stride = prepare_detection_result(x, img_width, num_class, anchors)
if is_train and label is not None:
target = utils.prepare_target(label, anchors, num_class, grid_size, stride)
stage_loss = self.loss(x, target)
loss_list.append(stage_loss)
process_detection(x, grid_size, stride, anchors)
prediction_list.append(x)
output_list.append(x)
prediction_res = torch.cat(prediction_list, 1)
if label is None:
return prediction_res
else:
true_label = label
true_label[..., :4] *= self.img_width
prediction = prediction_res.detach().clone()
prediction_count_result = utils.count_prediction(prediction,
label,
num_class,
confidence_thresh=self.confidence_thresh,
iou_thresh=self.iou_thresh)
if is_train:
local_loss = sum(x[0] for x in loss_list)
conf_loss = sum(x[1] for x in loss_list)
class_loss = sum(x[2] for x in loss_list)
return local_loss, conf_loss, class_loss
else:
return prediction_res, prediction_count_result
def loss(self, prediction, target):
"""
:param prediction:
:param target:
:return:
"""
localization_loss = self.localization_loss
confidence_loss = self.confidence_loss
classification_loss = self.classification_loss
if self.use_cuda:
localization_loss = localization_loss.cuda()
confidence_loss = confidence_loss.cuda()
classification_loss = classification_loss.cuda()
mask = (target[..., 4] > 0)
        negative_mask = ~mask  # grid cells with no target object
local_loss = localization_loss(prediction[..., :4][mask], target[..., : 4][mask])
_pos_conf_loss = confidence_loss(prediction[..., 4][mask], target[..., 4][mask])
_neg_conf_loss = confidence_loss(prediction[..., 4][negative_mask], target[..., 4][negative_mask])
conf_loss = _pos_conf_loss + self.no_object_rate * _neg_conf_loss
class_loss = classification_loss(prediction[..., 5:][mask], target[..., 5:][mask])
batch_num = prediction.shape[0]
# grid_size = prediction.shape[1]
# loss = torch.tensor([local_loss / batch_num, conf_loss / batch_num, class_loss / batch_num])
# loss = local_loss + conf_loss + class_loss
loss_tuple = (local_loss / batch_num, conf_loss / batch_num, class_loss / batch_num)
return loss_tuple
def load_model(cfg_filename=None,
weight_filename=None,
model_filename=None,
anchors=None,
class_names=None,
use_cuda=False,
**kwargs):
"""
:param cfg_filename:
:param weight_filename:
:param model_filename:
:param anchors:
:param class_names:
:param use_cuda:
:param kwargs:
:return:
"""
if model_filename is None:
_, name_tuple = utils.parse_class_names(class_names)
module = YOLOv3(cfg_filename, name_tuple, anchors, use_cuda=use_cuda, **kwargs)
if weight_filename is not None:
module.load_weights(weight_filename)
else:
module = torch.load(model_filename)
if module.use_cuda:
module = module.cuda()
return module
``` |
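A hedged usage sketch of `load_model` and a single forward pass; the cfg, weights and class-name arguments below are placeholders rather than files referenced by the snippet, and the exact format expected for `class_names` depends on `utils.parse_class_names`:
```python
import torch
from yolo.model import load_model
# All three paths are hypothetical placeholders; point them at your own darknet cfg/weights/names files.
model = load_model(cfg_filename='cfg/yolov3.cfg',
                   weight_filename='yolov3.weights',
                   class_names='data/coco.names',
                   use_cuda=False)
model.eval()
with torch.no_grad():
    dummy = torch.rand(1, 3, model.img_height, model.img_width)
    prediction = model(dummy)  # no label given, so only the decoded detections are returned
print(prediction.shape)        # (1, total boxes over all three scales, 5 + number of classes)
```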
{
"source": "JiananYuan/Learned-Indexes",
"score": 3
} |
#### File: JiananYuan/Learned-Indexes/btree.py
```python
import pandas as pd
# Node in BTree
class BTreeNode:
def __init__(self, degree=2, number_of_keys=0, is_leaf=True, items=None, children=None,
index=None):
self.isLeaf = is_leaf
self.numberOfKeys = number_of_keys
self.index = index
if items is not None:
self.items = items
else:
self.items = [None] * (degree * 2 - 1)
if children is not None:
self.children = children
else:
self.children = [None] * degree * 2
def set_index(self, index):
self.index = index
def get_index(self):
return self.index
def search(self, b_tree, an_item):
i = 0
while i < self.numberOfKeys and an_item > self.items[i]:
i += 1
if i < self.numberOfKeys and an_item == self.items[i]:
return {'found': True, 'fileIndex': self.index, 'nodeIndex': i}
if self.isLeaf:
return {'found': False, 'fileIndex': self.index, 'nodeIndex': i - 1}
else:
return b_tree.get_node(self.children[i]).search(b_tree, an_item)
# BTree Class
class BTree:
def __init__(self, degree=2, nodes=None, root_index=1, free_index=2):
if nodes is None:
nodes = {}
self.degree = degree
if len(nodes) == 0:
self.rootNode = BTreeNode(degree)
self.nodes = {}
self.rootNode.set_index(root_index)
            self.write_at(root_index, self.rootNode)
else:
self.nodes = nodes
self.rootNode = self.nodes[root_index]
self.rootIndex = root_index
self.freeIndex = free_index
def build(self, keys, values):
if len(keys) != len(values):
return
for ind in range(len(keys)):
self.insert(Item(keys[ind], values[ind]))
def search(self, an_item):
return self.rootNode.search(self, an_item)
def predict(self, key):
search_result = self.search(Item(key, 0))
a_node = self.nodes[search_result['fileIndex']]
if a_node.items[search_result['nodeIndex']] is None:
return -1
return a_node.items[search_result['nodeIndex']].v
def split_child(self, p_node, i, c_node):
new_node = self.get_free_node()
new_node.isLeaf = c_node.isLeaf
new_node.numberOfKeys = self.degree - 1
for j in range(0, self.degree - 1):
new_node.items[j] = c_node.items[j + self.degree]
if c_node.isLeaf is False:
for j in range(0, self.degree):
new_node.children[j] = c_node.children[j + self.degree]
c_node.numberOfKeys = self.degree - 1
        # shift the parent's children and items one slot to the right, then insert the new child and the median key
        j = p_node.numberOfKeys
        while j > i:
            p_node.children[j + 1] = p_node.children[j]
            j -= 1
        p_node.children[i + 1] = new_node.get_index()
        j = p_node.numberOfKeys - 1
        while j >= i:
            p_node.items[j + 1] = p_node.items[j]
            j -= 1
        p_node.items[i] = c_node.items[self.degree - 1]
        p_node.numberOfKeys += 1
def insert(self, an_item):
search_result = self.search(an_item)
if search_result['found']:
return None
r = self.rootNode
if r.numberOfKeys == 2 * self.degree - 1:
s = self.get_free_node()
self.set_root_node(s)
s.isLeaf = False
s.numberOfKeys = 0
s.children[0] = r.get_index()
self.split_child(s, 0, r)
self.insert_not_full(s, an_item)
else:
self.insert_not_full(r, an_item)
def insert_not_full(self, inNode, anItem):
i = inNode.numberOfKeys - 1
if inNode.isLeaf:
while i >= 0 and anItem < inNode.items[i]:
inNode.items[i + 1] = inNode.items[i]
i -= 1
inNode.items[i + 1] = anItem
inNode.numberOfKeys += 1
else:
while i >= 0 and anItem < inNode.items[i]:
i -= 1
i += 1
if self.get_node(inNode.children[i]).numberOfKeys == 2 * self.degree - 1:
self.split_child(inNode, i, self.get_node(inNode.children[i]))
if anItem > inNode.items[i]:
i += 1
self.insert_not_full(self.get_node(inNode.children[i]), anItem)
def delete(self, an_item):
an_item = Item(an_item, 0)
search_result = self.search(an_item)
if search_result['found'] is False:
return None
r = self.rootNode
self.delete_in_node(r, an_item, search_result)
def delete_in_node(self, a_node, an_item, search_result):
if a_node.index == search_result['fileIndex']:
i = search_result['nodeIndex']
if a_node.isLeaf:
while i < a_node.numberOfKeys - 1:
a_node.items[i] = a_node.items[i + 1]
i += 1
a_node.numberOfKeys -= 1
else:
left = self.get_node(a_node.children[i])
right = self.get_node(a_node.children[i + 1])
if left.numberOfKeys >= self.degree:
a_node.items[i] = self.get_right_most(left)
elif right.numberOfKeys >= self.degree:
a_node.items[i] = self.get_right_most(right)
else:
k = left.numberOfKeys
left.items[left.numberOfKeys] = an_item
left.numberOfKeys += 1
for j in range(0, right.numberOfKeys):
left.items[left.numberOfKeys] = right.items[j]
left.numberOfKeys += 1
del self.nodes[right.get_index()]
for j in range(i, a_node.numberOfKeys - 1):
a_node.items[j] = a_node.items[j + 1]
a_node.children[j + 1] = a_node.children[j + 2]
a_node.numberOfKeys -= 1
if a_node.numberOfKeys == 0:
del self.nodes[a_node.get_index()]
self.set_root_node(left)
self.delete_in_node(left, an_item, {'found': True, 'fileIndex': left.index, 'nodeIndex': k})
else:
i = 0
while i < a_node.numberOfKeys and self.get_node(a_node.children[i]).search(self, an_item)['found'] is False:
i += 1
c_node = self.get_node(a_node.children[i])
if c_node.numberOfKeys < self.degree:
j = i - 1
while j < i + 2 and self.get_node(a_node.children[j]).numberOfKeys < self.degree:
j += 1
if j == i - 1:
sNode = self.get_node(a_node.children[j])
k = c_node.numberOfKeys
while k > 0:
c_node.items[k] = c_node.items[k - 1]
c_node.children[k + 1] = c_node.children[k]
k -= 1
c_node.children[1] = c_node.children[0]
c_node.items[0] = a_node.items[i - 1]
c_node.children[0] = sNode.children[sNode.numberOfKeys]
c_node.numberOfKeys += 1
a_node.items[i - 1] = sNode.items[sNode.numberOfKeys - 1]
sNode.numberOfKeys -= 1
elif j == i + 1:
sNode = self.get_node(a_node.children[j])
c_node.items[c_node.numberOfKeys] = a_node.items[i]
c_node.children[c_node.numberOfKeys + 1] = sNode.children[0]
a_node.items[i] = sNode.items[0]
for k in range(0, sNode.numberOfKeys):
sNode.items[k] = sNode.items[k + 1]
sNode.children[k] = sNode.children[k + 1]
sNode.children[k] = sNode.children[k + 1]
sNode.numberOfKeys -= 1
else:
j = i + 1
sNode = self.get_node(a_node.children[j])
c_node.items[c_node.numberOfKeys] = a_node.items[i]
c_node.numberOfKeys += 1
for k in range(0, sNode.numberOfKeys):
c_node.items[c_node.numberOfKeys] = sNode.items[k]
c_node.numberOfKeys += 1
del self.nodes[sNode.index]
                    for k in range(i, a_node.numberOfKeys - 1):
                        a_node.items[k] = a_node.items[k + 1]
                        a_node.children[k + 1] = a_node.children[k + 2]
a_node.numberOfKeys -= 1
if a_node.numberOfKeys == 0:
del self.nodes[a_node.index]
self.set_root_node(c_node)
self.delete_in_node(c_node, an_item, c_node.search(self, an_item))
def get_right_most(self, aNode):
if aNode.children[aNode.numberOfKeys] is None:
upItem = aNode.items[aNode.numberOfKeys - 1]
self.delete_in_node(aNode, upItem,
{'found': True, 'fileIndex': aNode.index, 'nodeIndex': aNode.numberOfKeys - 1})
return upItem
else:
return self.get_right_most(self.get_node(aNode.children[aNode.numberOfKeys]))
def set_root_node(self, r):
self.rootNode = r
self.rootIndex = self.rootNode.get_index()
def get_node(self, index):
return self.nodes[index]
def get_free_node(self):
new_node = BTreeNode(self.degree)
index = self.get_free_index()
new_node.set_index(index)
self.write_at(index, new_node)
return new_node
def get_free_index(self):
self.freeIndex += 1
return self.freeIndex - 1
def write_at(self, index, a_node):
self.nodes[index] = a_node
# Value in Node
class Item:
    def __init__(self, k, v):
        self.k = k
        self.v = v
    def __gt__(self, other):
        return self.k > other.k
    def __ge__(self, other):
        return self.k >= other.k
    def __eq__(self, other):
        return self.k == other.k
    def __le__(self, other):
        return self.k <= other.k
    def __lt__(self, other):
        return self.k < other.k
# For Test
def b_tree_main():
path = "last_data.csv"
data = pd.read_csv(path)
b = BTree(2)
for i in range(data.shape[0]):
        b.insert(Item(data.iloc[i, 0], data.iloc[i, 1]))  # .ix was removed from pandas; .iloc is the positional equivalent
pos = b.predict(30310)
print(pos)
if __name__ == '__main__':
b_tree_main()
``` |
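Beyond the CSV-driven `b_tree_main` above, a small sketch of the `build`/`predict` interface on synthetic keys (the key and position values are made up for illustration):
```python
# Synthetic keys and their positions, for illustration only.
keys = list(range(0, 100, 10))       # 0, 10, ..., 90
positions = list(range(len(keys)))   # 0, 1, ..., 9
tree = BTree(degree=4)
tree.build(keys, positions)
print(tree.predict(50))  # 5: exact key found
print(tree.predict(55))  # 5: falls back to the closest preceding key in the leaf
```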
{
"source": "jianaosiding/HomeWork",
"score": 3
} |
#### File: HomeWork/ganji/channel_url.py
```python
import requests
from bs4 import BeautifulSoup
import time
start_url = 'http://bj.ganji.com/zhaopin/'
host_url = 'http://bj.ganji.com'
def get_channel_url(url):
    # pause for 2 seconds so requests are not sent too quickly
time.sleep(2)
web_data = requests.get(url)
soup = BeautifulSoup(web_data.text, 'lxml')
links = soup.select('div.f-all-news > dl > dt > a')
for link in links:
        # build the full URL from the host and the relative link
item_link = host_url + link.get('href')
print(item_link)
# run the function
get_channel_url(start_url)
# channel_list holds the printed results of the run
channel_list = '''
http://bj.ganji.com/zpshichangyingxiao/
http://bj.ganji.com/zpjigongyibangongren/
http://bj.ganji.com/zpxingzhenghouqin/
http://bj.ganji.com/zprenliziyuan/
http://bj.ganji.com/zpjiudiancanyin/
http://bj.ganji.com/zpkefu/
http://bj.ganji.com/zptaobao/
http://bj.ganji.com/zpbaihuolingshou/
http://bj.ganji.com/zpjiazhenganbao/
http://bj.ganji.com/zpsiji/
http://bj.ganji.com/zpcaiwushenji/
http://bj.ganji.com/zpfangjingjiren/
http://bj.ganji.com/zpmeirongmeifazhiwei/
http://bj.ganji.com/zpbaojiananmo/
http://bj.ganji.com/zpyundongjianshenzhiwei/
http://bj.ganji.com/zpqiche/
http://bj.ganji.com/zpjisuanjiwangluo/
http://bj.ganji.com/zpshichanggongguan/
http://bj.ganji.com/zpguanggaohuizhanzhiwei/
http://bj.ganji.com/zpmeishusheji/
http://bj.ganji.com/zpmeitiyingshi/
http://bj.ganji.com/zplvyouzhiwei/
http://bj.ganji.com/zpjinrongtouzizhengquan/
http://bj.ganji.com/zpbaoxianjingjiren/
http://bj.ganji.com/zpzixunguwen/
http://bj.ganji.com/zpfanyi/
http://bj.ganji.com/zpjiaoyupeixun/
http://bj.ganji.com/zpbianjichuban/
http://bj.ganji.com/zpfalv/
http://bj.ganji.com/zpmaoyiyunshu/
http://bj.ganji.com/zpshengchanzhizaozhiwei/
http://bj.ganji.com/zpdianqinengyuan/
http://bj.ganji.com/zpwuye/
http://bj.ganji.com/zpjianzhuzhuangxiu/
http://bj.ganji.com/zpjixieyiqiyibiao/
http://bj.ganji.com/zpyiyaoshengwu/
http://bj.ganji.com/zpyiyuanyiliao/
http://bj.ganji.com/zpnonglin/
http://bj.ganji.com/zphuanjingbaohu/
http://bj.ganji.com/zpruanjianhulianwang/
http://bj.ganji.com/zpityunweiyuceshi/
http://bj.ganji.com/zphulianwangchanpinyunyingguanli/
http://bj.ganji.com/zpdianzidianqibandaoti/
http://bj.ganji.com/zpxintuodanbaopaimaidiandang/
http://bj.ganji.com/zpfuzhuangfangzhipigeshengchan/
http://bj.ganji.com/zphuagong/
http://bj.ganji.com/zpgaojiguanli/
http://bj.ganji.com/zpqita/
'''
``` |
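In a follow-up crawler this kind of result string would typically be split back into individual URLs; a minimal sketch (`crawl_channel` is a hypothetical placeholder, not part of this file):
```python
# Split the pasted result back into a list of channel URLs.
channel_urls = channel_list.split()
print(len(channel_urls))  # 48 job channels
for url in channel_urls:
    crawl_channel(url)    # hypothetical downstream function: crawl the listing pages of one channel
```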
{
"source": "JianaXu/productivity",
"score": 2
} |
#### File: productivity/input/forms.py
```python
from django import forms
from input.models import Post, Task
from django.contrib.auth.models import User
import json
from django.core.serializers.json import DjangoJSONEncoder
class ActionForm(forms.ModelForm):
user_name = forms.CharField()
    task = forms.CharField(required=False)
    work_complete = forms.CharField(required=False)
class Meta:
model = Post
fields = ('user_name', 'task', 'work_complete')
def clean(self):
self.cleaned_data['user_name']
self.cleaned_data['task']
self.cleaned_data['work_complete']
return self.cleaned_data
class GroupForm(forms.ModelForm):
    member_name = forms.CharField(label='')
class Meta:
model = Post
fields = ('member_name', )
``` |
{
"source": "jianbaishi/tensorflow-learn",
"score": 2
} |
#### File: python/framework/function_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_logging_ops
def _OptimizerOptions():
for cse in [False, True]:
for inline in [False, True]:
for cfold in [False, True]:
yield tf.ConfigProto(graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L0,
do_common_subexpression_elimination=cse,
do_function_inlining=inline,
do_constant_folding=cfold)))
class FunctionTest(tf.test.TestCase):
def _mat(self, x):
return np.array([x]).astype("float32").reshape([1, 1])
def testBasic(self):
g = tf.Graph()
# Define a function
# foo(a:float, b:float, c:float)->u:float,v:float,w:float
# u = matmul(a, b) + c
# v = u^2
# w = u + v
foo = tf.Graph()
with foo.as_default():
a = tf.placeholder(tf.float32, name="a")
b = tf.placeholder(tf.float32, name="b")
c = tf.placeholder(tf.float32, name="c")
u = tf.add(tf.matmul(a, b), c, name="u")
v = tf.square(u, name="v")
w = tf.add_n([u, v], name="w")
fdef = function._graph_to_function_def(foo, "foo", [a, b, c], [u, v, w])
class Mock(function._DefinedFunction):
def __init__(self, fdef):
self._func_name = "foo"
self._definition = fdef
self._sub_functions = collections.OrderedDict()
self._grad_func = None
self._python_grad_func = None
self._hash = hash(fdef.SerializeToString())
g._add_function(Mock(fdef))
# Compute 2 * 3 + 4 and its square.
with g.as_default(), tf.Session() as sess:
two = tf.constant(self._mat(2.0), name="two")
three = tf.constant(self._mat(3.0), name="three")
four = tf.constant(self._mat(4.0), name="four")
# TODO(zhifengc): w/ @decorator sugar, we will just do:
# y, s, t = foo_func(two, three, four)
# The graph contains two ops each of which calls foo.
u0, v0, w0 = g.create_op(
"foo", [two, three, four], [tf.float32, tf.float32, tf.float32],
compute_shapes=False).outputs
u1, v1, w1 = g.create_op(
"foo", [four, two, three], [tf.float32, tf.float32, tf.float32],
compute_shapes=False).outputs
# Checks some property of the graph def.
gdef = g.as_graph_def()
self.assertEqual(len(gdef.node), 5) # 5 nodes added.
self.assertEqual(len(gdef.library.function), 1) # 1 function is defined.
for _ in xrange(10):
# Run the graph, which is basically two function calls.
ans_u0, ans_v0, ans_w0, ans_u1, ans_v1, ans_w1 = sess.run([u0, v0, w0,
u1, v1, w1])
self.assertAllEqual(ans_u0, self._mat(10.0)) # 2 * 3 + 4 = 10
self.assertAllEqual(ans_v0, self._mat(100.0)) # 10^2 = 100
self.assertAllEqual(ans_w0, self._mat(110.0)) # 100 + 10 = 110
self.assertAllEqual(ans_u1, self._mat(11.0)) # 4 * 2 + 3 = 11
self.assertAllEqual(ans_v1, self._mat(121.0)) # 11^2 = 121
self.assertAllEqual(ans_w1, self._mat(132.0)) # 11 + 121 = 132
def testDefineFunction2Args(self):
@function.Defun(tf.float32, tf.float32)
def APlus2B(a, b):
return a + b * 2
with tf.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEquals("APlus2B", call.op.name)
with tf.Session() as sess:
self.assertAllEqual([5.0], sess.run(call))
def testGradientFunc(self):
@function.Defun(tf.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
return x * x + 1.0
@function.Defun(tf.float32, tf.float32)
def XSquarePlusOneGrad(x, dy):
dx = functional_ops._symbolic_gradient(
input=[x, dy], Tout=[tf.float32], f="XSquarePlusOneFn", name="dx")
return dx
g = tf.Graph()
with g.as_default():
call_f = XSquarePlusOne([2.0])
call_g = XSquarePlusOneGrad([2.0], [0.1])
with tf.Session() as sess:
self.assertAllClose([5.0], sess.run(call_f))
self.assertAllClose([0.4], sess.run(call_g))
def testTanhSymGrad(self):
@function.Defun(tf.float32)
def Forward(x):
return tf.reduce_sum(tf.tanh(x))
g = tf.Graph()
with g.as_default():
x = tf.placeholder(tf.float32)
y = Forward(x)
dx = tf.gradients([y], [x])
inp = np.array([-1, 1, 2, -2], dtype=np.float32)
feed = {x: inp}
cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1, do_function_inlining=True)))
with tf.Session(graph=g, config=cfg) as sess:
out, = sess.run(dx, feed)
self.assertAllClose(1 - np.square(np.tanh(inp)), out)
def testCustomGradient(self):
dtype = tf.float32
@function.Defun(dtype, dtype, dtype)
def XentLossGrad(logits, labels, dloss):
dlogits = tf.reshape(dloss, [-1, 1]) * (tf.nn.softmax(logits) - labels)
dlabels = tf.zeros_like(labels)
# Takes exp(dlogits) to differentiate it from the "correct" gradient.
return tf.exp(dlogits), dlabels
@function.Defun(dtype, dtype, grad_func=XentLossGrad)
def XentLoss(logits, labels):
return tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)), 1)
g = tf.Graph()
with g.as_default():
logits = tf.placeholder(dtype)
labels = tf.placeholder(dtype)
loss = XentLoss(logits, labels)
dlogits = tf.gradients([loss], [logits])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
for cfg in _OptimizerOptions():
print("cfg = ", cfg)
with tf.Session(graph=g, config=cfg) as sess:
out, = sess.run(dlogits, {logits: x, labels: y})
self.assertAllClose(out, np.exp(prob - y))
def testCustomGradientError(self):
dtype = tf.float32
@function.Defun(dtype, dtype, dtype)
def Grad(x, dy, dz):
# Should have returned 1 result.
return x, dy + dz
@function.Defun(dtype, grad_func=Grad)
def Forward(x):
return x, x
g = tf.Graph()
with g.as_default():
inp = tf.placeholder(dtype)
out = tf.add_n(Forward(inp))
dinp = tf.gradients(out, [inp])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
with tf.Session(graph=g) as sess:
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"SymGrad expects to return 1.*but get 2.*instead"):
_ = sess.run(dinp, {inp: x})
def testSymGradShape(self):
g = tf.Graph()
with g.as_default():
x = tf.placeholder(tf.float32, [25, 4])
y = tf.placeholder(tf.float32, [200, 100])
dz = tf.placeholder(tf.float32, [1])
# We assume Foo is a function of (x, y) -> (z) Then, Foo's
# gradient function is (x, y, dz) -> (dx, dy). dx's shape
# should be the same as x's; and dy's shape should be the same
# as y's.
dx, dy = functional_ops._symbolic_gradient(
input=[x, y, dz], Tout=[tf.float32] * 2, f="Foo")
self.assertEquals(x.get_shape(), dx.get_shape())
self.assertEquals(y.get_shape(), dy.get_shape())
def testZNoDepOnY(self):
@function.Defun(tf.float32, tf.float32)
def Foo(x, y): # pylint: disable=unused-argument
return x * 2
with tf.Graph().as_default():
      # z = Foo(x, y). z does not depend on y.
x = tf.constant(1.0)
y = tf.constant(2.0)
z = Foo(x, y)
dx, dy = tf.gradients([z], [x, y])
with tf.Session() as sess:
dx_val, dy_val = sess.run([dx, dy])
self.assertEquals([2.0], dx_val)
self.assertEquals([0.0], dy_val)
def testDefineFunctionNoArgs(self):
@function.Defun()
def AConstant():
return tf.constant([42])
with tf.Graph().as_default():
call = AConstant()
self.assertEquals("AConstant", call.op.name)
with tf.Session() as sess:
self.assertAllEqual([42], sess.run(call))
def testDefineFunctionNames(self):
@function.Defun(tf.float32)
def Foo(a):
return a + 1
with tf.Graph().as_default():
call1 = Foo([1.0])
self.assertEquals("Foo", call1.op.name)
call2 = Foo([1.0])
self.assertEquals("Foo_1", call2.op.name)
# pylint: disable=unexpected-keyword-arg
call3 = Foo([1.0], name="mine")
self.assertEquals("mine", call3.op.name)
with tf.name_scope("my"):
call4 = Foo([1.0], name="precious")
self.assertEquals("my/precious", call4.op.name)
def testNoOp(self):
@function.Defun(tf.float32)
def Foo(x):
y = tf.Print(x, [x], "Hello")
with tf.control_dependencies([y]):
z = tf.no_op()
with tf.control_dependencies([z]):
return x * 2
with tf.Graph().as_default(), self.test_session():
z = Foo(tf.constant(3.0))
self.assertAllEqual(z.eval(), 6.0)
def testAssert(self):
@function.Defun(tf.float32)
def Foo(x):
check = gen_logging_ops._assert(tf.greater(x, 0), [x])
with tf.control_dependencies([check]):
return x * 2
g = tf.Graph()
with g.as_default(), self.test_session():
self.assertAllEqual(Foo(tf.constant(3.0)).eval(), 6.0)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"assertion failed.*-3"):
self.assertAllEqual(Foo(tf.constant(-3.0)).eval(), 6.0)
def testVar(self):
@function.Defun(tf.float32)
def Foo(x):
return x * x + 1
g = tf.Graph()
with g.as_default():
v = tf.Variable(tf.constant(10.0))
z = Foo(v)
with self.test_session(graph=g):
tf.initialize_all_variables().run()
self.assertAllEqual(z.eval(), 101.)
def testDefineErrors(self):
with tf.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "return at least one tensor"):
@function.Defun()
def NoResult():
pass
_ = NoResult.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def DefaultArg(unused_a=12):
return tf.constant([1])
_ = DefaultArg.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def KwArgs(**unused_kwargs):
return tf.constant([1])
_ = KwArgs.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun()
def PlusMinusV1(a, b):
return a + b, b - a
_ = PlusMinusV1.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(tf.float32)
def PlusMinusV2(a, b):
return a + b, b - a
_ = PlusMinusV2.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(tf.float32, tf.float32, tf.float32)
def PlusMinusV3(a, b):
return a + b, b - a
_ = PlusMinusV3.definition
def testCallErrors(self):
@function.Defun()
def Const():
return tf.constant(1)
@function.Defun(tf.int32)
def PlusOne(a):
return a + 1
@function.Defun(tf.int32, tf.int32)
def PlusMinus(a, b):
return a + b, b - a
with tf.Graph().as_default():
_ = Const()
# pylint: disable=too-many-function-args
# pylint: disable=unexpected-keyword-arg
# pylint: disable=no-value-for-parameter
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1)
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne()
_ = PlusOne(1)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus()
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus(1)
_ = PlusMinus(1, 2)
_ = PlusOne(1, name="p1")
with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
_ = PlusOne(1, device="/gpu:0")
def testDupDefinition(self):
@function.Defun(tf.float32)
def Foo(x):
return x + 1
@function.Defun(tf.float32, func_name="Foo")
def Bar(x):
return x + 1
@function.Defun(tf.float32, func_name="Foo")
def Baz(x):
return x + 2
with tf.Graph().as_default():
y = Foo(100.0)
z = Bar(100.0) # OK.
with self.test_session():
self.assertAllEqual(y.eval(), z.eval())
with self.assertRaisesRegexp(ValueError, "already defined"):
z = Baz(100.0)
def testFunctionDecorator(self):
@function.Defun(tf.float32)
def Minus1(b):
return b - 1.0
with tf.Graph().as_default():
call1 = Minus1([2.])
self.assertTrue(isinstance(Minus1, function._DefinedFunction))
self.assertEqual(Minus1.name, "Minus1")
# pylint: disable=unexpected-keyword-arg
call2 = Minus1(call1, name="next")
# pylint: enable=unexpected-keyword-arg
self.assertEquals("next", call2.op.name)
with tf.Session() as sess:
self.assertAllEqual([1], sess.run(call1))
self.assertAllEqual([0], sess.run(call2))
def testNestedFunction(self):
@function.Defun(tf.float32)
def Cube(x):
return x * x * x
@function.Defun(tf.float32, tf.float32)
def CubeXPlusY(x, y):
return Cube(x) + y
with tf.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.test_session():
self.assertAllEqual(z.eval(), 25.0)
def testNestedDefinedFunction(self):
@function.Defun(tf.float32, tf.float32)
def CubeXPlusY(x, y):
@function.Defun(tf.float32)
def Cube(x):
return x * x * x
return Cube(x) + y
with tf.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.test_session():
self.assertAllEqual(z.eval(), 25.0)
def testUnusedFunction(self):
invoked = False
# pylint: disable=unused-variable
@function.Defun()
def Unused():
invoked = True
return tf.constant(42.)
self.assertFalse(invoked)
g = tf.Graph()
with g.as_default():
@function.Defun()
def Unused2():
invoked = True
return tf.constant(7.)
tf.constant(3.)
# pylint: enable=unused-variable
self.assertFalse(invoked)
gdef = g.as_graph_def()
self.assertEquals(0, len(gdef.library.function))
def testReduction(self):
g = tf.Graph()
# BN0 is computing batch normed matrix along rows.
def BN0(x):
mean = tf.reduce_mean(x, [0])
var = tf.reduce_mean(tf.square(x - mean)) # biased var
rstd = tf.rsqrt(var + 1e-8)
return (x - mean) * rstd
# Wraps BatchNorm in a tf function.
@function.Defun(tf.float32)
def BN1(x):
return BN0(x)
with g.as_default():
x = tf.placeholder(tf.float32)
y0 = BN0(x) # A plain graph
y1 = BN1(x) # A tf function
dx0, = tf.gradients([y0], [x])
dx1, = tf.gradients([y1], [x])
# Both should produce the same result and gradient.
with self.test_session(graph=g) as sess:
vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
self.assertAllClose(vals[0], vals[1])
self.assertAllClose(vals[2], vals[3])
def testDeclareTypeMistake(self):
foo = function.Declare("Foo", [tf.float32], [tf.float32])
@function.Defun(tf.float32)
def Foo(x):
return x * x + 1
g = tf.Graph()
with g.as_default():
y = foo(2.0)
with self.test_session(graph=g):
with self.assertRaisesRegexp(tf.errors.NotFoundError, "not registered"):
_ = y.eval()
g = tf.Graph()
with g.as_default():
Foo.add_to_graph(g)
y = foo(2)
with self.test_session(graph=g):
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"int32.*float"):
_ = y.eval()
g = tf.Graph()
with g.as_default():
Foo.add_to_graph(g)
with self.assertRaisesRegexp(
ValueError, "Expected number of arguments: 1, received: 2"):
_ = foo(2.0, 2.0)
g = tf.Graph()
with g.as_default():
Foo.add_to_graph(g)
y = foo(2.0)
with self.test_session(graph=g):
self.assertAllEqual(y.eval(), 5.0)
class UnrollLSTMTest(tf.test.TestCase):
BATCH_SIZE = 16
LSTM_DIMS = 32
NUM_UNROLL = 20
def _Weights(self):
dims = self.LSTM_DIMS
return tf.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456)
def _Input(self):
return tf.random_uniform(
[self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=654321)
# Helper to construct a LSTM cell graph.
@classmethod
def LSTMCell(cls, x, mprev, cprev, weights):
xm = tf.concat(1, [x, mprev])
i_i, i_g, f_g, o_g = tf.split(1, 4, tf.matmul(xm, weights))
new_c = tf.sigmoid(f_g) * cprev + tf.sigmoid(i_g) * tf.tanh(i_i)
new_c = tf.clip_by_value(new_c, -50.0, 50.0)
new_m = tf.sigmoid(o_g) * tf.tanh(new_c)
return new_m, new_c
def _BuildForward(self, weights, inp, mode="cell"):
def Loop(cell, w, i):
x = tf.unpack(i, self.NUM_UNROLL)
m = tf.zeros_like(x[0])
c = tf.zeros_like(x[0])
for i in range(self.NUM_UNROLL):
m, c = cell(x[i], m, c, w)
return m
cell = UnrollLSTMTest.LSTMCell
if mode == "complete":
# Constructs the complete graph in python.
return Loop(cell, weights, inp)
cell = function.Defun(tf.float32, tf.float32, tf.float32, tf.float32)(cell)
if mode == "cell":
# Just represent the LSTM as a function.
return Loop(cell, weights, inp)
if mode == "loop":
# Wraps the whole loop as a function.
@function.Defun(tf.float32, tf.float32)
def LSTMLoop(w, i):
return Loop(cell, w, i)
return LSTMLoop(weights, inp)
if mode == "loop10":
# Wraps 10 lstm steps into one function, and the whole loop
# into another calling the formers.
# Groups 10 steps at a time.
@function.Defun(tf.float32, tf.float32, tf.float32, *([tf.float32] * 10))
def Loop10(w, m, c, *args):
for x in args:
m, c = cell(x, m, c, w)
return m, c
@function.Defun(tf.float32, tf.float32)
def LSTMLoop10(weights, inp):
x = tf.unpack(inp, self.NUM_UNROLL)
m = tf.zeros_like(x[0])
c = tf.zeros_like(x[0])
assert self.NUM_UNROLL % 10 == 0
for i in range(0, self.NUM_UNROLL, 10):
m, c = Loop10(weights, m, c, *x[i:i + 10])
return m
return LSTMLoop10(weights, inp)
def testUnrollLSTM(self):
# Run one step of the unrolled lstm graph.
def RunForward(mode, cfg=None):
print("mode = ", mode)
g = tf.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
gdef = g.as_graph_def()
finish = time.time()
print("time: ", finish - start, " txt size: ", len(str(gdef)),
"gdef bin size: ", len(gdef.SerializeToString()))
with g.as_default(), tf.Session(config=cfg) as sess:
return sess.run(m)
mv0 = RunForward("complete")
for cfg in _OptimizerOptions():
print("cfg = ", cfg)
mv1 = RunForward("cell", cfg)
mv2 = RunForward("loop", cfg)
mv3 = RunForward("loop10", cfg)
self.assertAllClose(mv0, mv1, rtol=1e-4)
self.assertAllClose(mv0, mv2, rtol=1e-4)
self.assertAllClose(mv0, mv3, rtol=1e-4)
def testUnrollLSTMGrad(self):
# Run one step of the unrolled lstm graph.
def RunForwardBackward(mode, cfg=None):
print("mode = ", mode)
g = tf.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
loss = tf.reduce_sum(tf.square(m))
dw = tf.gradients([loss], [weights])
gdef = g.as_graph_def()
finish = time.time()
print("time: ", finish - start, " txt size: ", len(str(gdef)),
"gdef bin size: ", len(gdef.SerializeToString()))
with g.as_default(), tf.Session(config=cfg) as sess:
return sess.run(dw)
d0 = RunForwardBackward("complete")
for cfg in _OptimizerOptions():
print("cfg = ", cfg)
d1 = RunForwardBackward("cell", cfg)
d2 = RunForwardBackward("loop", cfg)
d3 = RunForwardBackward("loop10", cfg)
self.assertAllClose(d0, d1, rtol=1e-4)
self.assertAllClose(d0, d2, rtol=1e-4)
self.assertAllClose(d0, d3, rtol=1e-4)
class FunctionInlineControlTest(tf.test.TestCase):
def testFoo(self):
dtype = tf.float32
cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
for noinline in [False, True]:
# pylint: disable=unexpected-keyword-arg
@function.Defun(dtype, noinline=noinline)
def Cell(v):
# If v is a vector [n, 1], x is a big square matrix.
x = tf.tanh(v + tf.transpose(v, [1, 0]))
return tf.reduce_sum(x, 1, keep_dims=True)
@function.Defun(dtype)
def Forward(x):
for _ in range(10):
# pylint: disable=cell-var-from-loop
x = Cell(x)
return tf.reduce_sum(x, [0, 1])
g = tf.Graph()
with g.as_default():
x = tf.placeholder(dtype)
y = Forward(x)
dx, = tf.gradients([y], [x])
np.random.seed(321)
inp = np.random.uniform(-1, 1, [16, 1]).astype(np.float32)
with tf.Session(graph=g, config=cfg) as sess:
ans = sess.run([y, dx], {x: inp})
print(ans[0], np.sum(ans[1]))
self.assertAllClose(ans[0], 255.971, rtol=1e-3)
self.assertAllClose(np.sum(ans[1]), 13.0408, rtol=1e-3)
@function.Defun(*[tf.float32] * 3)
def Linear(w, b, x):
return tf.nn.relu(tf.matmul(x, w) + b)
@function.Defun(*[tf.float32] * 5)
def Linear2(w1, b1, w2, b2, x):
return Linear(w2, b2, Linear(w1, b1, x))
class ModuleFunctionTest(tf.test.TestCase):
def testBasic(self):
with tf.Graph().as_default():
a, b, c, d, e = [tf.constant([[_]], dtype=tf.float32) for _ in range(5)]
y = Linear(a, b, c)
z = Linear2(a, b, c, d, e)
with tf.Session() as sess:
self.assertAllEqual([[1]], sess.run(y))
self.assertAllEqual([[5]], sess.run(z))
if __name__ == "__main__":
tf.test.main()
```
#### File: python/saved_model/builder.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.any_pb2 import Any
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
class SavedModelBuilder(object):
"""Builds the `SavedModel` protocol buffer and saves variables and assets.
The `SavedModelBuilder` class provides functionality to build a `SavedModel`
protocol buffer. Specifically, this allows multiple meta graphs to be saved as
part of a single language-neutral `SavedModel`, while sharing variables and
assets.
To build a SavedModel, the first meta graph must be saved with variables.
Subsequent meta graphs will simply be saved with their graph definitions. If
assets need to be saved and written or copied to disk, they must be provided
as part of the first meta graph to be saved. Subsequent meta graphs can
provide a subset of the initial assets to be added to the SavedModel
definition.
Each meta graph added to the SavedModel must be annotated with tags. The tags
provide a means to identify the specific meta graph to load and restore, along
with the shared set of variables and assets.
Typical usage for the `SavedModelBuilder`:
```python
...
builder = saved_model_builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph_and_variables(sess,
["foo-tag"],
signature_def_map=foo_signatures,
asset_collection=foo_assets)
...
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph(["bar-tag", "baz-tag"])
...
builder.save()
```
"""
def __init__(self, export_dir):
self._saved_model = saved_model_pb2.SavedModel()
self._saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
self._export_dir = export_dir
if not file_io.file_exists(export_dir):
file_io.recursive_create_dir(self._export_dir)
# Boolean to track whether variables and assets corresponding to the
# SavedModel have been saved. Specifically, the first meta graph to be added
# MUST use the add_meta_graph_and_variables() API. Subsequent add operations
# on the SavedModel MUST use the add_meta_graph() API which does not save
# weights.
self._has_saved_variables = False
def _asset_path_from_tensor(self, path_tensor):
"""Returns the filepath value stored in constant `path_tensor`.
Args:
path_tensor: Tensor of a file-path.
Returns:
The string value i.e. path of the tensor, if valid.
Raises:
TypeError if tensor does not match expected op type, dtype or value.
"""
if not isinstance(path_tensor, ops.Tensor):
raise TypeError("Asset path tensor must be a Tensor.")
if path_tensor.op.type != "Const":
raise TypeError("Asset path tensor must be of type constant.")
if path_tensor.dtype != dtypes.string:
raise TypeError("Asset path tensor must be of dtype string.")
str_values = path_tensor.op.get_attr("value").string_val
if len(str_values) != 1:
raise TypeError("Asset path tensor must be a scalar.")
return str_values[0]
def _add_asset_to_collection(self, asset_filename, asset_tensor):
"""Builds an asset proto and adds it to the asset collection of the graph.
Args:
asset_filename: The filename of the asset to be added.
asset_tensor: The asset tensor used to populate the tensor info of the
asset proto.
"""
asset_proto = meta_graph_pb2.AssetFileDef()
asset_proto.filename = asset_filename
asset_proto.tensor_info.name = asset_tensor.name
asset_any_proto = Any()
asset_any_proto.Pack(asset_proto)
ops.add_to_collection(constants.ASSETS_KEY, asset_any_proto)
def _save_and_write_assets(self, assets_collection_to_add=None):
"""Saves asset to the meta graph and writes asset files to disk.
Args:
assets_collection_to_add: The collection where the asset paths are setup.
"""
asset_source_filepath_list = self._save_assets(assets_collection_to_add)
# Return if there are no assets to write.
    if len(asset_source_filepath_list) == 0:
tf_logging.info("No assets to write.")
return
assets_destination_dir = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY))
if not file_io.file_exists(assets_destination_dir):
file_io.recursive_create_dir(assets_destination_dir)
# Copy each asset from source path to destination path.
for asset_source_filepath in asset_source_filepath_list:
asset_source_filename = os.path.basename(asset_source_filepath)
asset_destination_filepath = os.path.join(
compat.as_bytes(assets_destination_dir),
compat.as_bytes(asset_source_filename))
file_io.copy(
asset_source_filepath, asset_destination_filepath, overwrite=True)
tf_logging.info("Assets written to: %s", assets_destination_dir)
def _save_assets(self, assets_collection_to_add=None):
"""Saves assets to the meta graph.
Args:
assets_collection_to_add: The collection where the asset paths are setup.
Returns:
The list of filepaths to the assets in the assets collection.
Raises:
ValueError: Indicating an invalid filepath tensor.
"""
asset_source_filepath_list = []
if assets_collection_to_add is None:
tf_logging.info("No assets to save.")
return asset_source_filepath_list
# Iterate over the supplied asset collection, build the `AssetFile` proto
# and add them to the collection with key `constants.ASSETS_KEY`, in the
# graph.
for asset_tensor in assets_collection_to_add:
asset_source_filepath = self._asset_path_from_tensor(asset_tensor)
if not asset_source_filepath:
raise ValueError("Invalid asset filepath tensor %s" % asset_tensor)
asset_source_filename = os.path.basename(asset_source_filepath)
# Build `AssetFile` proto and add it to the asset collection in the graph.
self._add_asset_to_collection(asset_source_filename, asset_tensor)
asset_source_filepath_list.append(asset_source_filepath)
tf_logging.info("Assets added to graph.")
return asset_source_filepath_list
def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):
"""Tags the meta graph def and adds it to the SavedModel.
Tags the meta graph def with the supplied tags, adds signature defs to it if
provided and appends the meta graph def to the SavedModel proto.
Args:
meta_graph_def: The meta graph def to add to the SavedModel.
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
"""
for tag in tags:
meta_graph_def.meta_info_def.tags.append(tag)
if signature_def_map is not None:
for key in signature_def_map:
meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])
proto_meta_graph_def = self._saved_model.meta_graphs.add()
proto_meta_graph_def.CopyFrom(meta_graph_def)
def add_meta_graph(self, tags, signature_def_map=None,
assets_collection=None):
"""Adds the current meta graph to the SavedModel.
Creates a Saver in the current scope and uses the Saver to export the meta
graph def. Invoking this API requires the `add_meta_graph_and_variables()`
API to have been invoked before.
Args:
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
assets_collection: Assets collection to be saved with SavedModel. Note
that this collection should be a subset of the assets saved as part of
the first meta graph in the SavedModel.
Raises:
AssertionError: If the variables for the SavedModel have not been saved
yet.
"""
if not self._has_saved_variables:
raise AssertionError(
"Variables and assets have not been saved yet. "
"Please invoke `add_meta_graph_and_variables()` first.")
# Save asset files, if any.
self._save_assets(assets_collection)
saver = tf_saver.Saver(variables.all_variables())
meta_graph_def = saver.export_meta_graph()
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
def add_meta_graph_and_variables(self,
sess,
tags,
signature_def_map=None,
assets_collection=None):
"""Adds the current meta graph to the SavedModel and saves variables.
Creates a Saver to save the variables from the provided session. Exports the
corresponding meta graph def. This function assumes that the variables to be
saved have been initialized. For a given `SavedModelBuilder`, this API must
be called exactly once and for the first meta graph to save. For subsequent
meta graph defs to be added, the `add_meta_graph()` API must be used.
Args:
sess: The TensorFlow session from which to save the meta graph and
variables.
tags: The set of tags with which to save the meta graph.
signature_def_map: The map of signature def map to add to the meta graph
def.
assets_collection: Assets collection to be saved with SavedModel.
"""
if self._has_saved_variables:
raise AssertionError("Variables and assets have already been saved. "
"Please invoke `add_meta_graph()` instead.")
# Save asset files and write them to disk, if any.
self._save_and_write_assets(assets_collection)
# Create the variables sub-directory, if it does not exist.
variables_dir = os.path.join(
compat.as_text(self._export_dir),
compat.as_text(constants.VARIABLES_DIRECTORY))
if not file_io.file_exists(variables_dir):
file_io.recursive_create_dir(variables_dir)
variables_path = os.path.join(
compat.as_text(variables_dir),
compat.as_text(constants.VARIABLES_FILENAME))
# Save the variables and export meta graph def.
saver = tf_saver.Saver(variables.all_variables())
saver.save(sess, variables_path, write_meta_graph=False)
meta_graph_def = saver.export_meta_graph()
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
# Mark this instance of SavedModel as having saved variables, such that
# subsequent attempts to save variables will fail.
self._has_saved_variables = True
def save(self, as_text=False):
"""Writes a `SavedModel` protocol buffer to disk.
The function writes the SavedModel protocol buffer to the export directory
in serialized format.
Args:
as_text: Writes the SavedModel protocol buffer in text format to disk.
Returns:
The path to which the SavedModel protocol buffer was written.
"""
if not file_io.file_exists(self._export_dir):
file_io.recursive_create_dir(self._export_dir)
if as_text:
path = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
file_io.write_string_to_file(path, str(self._saved_model))
else:
path = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(path, self._saved_model.SerializeToString())
tf_logging.info("SavedModel written to: %s", path)
return path
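    # Illustrative counterpart (not part of this module): a SavedModel written by
    # this builder is typically read back with the companion loader module.
    # Module/function names below are assumptions; verify against your TensorFlow version.
    #
    #   from tensorflow.python.saved_model import loader
    #   with tf.Session(graph=tf.Graph()) as sess:
    #       loader.load(sess, ["foo-tag"], export_dir)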
``` |
{
"source": "jianbing/wox-plugin-demo",
"score": 3
} |
#### File: plugins/Wox.Plugin.HelloWordEx/main.py
```python
from util import WoxEx, WoxAPI, load_module, Log
# Load all required modules in one place
with load_module():
import pyperclip
class Main(WoxEx):  # inherits from WoxEx
def query(self, keyword):
results = list()
results.append({
"Title": "input",
"SubTitle": keyword,
"IcoPath": "Images/ico.ico",
"JsonRPCAction": {
"method": "test_func",
"parameters": [keyword],
"dontHideAfterAction": False
}
})
return results
def test_func(self, keyword):
pyperclip.copy(keyword)
if __name__ == "__main__":
Main()
``` |
{
"source": "jianblog/dfhx",
"score": 2
} |
#### File: elk_analysis/userLoginTrack/trackUserLogin.py
```python
import mysql.connector
from elasticsearch import Elasticsearch
import elasticsearch.helpers
import pandas as pd
import numpy as np
from collections import defaultdict
import datetime
import os, re
def getLastTime(client, index):
"""
    Query the most recent "localtime" value recorded under the given index.
    Returns: timestamp in seconds, or None if the query fails.
"""
query_aggs = {
"size": 0,
"aggs": {
"most_recent": {
"max": {"field": "localtime", "format": "yyyy-MM-dd HH:mm:ss"}
}
}
}
try:
ret = client.search(index=index, body=query_aggs)
return int(ret['aggregations']['most_recent']['value']) / 1000
except Exception as e:
print("Query ELK failed for aggs max localtime")
return None
def queryRecent(client, index, time_from, time_to):
"""
    Query records within the given time range.
    time_from, time_to: time strings in "%Y-%m-%d %H:%M:%S" format (see the query's "format" field below).
    Returns: pandas DataFrame.
    Such strings can be built quickly as follows:
    cur_dt = datetime.datetime.now()  # current time
    cust_dt = datetime.datetime.strptime("2017-07-07 09:30:00", "%Y-%m-%d %H:%M:%S")  # custom time string
    tim_to = timePos.strftime("%Y-%m-%d %H:%M:%S")
    tim_from = (timePos - datetime.timedelta(seconds=diff)).strftime("%Y-%m-%d %H:%M:%S")
"""
    # Fields to return for each hit
fields = ["localtime", "clientip", "session_id", "request_body", "url", "agent"]
query_range = {
"_source": fields,
"query": {
"bool": {
"must": [
{"range": {
"localtime": {
"from": time_from,
"to": time_to,
"format": "yyyy-MM-dd HH:mm:ss",
"time_zone": "+08:00"
}
}},
{"exists": {"field": "session_id"}}
]
}
}
}
ret = elasticsearch.helpers.scan(client, query_range, index=index, scroll='1m')
ret_generator = (r['_source'] for r in ret)
df = pd.DataFrame(ret_generator)
return df
def insertUserTrack(client, elk_index, elk_type, df_usr):
"""
    Bulk-insert the user activity records into Elasticsearch.
"""
actions = []
for idx in df_usr.index:
dic = df_usr.loc[idx].to_dict()
dic['_index'] = elk_index
dic['_type'] = elk_type
actions.append(dic)
elasticsearch.helpers.bulk(client, actions)
def writeToFile(filename, df):
"""
    Append each DataFrame row to the file as one dict-formatted line.
"""
with open(filename, 'a') as f:
for idx in df.index:
f.writelines(str(df.loc[idx].to_dict()).replace("'", '"') + "\n")
def filter_field(df, field_name, term_list):
"""
    Exact-match filter on one column.
    Args:
        field_name: name of the column to filter on.
        term_list: list of exact strings to keep.
    Returns: pandas DataFrame containing only the matching rows.
"""
df_term = pd.DataFrame()
for tm in term_list:
df_term = df_term.append(df[df[field_name] == tm])
return df_term
def match_field(df, field_name, patt):
"""
    Match one column against a single regex.
    Returns: pandas Series holding the matched account (group 1) or None per row.
"""
match_list = []
for record in df[field_name]:
ret = patt.search(record)
if ret:
match_list.append(ret.groups()[0])
else:
match_list.append(None)
return pd.Series(match_list)
def multiMatch_field(df, field_name, patt_list):
"""
    Try each regex in the list against one column, row by row.
    Returns: pandas Series aligned with df's index, so it can be joined back to df.
"""
match_list = []
# for idx,record in df[field_name].items():
for record in df[field_name]:
flag = 0
for patt in patt_list:
ret = patt.search(record)
if ret:
flag = 1
match_list.append(ret.groups()[0])
                break  # one match is enough
if not flag:
match_list.append(None)
    # Combine into a Series aligned with df's index; a DataFrame can then be built from a dict and the time strings parsed into datetimes
return pd.Series(match_list, df.index)
'''
df_user = pd.DataFrame({'localtime':df['localtime'], 'clientip': df['clientip'], 'session_id':df['session_id'],
'agent':df['agent'], 'user_account': match_list},
columns = ['localtime','clientip','session_id','user_account','agent'])
return df_user
'''
def tim2str(t):
"""
    Convert a datetime to a fixed-format string, used for times read from the database.
"""
try:
return t.strftime("%Y-%m-%dT%H:%M:%S+08:00")
except Exception:
return None
if __name__ == "__main__":
    # 0. Initialization
    # MySQL connection
cn = mysql.connector.connect(user='dfhxp2p', password='<PASSWORD>', host='localhost', port='3307',
database='prod_p2p')
    # Elasticsearch connection
es = Elasticsearch(['ali.dev:9200'])
    # Regex patterns for user login requests
    patt1 = re.compile("useraccount=(.*?)&")
    # Pattern 2: multi-line, "useraccount" followed by a Content-Length line, then the 11-digit account on the next line
    patt2 = re.compile('name="useraccount"\s+Content-Length: \d+\s+(\d{11})\s+--')
    # \r\n sequences are matched with \s+
    # Pattern 3: multi-line, "useraccount" followed by the 11-digit account on the next line
    patt3 = re.compile('name="useraccount"\s+(\d{11})\s+--')
    patt_login = [patt1, patt2, patt3]
    # 1. Connect to the database and fetch the latest user list
sql = "select user_id, user_account,user_realname, invited_by_uid,apply_time from rb_user where user_id > 1"
db_usr = pd.read_sql_query(sql, cn)
cn.close()
db_usr['user_id'] = db_usr['user_id'].astype(np.object)
db_usr['invited_by_uid'] = db_usr['invited_by_uid'].astype(np.object)
    db_usr['apply_time'] = db_usr['apply_time'].apply(tim2str)  # convert the DataFrame's datetime column to strings
    # 2. Determine the time window for this run
    # The end time of the previous query plus one second is this run's start
ts_last_query_end = getLastTime(es, 'user_track_*')
    if not ts_last_query_end:
        exit()
query_begin = datetime.datetime.strftime(datetime.datetime.fromtimestamp(ts_last_query_end + 1),
"%Y-%m-%d %H:%M:%S")
    # The newest record time minus one minute is this run's end
ts_cur_record_end = getLastTime(es, 'nginx_jcj_*')
    if not ts_cur_record_end:
        exit()
query_end = datetime.datetime.strftime(datetime.datetime.fromtimestamp(ts_cur_record_end - 60), "%Y-%m-%d %H:%M:%S")
print(query_begin, query_end)
df = queryRecent(es, 'nginx_jcjact_*', query_begin, query_end)
if len(df) > 0:
df_login = filter_field(df, 'url', ['/dybuat/user/login.do','/user/login.do'])
        # Match accounts with the regex patterns and build a DataFrame linking accounts to their requests
s_usr = multiMatch_field(df_login, 'request_body', patt_login)
df_usr = pd.DataFrame({"localtime": df_login['localtime'],
"clientip": df_login['clientip'],
"session_id": df_login['session_id'],
"agent": df_login['agent'],
"user_account": s_usr})
        df_usr.sort_values('localtime', inplace=True)  # sort by time
        df_usr.reset_index(inplace=True)  # reset the index
        df_usr.drop('index', axis=1, inplace=True)  # drop the 'index' column created by reset_index
        df_usr.dropna(axis=0, inplace=True)  # drop rows where no account could be matched
        # Join the login records with the database user table to build the final records
df_user = df_usr.merge(db_usr, on='user_account', how='left')
# print(df_user[['localtime','user_realname', 'user_account', 'invited_by_uid', 'apply_time']])
abs_path = os.path.split(os.path.realpath(__file__))[0]
writeToFile(os.path.join(abs_path, "userLogin.log"), df_user)
else:
print("no record")
``` |
{
"source": "jianblog/seleniumTest",
"score": 3
} |
#### File: seleniumTest/framework/logger.py
```python
import logging
import os.path
import time
class Logger(object):
def __init__(self,logger):
self.logger = logging.getLogger(logger)
self.logger.setLevel(logging.DEBUG)
rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
log_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")
log_name = os.path.join(log_path,rq + '.log')
fh = logging.FileHandler(log_name)
fh.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
self.logger.addHandler(fh)
#self.logger.addHandler(ch)
def getlog(self):
return self.logger
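# Minimal usage sketch (illustrative, not part of the original module):
#   logger = Logger(logger="TestLog").getlog()
#   logger.info("page loaded")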
```
#### File: seleniumTest/pageobjects/jcj_login_page.py
```python
import sys
sys.path.append("..")
import os
import json
from framework.base_page import BasePage
class LoginPage(BasePage):
    # Entry point to the login page
login_link = "link_text=>立即登录" #"xpath=>html/body/div[2]/div[5]/div[1]/div/div[2]/a[2]"
def __init__(self, driver):
super(LoginPage, self).__init__(driver)
# loading json
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "elements_top.json"),"r", encoding="utf-8") as f:
self.map_top = json.load(f)
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "elements_login.json"),"r", encoding="utf-8") as f:
self.map_login = json.load(f)
# initial goto login page
self.to_login()
def to_login(self):
element = self.find_the_element(self.map_top['top_login'])
element.click()
self.sleep(2)
def logout(self):
element = self.find_the_element(self.map_top['top_logout'])
element.click()
self.sleep(2)
def input_account(self, value):
element = self.find_the_element(self.map_login['input_account'])
element.clear()
element.send_keys(value)
def input_password(self, password):
element = self.find_the_element(self.map_login['input_password'])
element.send_keys(password)
def click_login(self):
element = self.find_the_element(self.map_login['button_login'])
element.click()
self.sleep(2)
def success_login_message(self):
element = self.find_the_element(self.map_top['banner_message_login'])
return element
def fail_login_message(self):
element = self.find_the_element(self.map_login['message_login'])
return element
``` |
{
"source": "Jianbo-Lab/QuTE",
"score": 3
} |
#### File: QuTE/applications/extract_data.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import urllib2
import gzip
import StringIO
from utils import detrend
def extract_data():
response = urllib2.urlopen('http://db.csail.mit.edu/labdata/data.txt.gz')
compressedFile = StringIO.StringIO()
compressedFile.write(response.read())
compressedFile.seek(0)
decompressedFile = gzip.GzipFile(fileobj=compressedFile, mode='rb')
html = decompressedFile.read()
lines = html.split('\n')
lines = [line.split(' ')[2:] for line in lines][:-1]
data = pd.DataFrame(lines, columns=['epoch','moteid',
'temperature','humidity',
'light','voltage'])
data = data.apply(pd.to_numeric)
data = data.dropna(axis = 0)
data = data[data['voltage']>0]
    # Select epochs with at least 40 motes per epoch.
data=data.groupby([data['epoch']//100]).\
filter(lambda x: x['moteid'].nunique()>= 40)
return data
def average_data(data):
"""
Take means every 100 epochs.
"""
data['count'] = data['temperature'].copy()
data.columns = ['epoch','moteid','temperature_mean',
'humidity_mean','light_mean',
'voltage', 'count']
averaged_data = data.groupby([data['epoch']//100, data['moteid']]).\
agg({'temperature_mean': np.mean,
'humidity_mean': np.mean,
'light_mean': np.mean,
'voltage':np.mean,
'count': len})
# Include epoch, moteid as two columns.
averaged_data.reset_index(inplace=True)
return averaged_data
def detrend_data(averaged_data):
"""
Detrend the time series, taking away long-term effects.
"""
detrended_data = averaged_data.copy()
for moteid in range(1, 59):
for feature in ['temperature_mean','humidity_mean']:
if moteid not in [5,15,28,57]:
detrended_data.loc[detrended_data['moteid']==moteid,feature]=\
detrend(averaged_data, moteid,feature)
return detrended_data
def pivot_data(detrended_data):
grouped_by_id = detrended_data.groupby('moteid').mean()
mean_by_epoch = detrended_data.pivot(index = 'epoch',
columns = 'moteid',
values ='temperature_mean')
columns = mean_by_epoch.columns.copy()
# Fill in NA values:
for i in columns:
mean_by_epoch[i].fillna(value = grouped_by_id.loc[i,'temperature_mean'],
inplace = True)
return mean_by_epoch,columns
def extract_coordinates():
response = urllib2.urlopen('http://db.csail.mit.edu/labdata/mote_locs.txt')
html = response.read()
lines = html.split('\n')
lines = [line.split(' ') for line in lines][:-1]
coordinates = [np.array([float(line[1]),float(line[2])]) for line in lines]
return coordinates
def extract_connectivity():
response = urllib2.urlopen('http://db.csail.mit.edu/labdata/connectivity.txt')
html = response.read()
lines = html.split('\n')
lines = [line.split(' ')[1:] for line in lines][:-2]
for i,line in enumerate(lines):
if line[2]=='':
lines[i][2]='0'
return lines
```
#### File: QuTE/simulations/main.py
```python
from experiment import *
import argparse
import numpy as np
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--variable',type = str,default = 'p')
parser.add_argument('--pi1',type = float,default = 0.3)
parser.add_argument('--n_samples',type = int,default = 1000)
parser.add_argument('--p',type = float,default = 0.5)
parser.add_argument('--p1',type = float,default = 0.9)
parser.add_argument('--p2',type = float,default = 0.1)
parser.add_argument('--d',type = int, default = 32)
parser.add_argument('--mu',type = float,default = 2)
parser.add_argument('--alpha',type= float,default = 0.2)
parser.add_argument('--replicate',type = int, default = 100)
parser.add_argument('--var_start',type = float,default = 0.9)
parser.add_argument('--var_stop',type = float,default = 0.999)
parser.add_argument('--var_step',type = float,default = 0.01)
# parser.add_argument('--add_classical',type = int,
# default = 0, choices = [0,1])
parser.add_argument('--experiment',type = str,
default = '1', choices = ['1','2','plot1','plot2','3','plot3','4','plot4','5','plot5'])
args = parser.parse_args()
var_range = np.arange(args.var_start,args.var_stop,args.var_step)
# add_classical = False if args.add_classical == 0 else True
if args.experiment == '1':
output = experiment1(args.variable,
var_range,
args.pi1,
args.n_samples,
args.p,
mu=args.mu,
alpha=args.alpha,
replicate=args.replicate)
elif args.experiment == '2':
output = experiment2_with_expander(variable = args.variable,
var_range = var_range,
pi1 = args.pi1,
n_samples = args.n_samples,
p = args.p,
mu = args.mu,
alpha = args.alpha,
replicate = args.replicate)
elif args.experiment == '3':
output = experiment3_with_regular(variable = args.variable,
var_range = var_range,
pi1 = args.pi1,
n_samples = args.n_samples,
d = args.d,
mu = args.mu,
alpha = args.alpha,
replicate = args.replicate)
elif args.experiment == 'plot1':
path1 = "results/experiment1/"
for file in os.listdir(path1):
if file.endswith(".save.npy"):
plot_wrt_output_experiment1(path1, file)
elif args.experiment == 'plot2':
path2 = "results/experiment2/"
for file in os.listdir(path2):
if file.endswith(".save.npy"):
plot_wrt_output_experiment2(path2, file)
elif args.experiment == 'plot3':
path3 = "results/experiment3/"
for file in os.listdir(path3):
if file.endswith(".save.npy"):
plot_wrt_output_experiment3(path3, file)
elif args.experiment == '4':
output = experiment4_with_SBM(variable = args.variable,
var_range = var_range,
pi1 = args.pi1,
n_samples = args.n_samples,
p1 = args.p1,
p2 = args.p2,
mu = args.mu,
alpha = args.alpha,
replicate = args.replicate)
elif args.experiment == 'plot4':
path4 = "results/experiment4/"
for file in os.listdir(path4):
if file.endswith(".save.npy"):
plot_wrt_output_experiment4(path4, file)
elif args.experiment == '5':
output = experiment5_with_lattice(variable = 'r',
var_range = np.arange(1,16,1),
pi1 = args.pi1,
r = 1,
generator = gt.lattice,
mu = 2,
alpha = 0.2,
replicate = args.replicate,
path = 'results/experiment5/')
elif args.experiment == 'plot5':
path5 = "results/experiment5/"
for file in os.listdir(path5):
if file.endswith(".save.npy"):
plot_wrt_output_experiment5(path5, file)
if __name__ =='__main__':
main()
```
#### File: QuTE/simulations/run_simulations.py
```python
import os
from experiment import *
from plot import *
import argparse
import numpy as np
import os
def main():
parser = argparse.ArgumentParser()
# Number of replicates.
parser.add_argument('--replicate',type = int, default = 1000)
# Which experiment to run.
parser.add_argument('--experiment',type = str,
default = 'gnp', choices = ['gnp','grid','plot_gnp','plot_grid'])
args = parser.parse_args()
if args.experiment == 'gnp':
output = experiment_on_gnp('p',
np.arange(0,1.05,0.05), 0.3, 1000, 0,
mu=2,
alpha=0.2,
replicate=args.replicate)
print 'Data have been saved to results/gnp/'
elif args.experiment == 'grid':
output = experiment_on_grid(variable = 'c',
var_range = np.arange(1,16,1),
pi1 = 0.3, c = 1, generator = gt.lattice, mu = 2, alpha = 0.2,
replicate = args.replicate)
        print 'Data have been saved to results/grid/'
elif args.experiment == 'plot_gnp':
path = "results/gnp/"
for file in os.listdir(path):
if file.endswith(".save.npy"):
plot_gnp(path, file)
print 'Figures have been saved to results/figures/'
elif args.experiment == 'plot_grid':
path = "results/grid/"
for file in os.listdir(path):
if file.endswith(".save.npy"):
plot_grid(path, file)
print 'Figures have been saved to results/figures/'
if __name__ == '__main__':
main()
``` |
{
"source": "jianboli/PortfolioAnalysis",
"score": 3
} |
#### File: PortfolioAnalysis/pyfolio/portfolio.py
```python
import numpy as np
import statsmodels.api as sm
import pandas as pd
def cumpound_pnl(r):
return (r+1).product()-1
def format_pnl(pnl_s):
"""
Construct the performance table from given monthly P&L
pnl_s: monthly returns as pandas data seriers with index the monthly end dates
"""
pnl_df = pd.DataFrame({"Monthly_End":pnl_s.index, "P&L":pnl_s.values})
pnl_df['Year'] = pnl_df['Monthly_End'].dt.year
pnl_df['Month'] = pnl_df['Monthly_End'].dt.month
pnl_df_pivot = pnl_df.pivot(index="Month", columns='Year', values='P&L')
pnl_yearly = pnl_df.groupby('Year').agg({'P&L': cumpound_pnl})
pnl_yearly.columns=["Yearly"]
pnl_yearly_cum = (pnl_yearly+1).cumprod()-1
pnl_yearly_cum.columns = ["Cumulative"]
pnl_df_pivot = pnl_df_pivot.append(pnl_yearly.T)
pnl_df_pivot = pnl_df_pivot.append(pnl_yearly_cum.T)
return pnl_df_pivot
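# Example (illustrative only): format_pnl expects a monthly return Series indexed
# by month-end dates, e.g.
#   idx = pd.date_range('2019-01-31', periods=3, freq='M')
#   pnl = pd.Series([0.01, -0.02, 0.015], index=idx)
#   table = format_pnl(pnl)  # months as rows, years as columns, plus Yearly/Cumulative rows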
def mdd(r):
cum = (r+1).cumprod()
peak = cum.cummax()
dd = -(cum/peak-1)
return dd.max()
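# Worked example (illustrative): for returns [0.10, -0.20, 0.05] the cumulative
# curve is [1.10, 0.88, 0.924] and the running peak stays at 1.10, so the maximum
# drawdown is 1 - 0.88 / 1.10 = 0.20.
#   mdd(pd.Series([0.10, -0.20, 0.05]))  # -> 0.2 (approximately)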
def calc_net_performance(gross_perf, management_fee, incentive_fee):
"""
    Calculate the monthly net return after incentive and management fees. Most common configuration:
- high water mark, no clawbacks
- incentive fee is applied on the performance (no benchmark based performance)
:param gross_perf: the pandas data series/table contains monthly gross performance with monthly end date as index
:param management_fee: management fee ratio
:param incentive_fee: incentive fee
:return: the net performance as data Series after all the fees
"""
# make sure the gross is a series
if isinstance(gross_perf, pd.DataFrame):
gross_perf = gross_perf.iloc[:, 0]
# make sure the index is datetime
gross_perf.index = pd.to_datetime(gross_perf.index)
gross_perf.sort_index()
prev_months_cumulative = 0
prev_ytd = 0
dec_ytd = 0
high_water_mark = 0
net = list()
for dt, gr in gross_perf.iteritems():
new_months_cumulative = (gr + 1) * (prev_months_cumulative + 1) - 1
management_fee_month = (prev_months_cumulative + 1) * (management_fee / 12)
if dt.month == 1:
dec_ytd = prev_months_cumulative
if dec_ytd > high_water_mark:
high_water_mark = dec_ytd
new_cummulative_after_management = new_months_cumulative - management_fee_month
# Calculated YTD Management for YTDPerformance Calculation
ytd_managemnt = (new_cummulative_after_management + 1) / (dec_ytd + 1) - 1
# Only charge performance when (new_cummulative_after_management - high_water_mark) > 0 i.e WaterMark exceed previous year
if incentive_fee > 0 and new_cummulative_after_management > high_water_mark:
ytd_performance = ytd_managemnt - (incentive_fee * (1 + ytd_managemnt) * ((new_cummulative_after_management - high_water_mark) / (1 + new_cummulative_after_management)))
else:
ytd_performance = ytd_managemnt
if dt.month == 1:
monthly_net = ytd_performance
else:
monthly_net = (1 + ytd_performance) / (1 + prev_ytd) - 1
net.append(monthly_net)
prev_months_cumulative = new_cummulative_after_management
prev_ytd = ytd_performance
return pd.Series(net, index=gross_perf.index)
def statistics(r, b=None, rf=None, freq=12):
"""
Generate the portfolio return matrix from the monthly P&L including the following:
Total return:
Annual Return:
Volatility
Sharpe Ratio
Percentage Positive Month/Day
MDD
Skewness
Kurtosis
If b is supplied then the following will be included
Information Ratio
Correlation with Bechmark
Alpah (Non Aunnalized)
Beta
"""
stat = []
trr = (r+1).product()-1
n = float(len(r))
vol = r.std()
    if rf is None:
        rf = pd.Series(0.0, index=r.index)
stat.append(('Total Return', trr))
stat.append(('Annual Return', (1+trr)**(freq/n)-1))
stat.append(("Volatility", vol*np.sqrt(freq)))
    stat.append(('Sharpe Ratio', (r.values-rf.values).mean()/vol*np.sqrt(freq)))
stat.append(('Percentage Positive Month', (r>0).sum()/n))
stat.append(('MDD', mdd(r)))
stat.append(('Skewness', r.skew()))
stat.append(('Kurtosis', r.kurtosis()))
if b is not None:
ex_return = r.values-b.values
stat.append(('Information Ratio', ex_return.mean()/ex_return.std()*np.sqrt(freq)))
        stat.append(('Correlation with Benchmark', np.corrcoef(r, b)[0, 1]))
X = sm.add_constant(b.values)
param = sm.OLS(r.values, X).fit().params
        stat.append(('Alpha (Non Annualized)', param[0]))
stat.append(('Beta', param[1]))
df = pd.DataFrame(stat, columns=['Summary', 'Stat'])
return df
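# Minimal usage sketch (illustrative; rf, when given, should be a return series
# aligned with r, otherwise leave it as None):
#   r = pd.Series(...)   # monthly portfolio returns
#   b = pd.Series(...)   # monthly benchmark returns, same index
#   summary = statistics(r, b=b, freq=12)
#   print(summary.to_string(index=False))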
if __name__ == "__main__":
test_net_fee = True
if test_net_fee:
gross = pd.DataFrame(data = [0.034107741833528, -0.00833852885640296, 0.0320612695430027, 0.0102631014311665, 0.0113370205823213, 0.0180773838954622,
0.047031427894648, 0.00295545925707486, 0.0324885986089885, 0.00865585089499699, 0.00346117358058629, 0.0081528599398073, 0.0148837779649957,
-0.00418473351008752, 0.056113255346828, 0.0205687580374805, -0.0142217587735437, 0.00766906087868691, 0.0235982274548432, -0.00915881701770105,
0.0139498718873998, 0.00555486136711258, 0.0066962328677207, -0.000910732639986089, -0.00497602777657502, 0.00174304734261033, 0.00619146958299455,
0.024184586, 0.036688053066313, 0.0115983334056298, 0.0214857037577827, 0.00458691255044807, 0.00655048461181207],
index=["30-Nov-2016", "30-Dec-2016", "31-Jan-2017", "28-Feb-2017", "31-Mar-2017", "28-Apr-2017", "31-May-2017", "30-Jun-2017", "31-Jul-2017", "31-Aug-2017",
"29-Sep-2017", "31-Oct-2017", "30-Nov-2017", "29-Dec-2017", "31-Jan-2018", "28-Feb-2018", "31-Mar-2018", "30-Apr-2018", "31-May-2018", "30-Jun-2018", "31-Jul-2018", "31-Aug-2018",
"30-Sep-2018", "31-Oct-2018", "30-Nov-2018", "31-Dec-2018", "31-Jan-2019", "28-Feb-2019", "31-Mar-2019", "30-Apr-2019", "31-May-2019", "30-Jun-2019", "31-Jul-2019"])
gross.index = pd.to_datetime(gross.index, format="%d-%b-%Y")
net = calc_net_performance(gross, 0.01, 0.1)
print("Cumulative Net: {}".format((net+1).product() - 1))
        assert np.abs((net+1).product() - 1 - 0.429888256) < 1e-6, "The net fee calculation does not look right!"
pass
``` |
{
"source": "jianbosky/ftp_server_client",
"score": 3
} |
#### File: ftp_server/core/file_handler_bak.py
```python
import os,sys
import subprocess
BASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASEDIR)
from core import db_handler
from conf import setting
class file_action(object):
'''
    FTP command handlers:
'''
def __init__(self,username,cmd_action,cmd_file,total_size=300):
self.username = username
self.total_size = total_size
self.cmd_action = cmd_action
self.cmd_file = cmd_file
self.root = db_handler.handler(setting.DATABASE,self.username,"2")
self.home = self.root
def put(self):
if self.cmd_file:
self.home = '%s\\%s'% (self.home,self.cmd_file)
            # directory part of the path
d =os.path.dirname(self.home)
            # file name part of the path
f =os.path.basename(self.home)
try:
os.chdir(self.home)
return "d:%s,f:%s"%(d,f)
except:
os.makedirs(self.home)
os.chdir(self.home)
return "d:%s,f:%s"%(d,f)
else:
return "请上传文件,文件不能为空"
def get(self):
if self.cmd_file:
try:
os.chdir(self.home)
except:
os.makedirs(self.home)
os.chdir(self.home)
else:
return "不存在"
return self.cmd_file
def dir(self):
li = ""
try:
os.chdir(self.root)
except:
os.makedirs(self.root)
os.chdir(self.root)
if os.listdir(os.getcwd()):
for i in os.listdir(os.getcwd()):
file = os.getcwd()+'\\'+i
if os.path.isfile(file):
                    # file size
fsize = os.path.getsize(file)
li += '文件: %s 大小:%s\r\n'% (i,fsize)
else:
li += '目录:%s\r\n'%i
else:
li ="."
return li
def cd(self):
try:
os.chdir(self.root)
except:
os.makedirs(self.root)
os.chdir(self.root)
def delete(self):
try:
os.chdir(self.root)
except:
os.makedirs(self.root)
os.chdir(self.root)
if self.cmd_file == None:
self.cmd_file = "你没有输入文件名"
return self.cmd_file
else:
return self.cmd_file
def help(self):
return ("""
FTP服务器操作方法有:put--->上传文件至服务器
get--->从服务器上下载文件
dir--->查看服务器文件列表
cd---->进入指定文件夹
delete->删除文件
""")
``` |
{
"source": "jianbo-sudo/detectron2_layout",
"score": 2
} |
#### File: jianbo-sudo/detectron2_layout/upload_detectron.py
```python
from flask import Flask, render_template, request, redirect, url_for, make_response,jsonify
from werkzeug.utils import secure_filename
import time
from PIL import Image
from datetime import timedelta
import os, json, cv2, random, io
import numpy as np
import torch
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from detectron2.data import DatasetCatalog
MetadataCatalog.get("dla_train").thing_classes = ['caption', 'figure', 'page', 'table', 'title', 'text']
# Takes an image path as input and produces an annotated image; the output can be saved directly with imwrite.
def inference(input_path,model,model_weight):
im = cv2.imread(input_path)
#im = input
    # im must be an image array here: read local paths with imread first; for URLs, load the image instead.
cfg = get_cfg()
cfg.merge_from_file(model)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this mode
cfg.MODEL.WEIGHTS = model_weight
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 6
if torch.cuda.is_available():
print('we use cuda!')
cfg.MODEL.DEVICE='cuda'
else:
print('running on cpu')
cfg.MODEL.DEVICE='cpu'
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
outputs["instances"].pred_classes
outputs["instances"].pred_boxes
v = Visualizer(im[:,:,::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
outs = out.get_image()[:, :, ::-1]
if os.path.exists(input_path):
os.remove(input_path)
print('remove image')
if os.path.splitext(input_path)[-1] == ".jpg":
cv2.imwrite(input_path,outs)
print('input is a jpg file:',outs)
return input_path
else:
image_name = os.path.splitext(os.path.split(input_path)[1])[0]
print('image name:',image_name)
jpg_name = os.path.join(os.path.split(input_path)[0],image_name+'.jpg')
cv2.imwrite(jpg_name,outs)
print('convert input to jpg:',jpg_name)
return jpg_name
#input = "demo/input1.jpg"
model = "configs/DLA_mask_rcnn_X_101_32x8d_FPN_3x.yaml"
model_weight = 'model_weight/pub_model_final.pth'
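# Quick sanity check (illustrative; assumes the config/weights above and a local test image exist):
#   out_path = inference("demo/input1.jpg", model, model_weight)
#   print("annotated image written to", out_path)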
# Allowed upload file formats
ALLOWED_EXTENSIONS = set(['PNG', 'JPG', 'JPEG', 'PBM', 'PDF'])
def allowed_file(filename):
    if '.' not in filename:
        return False
    ext = filename.rsplit('.', 1)[1]
    return ext.upper() in ALLOWED_EXTENSIONS
app = Flask(__name__)
# Static file cache expiry time
#app.send_file_max_age_default = timedelta(seconds=1)
def resize_image(files):
im = Image.open(files)
(w,h) = im.size
n_w = 500
n_h = int(h/w*n_w)
return n_w,n_h
@app.route('/upload', methods=['POST', 'GET'])  # register the upload route
def upload():
if request.method == 'POST':
file = request.files['file']
        # Return an error for illegal extensions, empty filenames, or names without a dot.
if not (file and allowed_file(file.filename)):
#return jsonify({"error": "please check the input form, only accept image file and PDF."})
return render_template('upload_start2.html',warning = "Illegal input, please choose again.")
        # Create a folder for storing images, relative to this file's directory
basepath = os.path.dirname(__file__)
file_path = os.path.join(basepath, 'static/result')
print('file path:',file_path)
if not os.path.exists(file_path):
os.makedirs(file_path, 755)
        # Save the uploaded image
file_name = secure_filename(file.filename)
upload_path = os.path.join(file_path, file_name)
file.save(upload_path)
print('file path:',file_path,'file name:',file_name,'upload path:',upload_path)
        # Run inference and save the result
infer_path = inference(upload_path,model,model_weight)
infer_name = os.path.split(infer_path)[1]
        # Resize the image so it fits the screen
n_w,n_h = resize_image(infer_path)
print('new size is:',n_w,n_h,'file name is:',file_name)
return render_template('upload_done2.html', input_name = infer_name, new_weight = n_w, new_height = n_h)
return render_template('upload_start2.html')
if __name__ == '__main__':
# app.debug = True
app.run(host='0.0.0.0', port=5000)
``` |
{
"source": "JianboTang/blocks-EncoderDecoder",
"score": 2
} |
#### File: blocks-EncoderDecoder/utils/utils.py
```python
def sentenceSplit(data):
return data.split(u'\n');
``` |
{
"source": "JianboTang/modified_GroundHog",
"score": 3
} |
#### File: fork_process/dataPreprocess/data_extraction_2.py
```python
import numpy
import pickle
readfile1 = open('intermediate_data/post_1.txt','r');
readfile2 = open('intermediate_data/cmnt_1.txt','r');
writefile = open('intermediate_data/dictionary.pkl','w');
#writefile1 = open('intermediate_data/post_2.txt','w');
#writefile2 = open('intermediate_data/cmnt_2.txt','w');
def staticDict(dictionary,lline):
for i in xrange(len(lline)):
if lline[i] in dictionary:
dictionary[lline[i]] += 1;
else:
dictionary[lline[i]] = 1;
return dictionary
def preprocess(line):
line = line.decode("utf-8");
lline = [x for x in list(line) if x != u' '];
del lline[-1]
return lline
def dictPrint(dictionary):
for x in dictionary:
print x," : ",dictionary[x];
def main(count):
dict1 = {};
dict2 = {};
i = 0;
while i < count:
line1 = readfile1.readline();
line2 = readfile2.readline();
if not line1 or not line2:
print "touch the end of file"
break;
lline1 = preprocess(line1);
lline2 = preprocess(line2);
dict1 = staticDict(dict1,lline1);
dict2 = staticDict(dict2,lline2);
i += 1;
print "print the first dictionary"
dictPrint(dict1);
print "print the second dictionary"
dictPrint(dict2);
pickle.dump(dict1,writefile);
pickle.dump(dict2,writefile);
if __name__ == '__main__':
main(1000000);
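    # Reading the result back (illustrative): the two dictionaries are pickled in
    # order, so they must be loaded back in the same order:
    #   with open('intermediate_data/dictionary.pkl', 'r') as f:
    #       post_dict = pickle.load(f)
    #       cmnt_dict = pickle.load(f)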
```
#### File: dataPreprocess/result_analysis/analysis_1.py
```python
readpost = open('../used/test/post_inside.txt','r');
readcmnt = open('../used/test/cmnt_inside.txt','r');
readtran = open('../used/test/encdec_trans_inside.txt','r');
def preprocess(line):
lline = list(line.decode("utf-8"));
lline = [x for x in lline if x != u' '];
return lline
def compareSen(line1,line2):
lline1 = preprocess(line1);
lline2 = preprocess(line2);
senLen = min(len(lline1),len(lline2));
mark = True;
for i in xrange(senLen):
if lline1[i] != lline2[i]:
mark = False;
break
return mark
def main(count):
i = 0;
amount = 0;
while i < count:
line2 = readcmnt.readline();
line3 = readtran.readline();
if not line2 or not line3:
break
if compareSen(line2,line3):
amount += 1
i += 1
print "touch the end, the amount of lines is : ",i
print "the total amount of the same or part same sentences is :",amount
if __name__ == '__main__':
main(1000000);
```
#### File: fork_process/dataPreprocess/select.py
```python
readpost = open('intermediate_data/post.txt','r');
readcmnt = open('intermediate_data/cmnt.txt','r');
writepost = open('used/train/post.txt','w');
writecmnt = open('used/train/cmnt.txt','w');
outpost = open('used/test/post_outside.txt','w');
outcmnt = open('used/test/cmnt_outside.txt','w');
inpost = open('used/test/post_inside.txt','w');
incmnt = open('used/test/cmnt_inside.txt','w');
def main(count,test_count):
i = 0;
while i < count:
line1 = readpost.readline();
line2 = readcmnt.readline();
if not line1 or not line2:
print "touch the end of file"
break
writepost.write(line1);
writecmnt.write(line2);
inpost.write(line1);
incmnt.write(line2);
i += 1
while i < count + test_count:
line1 = readpost.readline();
line2 = readcmnt.readline();
if not line1 or not line2:
print "touch the end of file"
break
outpost.write(line1);
outcmnt.write(line2);
i += 1
writepost.close();
writecmnt.close();
readpost.close();
readcmnt.close();
outpost.close();
outcmnt.close();
inpost.close();
incmnt.close();
if __name__ == '__main__':
main(100000,15000);
``` |
{
"source": "jianboy/ServerManager",
"score": 3
} |
#### File: ServerManager/code/cpu_opt.py
```python
import numpy as np
import pandas as pd
def getWij():
global machine_resources
    # The matrix Wij: Wij[i][j] == 1 means instance i is deployed on machine j
Wij_size = np.zeros((68219, 6000))
Wij = np.zeros_like(Wij_size)
# inst_26195, app_147, machine_1149
df3=pd.read_csv("../data/instance.csv", header=None,names=list(["instanceid", "appid", "machineid","disk"]))
df2 = pd.read_csv(machine_resources, header=None, names=list(
["machineid", "cpu", "mem", "disk", "P", "M", "PM"]), encoding="utf-8")
for i in range(0,68219):
        if pd.isna(df3.loc[i, "machineid"]):
pass
else:
# Wij[i][j]=
pass
```
#### File: ServerManager/test/test_pandas.py
```python
import pandas as pd
def t1():
a = [['a', '1.2', '4.2'], ['b', '70', '0.03'], ['x', '5', '0']]
df = pd.DataFrame(a, columns=list("ABC"))
print(df.dtypes)
print(df)
def t2():
obj = pd.Series(list('cadaabbcc'))
uniques = obj.unique()
print(obj.dtypes)
print(uniques.shape)
def t3():
df = pd.DataFrame()
df2 = pd.read_csv()
df3 = pd.Series()
pd.concat()
pd.to_datetime()
pd.merge()
pd.Timestamp
def t4():
df = pd.DataFrame(columns=list("AB"), data=[[1, 2], [3, 4]])
df["C"] = None
df["C"][1] = 2
print(df)
def t5():
ser1 = pd.Series([1, 2, 3, 4])
ser2 = pd.Series(range(4), index=["a", "b", "c", "d"])
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
ser3 = pd.Series(sdata)
# print(ser1)
print(ser2)
# 访问Series
ser2["a"]
# 所有索引
ser2.index
# 所有值
ser2.values
def t6():
'''
    Slicing:
:return:
'''
df = pd.DataFrame([{"A": "11", "B": "12"}, {"A": "111", "B": "121"}, {"A": "1111", "B": "1211"}])
print(df)
    print(df.columns.size)  # number of columns: 2
    h, l = df.shape
    print(h, l)  # 3, 2
    print(df.iloc[:, 0].size)  # number of rows: 3
    print(df.ix[[0]].index.values[0])  # index value: 0
    print(df.ix[[0]].values[0][0])  # value in row 1, column 1: 11
    print(df.ix[[1]].values[0][1])  # value in row 2, column 2: 121
print(df.A, df.B)
print(df["A"], df["B"])
print(df.loc["A"])
print(df.loc[df["A"] > 1])
print(df.loc[pd.isna(df["A"])] == False)
print(df[df.isna["A"]] == False) # .loc可以省略
    # iloc vs loc: iloc indexes rows by integer position (0, 1, 2, ...); loc indexes by label
def t7():
'''
    Add a row / add a column
:return:
'''
df = pd.DataFrame([{"A": "11", "B": "12"}, {"A": "1111", "B": "1211"}])
# df.insert(value=list([22, 33]))
df = df.append(pd.DataFrame([{"A": "1133", "B": "1332"}]))
print(df)
    # Add a column:
df = pd.DataFrame([{"A": "11", "B": "12"}, {"A": "1111", "B": "1211"}])
df["is"] = False
print(df)
def t8():
    # To modify values, do not use chained indexing like df["mem"][i]; use df.loc[i, "mem"] instead
df = pd.DataFrame([{"A": "11", "B": "12"}, {"A": "1111", "B": "1211"}])
df["is"] = False
# df["is"][0] = True
# df.loc[0][2] = True
# df.loc[:, "is"] = True
df.loc[0, "is"] = True
print(df)
# Iterating over a DataFrame
def t9():
df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, "C"]})
for row in df.itertuples():
print(row.a, row.b)
for row in df.items():
print(row[1][0], row[1][1], row[1][2])
    # not recommended
for row in df.iteritems():
print(row[1][0], row[1][1], row[1][2])
    # not recommended
for row in df:
print(df[row][0], df[row][1], df[row][2])
def t10():
for i in range(10):
print(i)
def t11():
'''
:return:
'''
df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, "C"]})
print(df)
df1 = df
    df2 = df.copy()  # correct: creates an independent copy
    df1.a = [2, 2, 2]  # plain '=' only copies the reference, so changes through df or df1 affect both
df.b = [3, 3, 3]
print(df1)
def t12():
'''
    String split 1: appid was stored as a string like "app_1"; strip the "app_" prefix and keep only the numeric suffix.
'''
df = pd.DataFrame({'appid': ["app_1", "app_2", "app_3"], 'cpu': [3, 4, "C"]},
columns=list(["appid", "cpu"]))
# tmp = pd.to_numeric(df["appid"].str.split("_", expand=True)[1].values)
# df[['col2', 'col3']] = df[['col2', 'col3']].apply(pd.to_numeric)
df["appid"] = pd.to_numeric(df["appid"].str.split("_", expand=True)[1].values)
print(df)
def t13():
'''
    String split 2
:return:
'''
s = pd.DataFrame(['a,b,c', 'c,d,e'])
print(s)
temp_expend_False = s[0].str.split(',')
print(temp_expend_False)
temp_expend_True = s[0].str.split(',', expand=True)
print(temp_expend_True)
print(temp_expend_True[1].values)
t12()
# result = pd.DataFrame(columns=list(["instanceid", "machineid"]), data=list())
# df = pd.DataFrame({'a': list(range(100)), 'b': [random.random() for i in range(100)]})
# index = pd.MultiIndex.from_product([list('abcd'), list(range(25))])
# df.index = index
# print(df.head())
# df.loc[('a', -1), :] = None
# df.tail()
#
# data = pd.DataFrame({'a':[1,2,3], 'b':[4,5,6]})
# data.index = pd.MultiIndex.from_tuples([('a', 1), ('b', 1), ('c', 1)])
# data
# new_df = df.append(data)
# new_df.tail()
```
#### File: ServerManager/test/t_numpy.py
```python
import numpy as np
x = np.array([[1, 2, 3], [9, 8, 7], [6, 5, 4]])
def t1():
'''
定义ndarray数组
:return:
'''
x = np.array([[1, 2, 3], [9, 8, 7], [6, 5, 4]])
print(x)
    print(x.shape)  # (rows, columns)
    print(type(x))  # type of the object
    print(x.flags)  # memory-layout information of the array
    print(x.size)  # number of elements
    print(x.ndim)  # number of dimensions
def t2():
'''
    Operations
:return:
'''
    # transpose
    print(x.T)
    # slicing
    # flatten the array to 1-D and take part of the data
print(x.flat[2:6])
def t3():
'''
    Computation: sums / means
:return:
'''
def t4():
'''
    1e-9 is scientific notation, as in Java: 10^(-9)
:return:
'''
print(0.000001 < 1e-9)
t4()
```
#### File: ServerManager/twtech/config.py
```python
from configparser import ConfigParser
cf = ConfigParser()
config_path = "../conf/config.ini"
section_name = "data_file_name"
cf.read(config_path)
class Config():
def __init__(self):
pass
def getConfig(self):
return self
def setConfig(self, db_mysql, sysconfig, file):
pass
def setConfigByDB(self, db_mysql):
self.db_mysql = ""
```
#### File: ServerManager/utils/save_conf.py
```python
from configparser import ConfigParser
config_file = "../conf/config.ini"
data_path = "../data/"
section_name = "data_file_name"
cf = ConfigParser()
def write():
if not cf.has_section(section_name):
cf.add_section(section_name)
cf.set(section_name, "app_interference", data_path + "scheduling_preliminary_app_interference_20180606.csv")
cf.set(section_name, "app_resources", data_path + "scheduling_preliminary_app_resources_20180606.csv")
cf.set(section_name, "instance_deploy", data_path + "scheduling_preliminary_instance_deploy_20180606.csv")
cf.set(section_name, "machine_resources", data_path + "scheduling_preliminary_machine_resources_20180606.csv")
cf.set(section_name, "instance", data_path + "instance.csv")
cf.set(section_name, "app", data_path + "app.csv")
if not cf.has_section("table_size"):
cf.add_section("table_size")
cf.set("table_size", "app_size", "9338")
cf.set("table_size", "machine_size", "6000")
cf.set("table_size", "instance_size", "68219")
cf.set("table_size", "app12_size", "35242")
if not cf.has_section("system_config"):
cf.add_section("system_config")
cf.set("system_config", "debug", "true")
if not cf.has_section("db_mysql"):
cf.add_section("db_mysql")
cf.set("db_mysql", "db_host", "localhost")
cf.set("db_mysql", "db_port", "3306")
cf.set("db_mysql", "db_user", "root")
cf.set("db_mysql", "db_pass", "<PASSWORD>")
with open(config_file, "w") as f:
cf.write(f)
def read():
cf.read(config_file)
print(cf.get(section_name, "app_interference"))
```
#### File: ServerManager/utils/save_result.py
```python
from datetime import datetime
import pandas as pd
def save_result(df):
'''
    Export the result data
:param data:
:return:
'''
# head = ["instance", "machine"]
# data = [["ss", "aa"], ["ss", "aa"], ["ss", "aa"], ["ss", "aa"]]
# df = pd.DataFrame(data, columns=head)
df.to_csv(("../submit/submit_" + datetime.now().strftime('%Y%m%d_%H%M%S') + ".csv"), header=None,
index=False)
def marge_ab(df_a, df_b):
'''
    Merge the two result sets and export them
:param df_a:
:param df_b:
:return:
'''
path_ab = "submit_" + datetime.now().strftime('%Y%m%d_%H%M%S') + ".csv"
df_ab = pd.merge(df_a, df_b)
df_ab.to_csv(path_ab, header=None, index=False)
``` |
{
"source": "jianc65/flask-restler",
"score": 2
} |
#### File: flask-restler/tests/test_base.py
```python
import pytest
def test_app(app, client):
response = client.get('/')
assert response.data == b'OK'
def test_simple_view(app, api, client):
@api.route('/simple', methods=['GET', 'POST'])
def view(*args, **kwargs):
return {'ok': True}
assert view.methods == {'GET', 'POST'}
response = client.get('/api/v1/simple')
assert response.json == {'ok': True}
response = client.post('/api/v1/simple')
assert response.json == {'ok': True}
def test_response(app, api, client):
from flask import Response
@api.route('/response', methods=['GET', 'POST'])
def view(*args, **kwargs):
return Response('OK')
response = client.get('/api/v1/response')
assert response.data == b'OK'
def test_resource(app, api, client):
from flask_restler import Resource
@api.route
class HelloResource(Resource):
class Meta:
sorting = 'test',
def get(self, resource=None, **kwargs):
return 'Hello, %s!' % (resource and resource.title() or 'World')
assert HelloResource.meta.sorting == {'test': 'test'}
@api.route('/hello/<name>/how-are-you')
class HowAreYouResource(Resource):
def get(self, resource=None, name=None, **kwargs):
return 'Hello, %s! How are you?' % name.title()
response = client.get('/api/v1/hello')
assert response.json == 'Hello, World!'
response = client.get('/api/v1/hello/mike')
assert response.json == 'Hello, Mike!'
response = client.post('/api/v1/hello')
assert response.status_code == 405
response = client.get('/api/v1/hello/mike/how-are-you')
assert response.json == 'Hello, Mike! How are you?'
def test_resource2(api, client):
from flask import request
from flask_restler import route, Resource
DATA = [1, 2]
@api.route
class SecondResource(Resource):
methods = 'get', 'post', 'put'
class Meta:
name = 'two'
filters = 'val',
strict = True
def get_many(self, **kwargs):
return DATA
def post(self, **kwargs):
DATA.append(request.json)
return DATA
def put(self, resource=None, **kwargs):
resource = int(resource)
DATA[resource - 1] = request.json
return DATA
@route
def custom(self, **kwargs):
return self.__class__.__name__
@route('/custom22/test', methods=['get', 'post'])
def custom2(self, **kwargs):
return {'json': True}
assert SecondResource.meta.endpoints
response = client.get('/api/v1/two')
assert response.json == DATA
response = client.post_json('/api/v1/two', 3)
assert response.json == [1, 2, 3]
response = client.get('/api/v1/two?per_page=2')
assert response.json == [1, 2]
assert response.headers['x-page'] == '0'
assert response.headers['x-page-last'] == '1'
response = client.get('/api/v1/two?per_page=2&page=1')
assert response.json == [3]
response = client.put_json('/api/v1/two/2', 22)
assert response.json == [1, 22, 3]
response = client.get('/api/v1/two?where={"val": 22}')
assert response.json == [22]
response = client.get('/api/v1/two?where={"val": {"$ge": 3}}')
assert response.json == [22, 3]
response = client.get('/api/v1/two/custom')
assert response.data == b'"SecondResource"'
response = client.get('/api/v1/two/custom22/test')
assert response.json == {'json': True}
response = client.post('/api/v1/two/custom22/test')
assert response.json == {'json': True}
response = client.post('/api/v1/two/custom22/test?bla-bla=22')
assert response.status_code == 400
assert response.json['error']
assert SecondResource.meta.strict == set(
['where', 'sort', 'page', 'per_page'])
def test_pagination(api, client):
from flask_restler import Resource
DATA = list(range(1, 100))
@api.route
class TestResource(Resource):
class Meta:
per_page = 20
def get_many(self, **kwargs):
return DATA
response = client.get('/api/v1/test')
assert len(response.json) == 20
response = client.get('/api/v1/test?page=2')
assert len(response.json) == 20
assert response.json[0] == 41
def test_specs(api, client):
from flask_restler import Resource, route
@api.route
class HelloResource(Resource):
@route('/world')
def world(self, **kwargs):
return 'Hello, World!'
def get(self, resource=None, **kwargs):
return 'Hello, %s!' % (resource and resource.title() or 'World')
response = client.get('/api/v1/_specs')
assert response.json
``` |
{
"source": "jianc94538/pydevp2p",
"score": 2
} |
#### File: pydevp2p/devp2p/discovery.py
```python
import re
import time
from socket import AF_INET, AF_INET6
from repoze.lru import LRUCache
import gevent
import gevent.socket
import ipaddress
import rlp
from rlp.utils import decode_hex, is_integer, str_to_bytes, bytes_to_str, safe_ord
from gevent.server import DatagramServer
from devp2p import slogging
from devp2p import crypto
from devp2p import kademlia
from devp2p import utils
from .service import BaseService
from .upnp import add_portmap, remove_portmap
log = slogging.get_logger('p2p.discovery')
class DefectiveMessage(Exception):
pass
class InvalidSignature(DefectiveMessage):
pass
class WrongMAC(DefectiveMessage):
pass
class PacketExpired(DefectiveMessage):
pass
enc_port = lambda p: utils.ienc4(p)[-2:]
dec_port = utils.idec
import sys
PY3 = sys.version_info[0] >= 3
ip_pattern = re.compile(b"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|([0-9a-f]{0,4}:)*([0-9a-f]{0,4})?$")
class Address(object):
"""
Extend later, but make sure we deal with objects
Multiaddress
https://github.com/haypo/python-ipy
"""
def __init__(self, ip, udp_port, tcp_port=0, from_binary=False):
tcp_port = tcp_port or udp_port
if from_binary:
self.udp_port = dec_port(udp_port)
self.tcp_port = dec_port(tcp_port)
else:
assert is_integer(udp_port)
assert is_integer(tcp_port)
self.udp_port = udp_port
self.tcp_port = tcp_port
try:
# `ip` could be in binary or ascii format, independent of
# from_binary's truthy. We use ad-hoc regexp to determine format
_ip = str_to_bytes(ip)
_ip = (bytes_to_str(ip) if PY3 else unicode(ip)) if ip_pattern.match(_ip) else _ip
self._ip = ipaddress.ip_address(_ip)
except ipaddress.AddressValueError as e:
log.debug("failed to parse ip", error=e, ip=ip)
raise e
@property
def ip(self):
return str(self._ip)
def update(self, addr):
if not self.tcp_port:
self.tcp_port = addr.tcp_port
def __eq__(self, other):
# addresses equal if they share ip and udp_port
return (self.ip, self.udp_port) == (other.ip, other.udp_port)
def __repr__(self):
return 'Address(%s:%s)' % (self.ip, self.udp_port)
def to_dict(self):
return dict(ip=self.ip, udp_port=self.udp_port, tcp_port=self.tcp_port)
def to_binary(self):
"""
struct Endpoint
        unsigned address; // BE encoded 32-bit or 128-bit unsigned (layer3 address; size determines ipv4 vs ipv6)
unsigned udpPort; // BE encoded 16-bit unsigned
unsigned tcpPort; // BE encoded 16-bit unsigned }
"""
return list((self._ip.packed, enc_port(self.udp_port), enc_port(self.tcp_port)))
to_endpoint = to_binary
@classmethod
def from_binary(cls, ip, udp_port, tcp_port='\x00\x00'):
return cls(ip, udp_port, tcp_port, from_binary=True)
from_endpoint = from_binary
class Node(kademlia.Node):
def __init__(self, pubkey, address=None):
kademlia.Node.__init__(self, pubkey)
assert address is None or isinstance(address, Address)
self.address = address
self.reputation = 0
self.rlpx_version = 0
@classmethod
def from_uri(cls, uri):
ip, port, pubkey = utils.host_port_pubkey_from_uri(uri)
return cls(pubkey, Address(ip, int(port)))
def to_uri(self):
return utils.host_port_pubkey_to_uri(str_to_bytes(self.address.ip),
self.address.udp_port, self.pubkey)
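    # Illustrative enode URI round trip (not part of the original module); the
    # public key is 64 bytes, hex-encoded to 128 characters in the URI:
    #   node = Node.from_uri(b'enode://' + b'ab' * 64 + b'@127.0.0.1:30303')
    #   assert node.address.udp_port == 30303
    #   assert node.to_uri().startswith(b'enode://')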
class DiscoveryProtocolTransport(object):
def send(self, address, message):
assert isinstance(address, Address)
def receive(self, address, message):
assert isinstance(address, Address)
class KademliaProtocolAdapter(kademlia.KademliaProtocol):
pass
"""
# Node Discovery Protocol
**Node**: an entity on the network
**Node ID**: 512 bit public key of node
The Node Discovery protocol provides a way to find RLPx nodes
that can be connected to. It uses a Kademlia-like protocol to maintain a
distributed database of the IDs and endpoints of all listening nodes.
Each node keeps a node table as described in the Kademlia paper
[[Maymounkov, Mazières 2002][kad-paper]]. The node table is configured
with a bucket size of 16 (denoted `k` in Kademlia), concurrency of 3
(denoted `α` in Kademlia), and 8 bits per hop (denoted `b` in
Kademlia) for routing. The eviction check interval is 75 milliseconds,
and the idle bucket-refresh interval is
3600 seconds.
In order to maintain a well-formed network, RLPx nodes should try to connect
to an unspecified number of close nodes. To increase resilience against Sybil attacks,
nodes should also connect to randomly chosen, non-close nodes.
Each node runs the UDP-based RPC protocol defined below. The
`FIND_DATA` and `STORE` requests from the Kademlia paper are not part
of the protocol since the Node Discovery Protocol does not provide DHT
functionality.
[kad-paper]: http://www.cs.rice.edu/Conferences/IPTPS02/109.pdf
## Joining the network
When joining the network, a node fills its node table by performing a
recursive Find Node operation with its own ID as the `Target`. The
initial Find Node request is sent to one or more bootstrap nodes.
## RPC Protocol
RLPx nodes that want to accept incoming connections should listen on
the same port number for UDP packets (Node Discovery Protocol) and
TCP connections (RLPx protocol).
All requests time out after 300ms. Requests are not re-sent.
"""
class DiscoveryProtocol(kademlia.WireInterface):
"""
## Packet Data
All packets contain an `Expiration` date to guard against replay attacks.
The date should be interpreted as a UNIX timestamp.
The receiver should discard any packet whose `Expiration` value is in the past.
"""
version = 4
    expiration = 60  # let messages expire after N seconds
cmd_id_map = dict(ping=1, pong=2, find_node=3, neighbours=4)
rev_cmd_id_map = dict((v, k) for k, v in cmd_id_map.items())
# number of required top-level list elements for each cmd_id.
# elements beyond this length are trimmed.
cmd_elem_count_map = dict(ping=4, pong=3, find_node=2, neighbours=2)
encoders = dict(cmd_id=chr,
expiration=rlp.sedes.big_endian_int.serialize)
decoders = dict(cmd_id=safe_ord,
expiration=rlp.sedes.big_endian_int.deserialize)
def __init__(self, app, transport):
self.app = app
self.transport = transport
self.privkey = decode_hex(app.config['node']['privkey_hex'])
self.pubkey = crypto.privtopub(self.privkey)
self.nodes = LRUCache(2048) # nodeid->Node, fixme should be loaded
self.this_node = Node(self.pubkey, self.transport.address)
self.kademlia = KademliaProtocolAdapter(self.this_node, wire=self)
this_enode = utils.host_port_pubkey_to_uri(self.app.config['discovery']['listen_host'],
self.app.config['discovery']['listen_port'],
self.pubkey)
log.info('starting discovery proto', this_enode=this_enode)
def get_node(self, nodeid, address=None):
"return node or create new, update address if supplied"
assert isinstance(nodeid, bytes)
assert len(nodeid) == 512 // 8
assert address or self.nodes.get(nodeid)
if not self.nodes.get(nodeid):
self.nodes.put(nodeid, Node(nodeid, address))
node = self.nodes.get(nodeid)
if address:
assert isinstance(address, Address)
node.address = address
assert node.address
return node
def sign(self, msg):
"""
signature: sign(privkey, sha3(packet-type || packet-data))
        signature: sign(privkey, sha3(pubkey || packet-type || packet-data))  // implementation w/ MDC
"""
msg = crypto.sha3(msg)
return crypto.sign(msg, self.privkey)
def pack(self, cmd_id, payload):
"""
UDP packets are structured as follows:
hash || signature || packet-type || packet-data
packet-type: single byte < 2**7 // valid values are [1,4]
packet-data: RLP encoded list. Packet properties are serialized in the order in
which they're defined. See packet-data below.
Offset |
0 | MDC | Ensures integrity of packet,
65 | signature | Ensures authenticity of sender, `SIGN(sender-privkey, MDC)`
97 | type | Single byte in range [1, 4] that determines the structure of Data
98 | data | RLP encoded, see section Packet Data
The packets are signed and authenticated. The sender's Node ID is determined by
recovering the public key from the signature.
sender-pubkey = ECRECOVER(Signature)
The integrity of the packet can then be verified by computing the
expected MDC of the packet as:
MDC = SHA3(sender-pubkey || type || data)
As an optimization, implementations may look up the public key by
the UDP sending address and compute MDC before recovering the sender ID.
If the MDC values do not match, the packet can be dropped.
"""
assert cmd_id in self.cmd_id_map.values()
assert isinstance(payload, list)
cmd_id = str_to_bytes(self.encoders['cmd_id'](cmd_id))
expiration = self.encoders['expiration'](int(time.time() + self.expiration))
encoded_data = rlp.encode(payload + [expiration])
signed_data = crypto.sha3(cmd_id + encoded_data)
signature = crypto.sign(signed_data, self.privkey)
# assert crypto.verify(self.pubkey, signature, signed_data)
# assert self.pubkey == crypto.ecdsa_recover(signed_data, signature)
# assert crypto.verify(self.pubkey, signature, signed_data)
assert len(signature) == 65
mdc = crypto.sha3(signature + cmd_id + encoded_data)
assert len(mdc) == 32
return mdc + signature + cmd_id + encoded_data
def unpack(self, message):
"""
macSize = 256 / 8 = 32
sigSize = 520 / 8 = 65
headSize = macSize + sigSize = 97
hash, sig, sigdata := buf[:macSize], buf[macSize:headSize], buf[headSize:]
shouldhash := crypto.Sha3(buf[macSize:])
"""
mdc = message[:32]
if mdc != crypto.sha3(message[32:]):
            log.debug('packet with wrong MDC')
raise WrongMAC()
signature = message[32:97]
assert len(signature) == 65
signed_data = crypto.sha3(message[97:])
remote_pubkey = crypto.ecdsa_recover(signed_data, signature)
assert len(remote_pubkey) == 512 / 8
# if not crypto.verify(remote_pubkey, signature, signed_data):
# raise InvalidSignature()
cmd_id = self.decoders['cmd_id'](message[97])
cmd = self.rev_cmd_id_map[cmd_id]
payload = rlp.decode(message[98:], strict=False)
assert isinstance(payload, list)
# ignore excessive list elements as required by EIP-8.
payload = payload[:self.cmd_elem_count_map.get(cmd, len(payload))]
return remote_pubkey, cmd_id, payload, mdc
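    # Round-trip sketch (hedged; assumes `proto` is a configured DiscoveryProtocol
    # and `payload` is a list of RLP-serialisable items):
    #   packet = proto.pack(proto.cmd_id_map['ping'], payload)
    #   pubkey, cmd_id, items, mdc = proto.unpack(packet)
    # unpack() recomputes sha3(packet[32:]) and raises WrongMAC if it differs from
    # the 32-byte MDC prefix, then recovers the sender's public key from the
    # 65-byte signature via crypto.ecdsa_recover.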
def receive(self, address, message):
log.debug('<<< message', address=address)
assert isinstance(address, Address)
try:
remote_pubkey, cmd_id, payload, mdc = self.unpack(message)
# Note: as of discovery version 4, expiration is the last element for all
# packets. This might not be the case for a later version, but just popping
# the last element is good enough for now.
expiration = self.decoders['expiration'](payload.pop())
if time.time() > expiration:
raise PacketExpired()
except DefectiveMessage:
return
cmd = getattr(self, 'recv_' + self.rev_cmd_id_map[cmd_id])
nodeid = remote_pubkey
remote = self.get_node(nodeid, address)
log.debug("Dispatching received message", local=self.this_node, remoteid=remote,
cmd=self.rev_cmd_id_map[cmd_id])
cmd(nodeid, payload, mdc)
def send(self, node, message):
assert node.address
log.debug('>>> message', address=node.address)
self.transport.send(node.address, message)
def send_ping(self, node):
"""
### Ping (type 0x01)
Ping packets can be sent and received at any time. The receiver should
reply with a Pong packet and update the IP/Port of the sender in its
node table.
PingNode packet-type: 0x01
struct PingNode <= 59 bytes
{
h256 version = 0x3; <= 1
Endpoint from; <= 23
Endpoint to; <= 23
unsigned expiration; <= 9
};
struct Endpoint <= 24 == [17,3,3]
{
            unsigned address; // BE encoded 32-bit or 128-bit unsigned (layer3 address; size determines ipv4 vs ipv6)
unsigned udpPort; // BE encoded 16-bit unsigned
unsigned tcpPort; // BE encoded 16-bit unsigned
}
"""
assert isinstance(node, type(self.this_node)) and node != self.this_node
log.debug('>>> ping', remoteid=node)
version = rlp.sedes.big_endian_int.serialize(self.version)
ip = self.app.config['discovery']['listen_host']
udp_port = self.app.config['discovery']['listen_port']
tcp_port = self.app.config['p2p']['listen_port']
payload = [version,
Address(ip, udp_port, tcp_port).to_endpoint(),
node.address.to_endpoint()]
assert len(payload) == 3
message = self.pack(self.cmd_id_map['ping'], payload)
self.send(node, message)
return message[:32] # return the MDC to identify pongs
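        # The 32-byte MDC returned above doubles as the `echo` token: the remote
        # node sends it back in its Pong, and recv_pong() hands it to kademlia so
        # the pong can be matched against this ping.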
def recv_ping(self, nodeid, payload, mdc):
"""
update ip, port in node table
Addresses can only be learned by ping messages
"""
if not len(payload) == 3:
log.error('invalid ping payload', payload=payload)
return
node = self.get_node(nodeid)
log.debug('<<< ping', node=node)
remote_address = Address.from_endpoint(*payload[1]) # from address
#my_address = Address.from_endpoint(*payload[2]) # my address
self.get_node(nodeid).address.update(remote_address)
self.kademlia.recv_ping(node, echo=mdc)
def send_pong(self, node, token):
"""
### Pong (type 0x02)
Pong is the reply to a Ping packet.
Pong packet-type: 0x02
struct Pong <= 66 bytes
{
Endpoint to;
h256 echo;
unsigned expiration;
};
"""
log.debug('>>> pong', remoteid=node)
payload = [node.address.to_endpoint(), token]
assert len(payload[0][0]) in (4, 16), payload
message = self.pack(self.cmd_id_map['pong'], payload)
self.send(node, message)
def recv_pong(self, nodeid, payload, mdc):
if not len(payload) == 2:
log.error('invalid pong payload', payload=payload)
return
assert len(payload[0]) == 3, payload
# Verify address is valid
Address.from_endpoint(*payload[0])
echoed = payload[1]
if self.nodes.get(nodeid):
node = self.get_node(nodeid)
self.kademlia.recv_pong(node, echoed)
else:
            log.debug('<<< unexpected pong from unknown node')
def send_find_node(self, node, target_node_id):
"""
### Find Node (type 0x03)
Find Node packets are sent to locate nodes close to a given target ID.
The receiver should reply with a Neighbors packet containing the `k`
nodes closest to target that it knows about.
FindNode packet-type: 0x03
struct FindNode <= 76 bytes
{
NodeId target; // Id of a node. The responding node will send back nodes closest to the target.
unsigned expiration;
};
"""
assert is_integer(target_node_id)
target_node_id = utils.int_to_big_endian(target_node_id).rjust(kademlia.k_pubkey_size // 8, b'\0')
assert len(target_node_id) == kademlia.k_pubkey_size // 8
log.debug('>>> find_node', remoteid=node)
message = self.pack(self.cmd_id_map['find_node'], [target_node_id])
self.send(node, message)
def recv_find_node(self, nodeid, payload, mdc):
node = self.get_node(nodeid)
log.debug('<<< find_node', remoteid=node)
assert len(payload[0]) == kademlia.k_pubkey_size / 8
target = utils.big_endian_to_int(payload[0])
self.kademlia.recv_find_node(node, target)
def send_neighbours(self, node, neighbours):
"""
### Neighbors (type 0x04)
Neighbors is the reply to Find Node. It contains up to `k` nodes that
the sender knows which are closest to the requested `Target`.
Neighbors packet-type: 0x04
struct Neighbours <= 1423
{
list nodes: struct Neighbour <= 88: 1411; 76: 1219
{
inline Endpoint endpoint;
NodeId node;
};
unsigned expiration;
};
"""
assert isinstance(neighbours, list)
assert not neighbours or isinstance(neighbours[0], Node)
nodes = []
neighbours = sorted(neighbours)
for n in neighbours:
l = n.address.to_endpoint() + [n.pubkey]
nodes.append(l)
log.debug('>>> neighbours', remoteid=node, count=len(nodes), local=self.this_node,
neighbours=neighbours)
        # FIXME: don't break the UDP packet size limit / chunk the message / also when receiving
message = self.pack(self.cmd_id_map['neighbours'], [nodes[:12]]) # FIXME
self.send(node, message)
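        # Wire shape of the payload built above (hedged sketch):
        #   [[[packed_ip, udp_port, tcp_port, pubkey], ...]]
        # i.e. a single list whose entries are each node's Endpoint fields followed
        # by its node id, capped at 12 entries to stay under the UDP size limit.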
def recv_neighbours(self, nodeid, payload, mdc):
remote = self.get_node(nodeid)
assert len(payload) == 1
neighbours_lst = payload[0]
assert isinstance(neighbours_lst, list)
neighbours = []
for n in neighbours_lst:
nodeid = n.pop()
address = Address.from_endpoint(*n)
node = self.get_node(nodeid, address)
assert node.address
neighbours.append(node)
log.debug('<<< neighbours', remoteid=remote, local=self.this_node, neighbours=neighbours,
count=len(neighbours_lst))
self.kademlia.recv_neighbours(remote, neighbours)
class NodeDiscovery(BaseService, DiscoveryProtocolTransport):
"""
Persist the list of known nodes with their reputation
"""
name = 'discovery'
server = None # will be set to DatagramServer
nat_upnp = None
default_config = dict(
discovery=dict(
listen_port=30303,
listen_host='0.0.0.0',
),
node=dict(privkey_hex=''))
def __init__(self, app):
BaseService.__init__(self, app)
log.info('NodeDiscovery init')
# man setsockopt
self.protocol = DiscoveryProtocol(app=self.app, transport=self)
@property
def address(self):
ip = self.app.config['discovery']['listen_host']
port = self.app.config['discovery']['listen_port']
return Address(ip, port)
# def _send(self, address, message):
# assert isinstance(address, Address)
# sock = gevent.socket.socket(type=gevent.socket.SOCK_DGRAM)
# sock.bind(('0.0.0.0', self.address.port)) # send from our recv port
# sock.connect((address.ip, address.port))
# log.debug('sending', size=len(message), to=address)
# sock.send(message)
def send(self, address, message):
assert isinstance(address, Address)
log.debug('sending', size=len(message), to=address)
try:
self.server.sendto(message, (address.ip, address.udp_port))
except gevent.socket.error as e:
log.debug('udp write error', address=address, errno=e.errno, reason=e.strerror)
log.debug('waiting for recovery')
gevent.sleep(0.5)
def receive(self, address, message):
assert isinstance(address, Address)
self.protocol.receive(address, message)
def _handle_packet(self, message, ip_port):
try:
log.debug('handling packet', address=ip_port, size=len(message))
assert len(ip_port) == 2
address = Address(ip=ip_port[0], udp_port=ip_port[1])
self.receive(address, message)
except Exception as e:
log.debug("failed to handle discovery packet",
error=e, message=message, ip_port=ip_port)
def start(self):
log.info('starting discovery')
# start a listening server
ip = self.app.config['discovery']['listen_host']
port = self.app.config['discovery']['listen_port']
        # NAT port mapping
        # The UPnP part is commented out: it led to the tests hanging and failed anyway.
#self.nat_upnp = add_portmap(port, 'UDP', 'Ethereum DEVP2P Discovery')
log.info('starting listener', port=port, host=ip)
self.server = DatagramServer((ip, port), handle=self._handle_packet)
self.server.start()
super(NodeDiscovery, self).start()
        # bootstrap
nodes = [Node.from_uri(x) for x in self.app.config['discovery']['bootstrap_nodes']]
if nodes:
self.protocol.kademlia.bootstrap(nodes)
def _run(self):
log.debug('_run called')
evt = gevent.event.Event()
evt.wait()
def stop(self):
log.info('stopping discovery')
#remove_portmap(self.nat_upnp, self.app.config['discovery']['listen_port'], 'UDP')
if self.server:
self.server.stop()
super(NodeDiscovery, self).stop()
if __name__ == '__main__':
pass
```
#### File: pydevp2p/devp2p/slogging.py
```python
import logging
import warnings
try:
from ethereum.slogging import get_logger, configure, configure_logging, getLogger
except ImportError:
warnings.warn('Ethereum not available, could not import slogging', ImportWarning)
# patch logging to support kargs
_log_orig = logging.Logger._log
def _kargs_log(self, level, msg, args, exc_info=None, extra=None, **kargs):
kwmsg = ''.join(' %s=%s' % (k, str(v)) for k, v in kargs.items())
_log_orig(self, level, str(msg) + kwmsg, args, exc_info, extra)
logging.Logger._log = _kargs_log
get_logger = logging.getLogger
# # patch logging to support kargs
# _log_orig = logging.Logger._log
# def _kargs_log(self, level, msg, args, exc_info=None, extra=None, **kargs):
# kwmsg = ''.join(' %s=%s' % (k, str(v)) for k, v in kargs.items())
# _log_orig(self, level, str(msg) + kwmsg, args, exc_info, extra)
# logging.Logger._log = _kargs_log
# get_logger = logging.getLogger
if __name__ == '__main__':
logging.basicConfig()
log = get_logger('test')
log.warn('miner.new_block', block_hash='abcdef123', nonce=2234231)
```
#### File: devp2p/tests/test_discovery.py
```python
from devp2p import discovery
from devp2p import kademlia
from devp2p import crypto
from devp2p.app import BaseApp
from rlp.utils import decode_hex, encode_hex
from devp2p.utils import remove_chars
import pytest
import gevent
import random
random.seed(42)
###############################
def test_address():
Address = discovery.Address
ipv4 = '127.98.19.21'
ipv6 = u'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'
# hostname = 'localhost'
port = 1
a4 = Address(ipv4, port)
aa4 = Address(ipv4, port)
assert a4 == aa4
a6 = Address(ipv6, port)
aa6 = Address(ipv6, port)
assert a6 == aa6
b_a4 = a4.to_binary()
assert a4 == Address.from_binary(*b_a4)
b_a6 = a6.to_binary()
assert len(b_a6) == 3
assert a6 == Address.from_binary(*b_a6)
e_a4 = a4.to_endpoint()
assert a4 == Address.from_endpoint(*e_a4)
e_a6 = a6.to_endpoint()
assert a6 == Address.from_endpoint(*e_a6)
assert len(b_a6[0]) == 16
assert len(b_a4[0]) == 4
assert isinstance(b_a6[1], bytes)
# temporarily disabled hostname test, see commit discussion:
# https://github.com/ethereum/pydevp2p/commit/8e1f2b2ef28ecba22bf27eac346bfa7007eaf0fe
# host_a = Address(hostname, port)
# assert host_a.ip in ("127.0.0.1", "::1")
#############################
class AppMock(object):
pass
class NodeDiscoveryMock(object):
messages = [] # [(to_address, from_address, message), ...] shared between all instances
def __init__(self, host, port, seed):
self.address = discovery.Address(host, port)
config = dict(
discovery=dict(),
node=dict(privkey_hex=encode_hex(crypto.sha3(seed))),
p2p=dict(listen_port=port),
)
config_discovery = config['discovery']
config_discovery['listen_host'] = host
config_discovery['listen_port'] = port
app = AppMock()
app.config = config
self.protocol = discovery.DiscoveryProtocol(app=app, transport=self)
def send(self, address, message):
assert isinstance(address, discovery.Address)
assert address != self.address
self.messages.append((address, self.address, message))
def receive(self, address, message):
assert isinstance(address, discovery.Address)
self.protocol.receive(address, message)
def poll(self):
# try to receive a message
for i, (to_address, from_address, message) in enumerate(self.messages):
if to_address == self.address:
del self.messages[i]
self.receive(from_address, message)
def test_packing():
"""
https://github.com/ethereum/go-ethereum/blob/develop/crypto/secp256k1/secp256.go#L299
https://github.com/ethereum/go-ethereum/blob/develop/p2p/discover/udp.go#L343
"""
# get two DiscoveryProtocol instances
alice = NodeDiscoveryMock(host='127.0.0.1', port=1, seed=b'alice').protocol
bob = NodeDiscoveryMock(host='127.0.0.1', port=1, seed=b'bob').protocol
cmd_id = 3 # findnode
payload = [b'a', [b'b', b'c']]
message = alice.pack(cmd_id, payload)
r_pubkey, r_cmd_id, r_payload, mdc = bob.unpack(message)
assert r_cmd_id == cmd_id
assert r_payload == payload
assert len(r_pubkey) == len(alice.pubkey)
assert r_pubkey == alice.pubkey
def test_ping_pong():
alice = NodeDiscoveryMock(host='127.0.0.1', port=1, seed=b'alice')
bob = NodeDiscoveryMock(host='127.0.0.2', port=2, seed=b'bob')
bob_node = alice.protocol.get_node(bob.protocol.pubkey, bob.address)
alice.protocol.kademlia.ping(bob_node)
assert len(NodeDiscoveryMock.messages) == 1
# inspect message in queue
msg = NodeDiscoveryMock.messages[0][2]
remote_pubkey, cmd_id, payload, mdc = bob.protocol.unpack(msg)
assert cmd_id == alice.protocol.cmd_id_map['ping']
bob.poll() # receives ping, sends pong
assert len(NodeDiscoveryMock.messages) == 1
alice.poll() # receives pong
assert len(NodeDiscoveryMock.messages) == 0
eip8_packets = dict(
# ping packet with version 4, additional list elements
ping1=decode_hex(remove_chars('''
e9614ccfd9fc3e74360018522d30e1419a143407ffcce748de3e22116b7e8dc92ff74788c0b6663a
aa3d67d641936511c8f8d6ad8698b820a7cf9e1be7155e9a241f556658c55428ec0563514365799a
4be2be5a685a80971ddcfa80cb422cdd0101ec04cb847f000001820cfa8215a8d790000000000000
000000000000000000018208ae820d058443b9a3550102
''', ' \n\t')),
# ping packet with version 555, additional list elements and additional random data
ping2=decode_hex(remove_chars('''
577be4349c4dd26768081f58de4c6f375a7a22f3f7adda654d1428637412c3d7fe917cadc56d4e5e
7ffae1dbe3efffb9849feb71b262de37977e7c7a44e677295680e9e38ab26bee2fcbae207fba3ff3
d74069a50b902a82c9903ed37cc993c50001f83e82022bd79020010db83c4d001500000000abcdef
12820cfa8215a8d79020010db885a308d313198a2e037073488208ae82823a8443b9a355c5010203
040531b9019afde696e582a78fa8d95ea13ce3297d4afb8ba6433e4154caa5ac6431af1b80ba7602
3fa4090c408f6b4bc3701562c031041d4702971d102c9ab7fa5eed4cd6bab8f7af956f7d565ee191
7084a95398b6a21eac920fe3dd1345ec0a7ef39367ee69ddf092cbfe5b93e5e568ebc491983c09c7
6d922dc3
''', ' \n\t')),
# pong packet with additional list elements and additional random data
pong=decode_hex(remove_chars('''
09b2428d83348d27cdf7064ad9024f526cebc19e4958f0fdad87c15eb598dd61d08423e0bf66b206
9869e1724125f820d851c136684082774f870e614d95a2855d000f05d1648b2d5945470bc187c2d2
216fbe870f43ed0909009882e176a46b0102f846d79020010db885a308d313198a2e037073488208
ae82823aa0fbc914b16819237dcd8801d7e53f69e9719adecb3cc0e790c57e91ca4461c9548443b9
a355c6010203c2040506a0c969a58f6f9095004c0177a6b47f451530cab38966a25cca5cb58f0555
42124e
''', ' \n\t')),
# findnode packet with additional list elements and additional random data
findnode=decode_hex(remove_chars('''
c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91
831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe
04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d
115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be0081290476
7bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260a
dd7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396
''', ' \t\n')),
# neighbours packet with additional list elements and additional random data
neighbours=decode_hex(remove_chars('''
c679fc8fe0b8b12f06577f2e802d34f6fa257e6137a995f6f4cbfc9ee50ed3710faf6e66f932c4c8
d81d64343f429651328758b47d3dbc02c4042f0fff6946a50f4a49037a72bb550f3a7872363a83e1
b9ee6469856c24eb4ef80b7535bcf99c0004f9015bf90150f84d846321163782115c82115db84031
55e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa8291
15d224c523596b401065a97f74010610fce76382c0bf32f84984010203040101b840312c55512422
cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e82
9f04c2d314fc2d4e255e0d3bc08792b069dbf8599020010db83c4d001500000000abcdef12820d05
820d05b84038643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2
d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aacf8599020010db885a308d3
13198a2e037073488203e78203e8b8408dcab8618c3253b558d459da53bd8fa68935a719aff8b811
197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73
8443b9a355010203b525a138aa34383fec3d2719a0
''', ' \n\t')),
)
def test_eip8_packets():
disc = NodeDiscoveryMock(host='127.0.0.1', port=1, seed=b'bob').protocol
fromaddr = discovery.Address("127.0.0.1", 9999)
for packet in eip8_packets.values():
disc.unpack(packet)
# ############ test with real UDP ##################
def get_app(port, seed):
config = dict(
discovery=dict(),
node=dict(privkey_hex=encode_hex(crypto.sha3(seed))),
p2p=dict(listen_port=port),
)
config_discovery = config['discovery']
config_discovery['listen_host'] = '127.0.0.1'
config_discovery['listen_port'] = port
config_discovery['bootstrap_nodes'] = []
# create app
app = BaseApp(config)
discovery.NodeDiscovery.register_with_app(app)
return app
def test_ping_pong_udp():
alice_app = get_app(30000, b'alice')
alice_app.start()
alice_discovery = alice_app.services.discovery
bob_app = get_app(30001, b'bob')
bob_app.start()
bob_discovery = bob_app.services.discovery
gevent.sleep(0.1)
bob_node = alice_discovery.protocol.get_node(bob_discovery.protocol.pubkey,
bob_discovery.address)
assert bob_node not in alice_discovery.protocol.kademlia.routing
alice_discovery.protocol.kademlia.ping(bob_node)
assert bob_node not in alice_discovery.protocol.kademlia.routing
gevent.sleep(0.1)
bob_app.stop()
alice_app.stop()
assert bob_node in alice_discovery.protocol.kademlia.routing
# must use yield_fixture rather than fixture prior to pytest 2.10
@pytest.yield_fixture
def kademlia_timeout():
"""
Rolls back kademlia timeout after the test.
"""
# backup the previous value
k_request_timeout = kademlia.k_request_timeout
# return kademlia
yield kademlia
# restore the previous value
kademlia.k_request_timeout = k_request_timeout
def test_bootstrap_udp(kademlia_timeout):
"""
startup num_apps udp server and node applications
"""
# set timeout to something more tolerant
kademlia_timeout.k_request_timeout = 10000.
num_apps = 6
apps = []
for i in range(num_apps):
app = get_app(30002 + i, b'app%d' % i)
app.start()
apps.append(app)
gevent.sleep(0.1)
sleep_delay = 1 # we need to wait for the packets to be delivered
kproto = lambda app: app.services.discovery.protocol.kademlia
this_node = lambda app: kproto(app).this_node
boot_node = this_node(apps[0])
assert boot_node.address
for app in apps[1:]:
print('test bootstrap from=%s to=%s' % (this_node(app), boot_node))
kproto(app).bootstrap([boot_node])
gevent.sleep(sleep_delay)
gevent.sleep(sleep_delay * 2)
for app in apps[1:]:
print('test find_node from=%s' % (this_node(app)))
kproto(app).find_node(this_node(app).id)
gevent.sleep(sleep_delay)
gevent.sleep(sleep_delay * 2)
for app in apps:
app.stop()
# now all nodes should know each other
for i, app in enumerate(apps):
num = len(kproto(app).routing)
print(num)
if i < len(apps) // 2: # only the first half has enough time to get all updates
assert num >= num_apps - 1
def main():
"test connecting nodes"
# stop on every unhandled exception!
gevent.get_hub().SYSTEM_ERROR = BaseException # (KeyboardInterrupt, SystemExit, SystemError)
app = get_app(30304, b'theapp')
# app.config['p2p']['listen_host'] = '127.0.0.1'
app.config['p2p']['listen_host'] = '0.0.0.0'
print("this node is")
proto = app.services.discovery.protocol.kademlia
this_node = proto.this_node
print(encode_hex(this_node.pubkey))
# add external node
go_local = b'enode://6ed2fecb28ff17dec8647f08aa4368b57790000e0e9b33a7b91f32c41b6ca9ba21600e9a8c44248ce63a71544388c6745fa291f88f8b81e109ba3da11f7b41b9@127.0.0.1:30303'
go_bootstrap = b'enode://6cdd090303f394a1cac34ecc9f7cda18127eafa2a3a06de39f6d920b0e583e062a7362097c7c65ee490a758b442acd5c80c6fce4b148c6a391e946b45131365b@172.16.58.3:30303'
cpp_bootstrap = b'enode://24f904a876975ab5c7acbedc8ec26e6f7559b527c073c6e822049fee4df78f2e9c74840587355a068f2cdb36942679f7a377a6d8c5713ccf40b1d4b99046bba0@172.16.17.32:30303'
n1 = b'enode://1d799d32547761cf66250f94b4ac1ebfc3246ce9bd87fbf90ef8d770faf48c4d96290ea0c72183d6c1ddca3d2725dad018a6c1c5d1971dbaa182792fa937e89d@172.16.17.32:1024'
n2 = b'enode://1976e20d6ec2de2dd4df34d8e949994dc333da58c967c62ca84b4d545d3305942207565153e94367f5d571ef79ce6da93c5258e88ca14788c96fbbac40f4a4c7@192.168.127.12:30303'
n3 = b'enode://14bb48727c8a103057ba06cc010c810e9d4beef746c54d948b681218195b3f1780945300c2534d422d6069f7a0e378c450db380f8efff8b4eccbb48c0c5bb9e8@172.16.31.10:30303'
nb = b'enode://1976e20d6ec2de2dd4df34d8e949994dc333da58c967c62ca84b4d545d3305942207565153e94367f5d571ef79ce6da93c5258e88ca14788c96fbbac40f4a4c7@192.168.127.12:30303'
node_uri = cpp_bootstrap
r_node = discovery.Node.from_uri(node_uri)
print("remote node is", r_node)
# add node to the routing table
print("START & TEST BOOTSTRAP")
app.config['p2p']['bootstrap_nodes'] = [node_uri]
app.start()
gevent.sleep(2.)
print("TEST FIND_NODE")
for i in range(10):
nodeid = kademlia.random_nodeid()
assert isinstance(nodeid, type(this_node.id))
proto.find_node(nodeid)
gevent.sleep(1.)
pinged = lambda: set(n for t, n, r in proto._expected_pongs.values())
for i in range(10):
print('num nodes', len(proto.routing))
gevent.sleep(1)
# proto.find_node(this_node.id)
# for node in proto.routing:
proto.ping(r_node)
# proto.find_node(this_node.id)
print('nodes in routing')
for node in proto.routing:
print(node.to_uri())
print('nodes we are waiting for pongs')
for node in pinged():
print(node.to_uri())
if __name__ == '__main__':
import ethereum.slogging
ethereum.slogging.configure(config_string=':debug')
main()
"""
unexpected pongs from cpp client
case:
bootstrap pubkey does not match
versions would be good
i get a ping reply by 2 nodes
"""
```
#### File: devp2p/tests/test_go_signature.py
```python
from devp2p.crypto import ecdsa_sign, mk_privkey, privtopub, ecdsa_recover, ECCx
from rlp.utils import decode_hex
import pyelliptic
def test_pyelliptic_sig():
priv_seed = b'test'
priv_key = mk_privkey(priv_seed)
my_pubkey = privtopub(priv_key)
e = ECCx(raw_privkey=priv_key)
msg = 'a'
s = pyelliptic.ECC.sign(e, msg)
s2 = pyelliptic.ECC.sign(e, msg)
assert s != s2 # non deterministic
def test_go_sig():
"""
go client started with:
ethereum -port="40404" -loglevel=5 -nodekeyhex="<KEY>" -bootnodes="enode://2da47499d52d9161a778e4c711e22e8651cb90350ec066452f9516d1d11eb465d1ec42bb27ec6cd4488b8b6a1a411cb5ef83c16cbb8bee194624bb65fef0f7fd@127.0.0.1:30303"
"""
r_pubkey = decode_hex("<KEY>")
d = {'signed_data': 'a061e5b799b5bb3a3a68a7eab6ee11207d90672e796510ac455e985bd206e240',
'cmd': 'find_node',
'body': '<KEY>',
'signature': '0de032c62e30f4a9f9f07f25ac5377c5a531116147617a6c08f946c97991f351577e53ae138210bdb7447bab53f3398d746d42c64a9ce67a6248e59353f1bc6e01'}
priv_seed = b'test'
priv_key = mk_privkey(priv_seed)
assert priv_key == decode_hex("<KEY>")
my_pubkey = privtopub(priv_key)
assert my_pubkey == r_pubkey, (my_pubkey, r_pubkey)
go_body = decode_hex(d['body']) # cmd_id, rlp.encoded
import rlp
target_node_id, expiry = rlp.decode(go_body[1:])
assert target_node_id == r_pubkey # lookup for itself
go_signed_data = decode_hex(d['signed_data'])
go_signature = decode_hex(d['signature'])
my_signature = ecdsa_sign(go_signed_data, priv_key)
assert my_signature == ecdsa_sign(go_signed_data, priv_key) # deterministic k
assert len(go_signed_data) == 32 # sha3()
assert len(go_signature) == 65
assert len(my_signature) == 65 # length is okay
try:
assert my_signature == go_signature
failed = False
    except AssertionError:
        # expected to fail: go signatures are not generated with a deterministic k
        failed = True
assert failed
# decoding works when we signed it
assert my_pubkey == ecdsa_recover(go_signed_data, my_signature)
# problem we can not decode the pubkey from the go signature
# and go can not decode ours
ecdsa_recover(go_signed_data, go_signature)
if __name__ == '__main__':
test_go_sig()
``` |
{
"source": "JianChengBai/Django",
"score": 2
} |
#### File: apps/ouath/serializers.py
```python
from django_redis import get_redis_connection
from rest_framework import serializers
from ouath.models import OAuthQQUser
from ouath.utils import check_save_user_token
from users.models import User
class QQAuthUserSerializer(serializers.Serializer):
"""
    Serializer that creates/binds a user for QQ login
"""
access_token = serializers.CharField(label='操作凭证')
mobile = serializers.RegexField(label='手机号', regex=r'^1[3-9]\d{9}$')
password = serializers.CharField(label='密码', max_length=20, min_length=8)
sms_code = serializers.CharField(label='短信验证码')
def validate(self, data):
        # validate access_token
access_token = data['access_token']
        # retrieve the identity credential (openid) from the token
openid = check_save_user_token(access_token)
if not openid:
raise serializers.ValidationError('无效的access_token')
        # put openid into the validated data; it is used later
data['openid'] = openid
        # validate the SMS verification code
mobile = data['mobile']
sms_code = data['sms_code']
redis_conn = get_redis_connection('verify_codes')
real_sms_code = redis_conn.get('sms_%s' % mobile)
if real_sms_code.decode() != sms_code:
raise serializers.ValidationError('短信验证码错误')
        # if a user with this mobile number exists, check the password
try:
user = User.objects.get(mobile=mobile)
except User.DoesNotExist:
pass
else:
password = data['password']
if not user.check_password(password):
raise serializers.ValidationError('密码错误')
            # put the authenticated user into the validated data for later use
data['user'] = user
return data
def create(self, validated_data):
        # get the validated user
user = validated_data.get('user')
if not user:
            # the user does not exist yet, create one
user = User.objects.create_user(
username=validated_data['mobile'],
password=validated_data['password'],
mobile=validated_data['mobile'],
)
        # bind the openid to the user
OAuthQQUser.objects.create(
openid=validated_data['openid'],
user=user,
)
        # return the user
return user
``` |
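For context, a brief sketch of how a DRF view might drive this serializer. The view class and response shape below are illustrative assumptions, not part of the original app:

```python
# Hypothetical usage sketch for QQAuthUserSerializer (names are assumptions).
from rest_framework.views import APIView
from rest_framework.response import Response

from ouath.serializers import QQAuthUserSerializer


class QQAuthUserView(APIView):
    def post(self, request):
        # expects access_token, mobile, password and sms_code in the request body
        serializer = QQAuthUserSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.save()  # runs create(): binds openid, creates the user if needed
        return Response({'user_id': user.id, 'username': user.username})
```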
{
"source": "jianchengwang/todo-ml",
"score": 3
} |
#### File: todo-ml/image-similarity-flask/extract_cnn_resnet50_rota.py
```python
import numpy as np
from tensorflow.keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from numpy import linalg as LA
from tensorflow.keras.applications.resnet50 import preprocess_input as preprocess_input_resnet
from utils import rotate_img1
class VGGNet:
def __init__(self):
self.input_shape = (224, 224, 3)
self.weight = 'imagenet'
self.pooling = 'max'
self.model_resnet = ResNet50(weights=self.weight, input_shape=(
self.input_shape[0], self.input_shape[1], self.input_shape[2]), pooling=self.pooling, include_top=False)
self.model_resnet.predict(np.zeros((1, 224, 224, 3)))
def extract_feat(self, img_path):
img = image.load_img(img_path, target_size=(
self.input_shape[0], self.input_shape[1]))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input_resnet(img)
feat = self.model_resnet.predict(img)
# print(feat.shape)
norm_feat = feat[0]/LA.norm(feat[0])
return norm_feat
def extract_feat_test(self, img_path):
norm_feats = []
angle = 0
while angle < 360:
if angle == 0:
norm_feat = self.extract_feat(img_path)
norm_feats.append(norm_feat)
else:
savePath = rotate_img1(img_path, angle)
norm_feat = self.extract_feat(savePath)
norm_feats.append(norm_feat)
angle += 10
return norm_feats
```
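Because extract_feat L2-normalises the ResNet50 features, ranking candidate images against a query reduces to a dot product. A hedged retrieval sketch (the file paths and in-memory feature store are assumptions):

```python
# Hypothetical retrieval sketch: rank indexed images by cosine similarity.
import numpy as np

from extract_cnn_resnet50_rota import VGGNet  # module defined above

model = VGGNet()
index_paths = ["db/cake_001.jpg", "db/cake_002.jpg"]           # assumed files
index_feats = np.array([model.extract_feat(p) for p in index_paths])

query_feat = model.extract_feat("query.jpg")                   # assumed file
scores = index_feats.dot(query_feat)   # cosine similarity: vectors are L2-normalised
ranking = np.argsort(-scores)          # best match first
for i in ranking:
    print(index_paths[i], float(scores[i]))
```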
#### File: todo-ml/image-similarity-flask/lbp.py
```python
from skimage import feature
import numpy as np
# from sklearn.svm import LinearSVC
from sklearn import svm
import cv2
import datetime
import utils
class LocalBinaryPatterns:
def __init__(self, numPoints, radius):
# store the number of points and radius
self.numPoints = numPoints
self.radius = radius
def describe(self, image, eps=1e-7):
# compute the Local Binary Pattern representation
# of the image, and then use the LBP representation
# to build the histogram of patterns
lbp = feature.local_binary_pattern(image, self.numPoints,
self.radius, method="uniform")
(hist, _) = np.histogram(lbp.ravel(),
bins=np.arange(0, self.numPoints + 3),
range=(0, self.numPoints + 2))
# normalize the histogram
hist = hist.astype("float")
hist /= (hist.sum() + eps)
# return the histogram of Local Binary Patterns
return hist
def get_match_label(train_data, train_imgnames, test_imgpath):
desc = LocalBinaryPatterns(8, 1)
data = []
labels = []
for index, hist in enumerate(train_data):
data.append(hist)
labels.append(utils.get_label(train_imgnames[index]))
starttime = datetime.datetime.now()
svc_model = svm.SVC(C=100.0, cache_size=1000, random_state=42)
svc_model.fit(data, labels)
endtime = datetime.datetime.now()
print ('train', (endtime - starttime).seconds)
testImage = cv2.imread(test_imgpath)
gray = cv2.cvtColor(testImage, cv2.COLOR_BGR2GRAY)
hist = desc.describe(gray)
testHist = get_hist(test_imgpath)
prediction = svc_model.predict(testHist.reshape(1, -1))
return prediction[0]
def get_hist(imgpath):
desc = LocalBinaryPatterns(24, 8)
image = cv2.imread(imgpath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
hist = desc.describe(gray)
return hist
```
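get_hist above reduces an image to a normalised LBP histogram, so two images can also be compared directly without training an SVM. A hedged sketch using a chi-squared distance (the image paths are assumptions):

```python
# Hypothetical comparison sketch: chi-squared distance between LBP histograms.
import numpy as np

from lbp import get_hist  # module defined above

def chi2_distance(hist_a, hist_b, eps=1e-10):
    # smaller distance means more similar texture
    return 0.5 * np.sum(((hist_a - hist_b) ** 2) / (hist_a + hist_b + eps))

d = chi2_distance(get_hist("cake_a.jpg"), get_hist("cake_b.jpg"))  # assumed files
print("chi-squared distance:", d)
```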
#### File: todo-ml/image-similarity/object_detect.py
```python
import cv2
import numpy as np
def zeroPaddingResizeCV(img, size=(224, 224), interpolation=None):
isize = img.shape
ih, iw = isize[0], isize[1]
h, w = size[0], size[1]
scale = min(w / iw, h / ih)
new_w = int(iw * scale + 0.5)
new_h = int(ih * scale + 0.5)
img = cv2.resize(img, (new_w, new_h), interpolation)
new_img = np.zeros((h, w, 3), np.uint8)
new_img[(h-new_h)//2:(h+new_h)//2, (w-new_w)//2:(w+new_w)//2] = img
return new_img
# Reference: https://blog.csdn.net/liqiancao/article/details/55670749
# step 1: load the image
image = cv2.imread("database-tea-cake/1018-1-a.jpg")
cv2.imshow("original", image)
cv2.waitKey()
# resize the image
image = zeroPaddingResizeCV(image, size=(224, 224))
# show image
cv2.imshow("resized", image)
cv2.waitKey()
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# step 2: compute the x and y gradients with the Sobel operator, then subtract the y-gradient from the x-gradient; this keeps image regions with high horizontal and low vertical gradients.
gradX = cv2.Sobel(gray, cv2.CV_32F, dx=1, dy=0, ksize=- 1)
gradY = cv2.Sobel(gray, cv2.CV_32F, dx=0, dy=1, ksize=- 1)
# subtract the y-gradient from the x-gradient
gradient = cv2.subtract(gradX, gradY)
gradient = cv2.convertScaleAbs(gradient)
# show image
cv2.imshow("first", gradient)
cv2.waitKey()
# step 3: remove noise from the image. First smooth it with a low-pass (blur) filter using a 9 x 9 kernel, which suppresses high-frequency noise.
# A low-pass filter lowers the rate of change in the image, e.g. by replacing each pixel with the mean of its neighbourhood, smoothing regions with strong intensity changes.
# Then binarize the blurred image: any pixel in the gradient image not greater than 90 is set to 0 (black), otherwise it is set to 255 (white).
# blur and threshold the image
blurred = cv2.blur(gradient, (9, 9))
_, thresh = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)
# SHOW IMAGE
cv2.imshow("thresh", thresh)
cv2.waitKey()
# step 4: the thresholded image above still has many black holes inside the object (insect) region; fill them with white so the later steps can
# identify the object region more easily. This needs some morphological operations.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# show image
cv2.imshow("closed1", closed)
cv2.waitKey()
# step 5: the image still has some small white blobs that would interfere with the later contour detection; remove them with 4 rounds of erosion followed by 4 rounds of dilation.
# perform a series of erosions and dilations
closed = cv2.erode(closed, None, iterations=4)
closed = cv2.dilate(closed, None, iterations=4)
# show image
cv2.imshow("closed2", closed)
cv2.waitKey()
# step 6: find the contour of the object region.
# cv2.findContours():
# The first argument is the image to search. It must be a binary (black and white) image, not a grayscale one,
# so the image is converted to grayscale first and then binarized; we already obtained the binary image with cv2.threshold() in step 3.
# The second argument is the contour retrieval mode, one of:
# 1. cv2.RETR_EXTERNAL: detect only the outer contours
# 2. cv2.RETR_LIST: detect contours without building any hierarchy
# 3. cv2.RETR_CCOMP: build a two-level hierarchy; the top level holds outer boundaries, the second level the boundaries of holes. If another object sits inside a hole, its boundary goes back to the top level.
# 4. cv2.RETR_TREE: build a full hierarchy tree of nested contours.
# The third argument is the contour approximation method:
# cv2.CHAIN_APPROX_NONE stores every contour point; adjacent points differ by at most one pixel, i.e. max(abs(x1-x2), abs(y2-y1)) == 1
# cv2.CHAIN_APPROX_SIMPLE compresses horizontal, vertical and diagonal segments and keeps only their end points; a rectangular contour, for example, needs only 4 points.
# cv2.findContours() returns two values: the contours themselves and the hierarchy of each contour.
# The first return value is a list whose elements are the contours found in the image, each a numpy ndarray
# holding the coordinates of the points on that contour. Sorting the list, the largest contour is the object we are looking for.
# x = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# # import pdb
# # pdb.set_trace()
# _a, cnts, _b = x
# c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
# cv2.drawContours() draws contours on an image:
# the first argument is the image to draw on,
# the second is the contours themselves (a Python list),
# the third selects which contour in the list to draw (-1 draws all of them),
# the fourth is the contour colour,
# the fifth is the line thickness.
# cv2.minAreaRect():
# finds the minimum-area bounding rectangle of a point set; the rectangle may be rotated, i.e. not parallel to the image borders.
# compute the rotated bounding box of the largest contour
# rect = cv2.minAreaRect(c)
# rect = cv2.minAreaRect(cnts[1])
# box = np.int0(cv2.boxPoints(rect))
# draw a bounding box arounded the detected barcode and display the image
# cv2.drawContours(image, [box], - 1, (0, 255, 0), 3)
# cv2.imshow("Image", image)
# cv2.imwrite("contoursImage2.jpg", image)
# cv2.waitKey(0)
# step 7: crop. box holds the coordinates of the four vertices of the bounding rectangle; crop the image to that region.
# Take the min/max of the x and y coordinates of the four vertices: new height = maxY - minY, new width = maxX - minX.
# Xs = [i[0] for i in box]
# Ys = [i[1] for i in box]
# x1 = min(Xs)
# x2 = max(Xs)
# y1 = min(Ys)
# y2 = max(Ys)
# hight = y2 - y1
# width = x2 - x1
# cropImg = image[y1:y1 + hight, x1:x1 + width]
# show image
# cv2.imshow("cropImg", cropImg)
# cv2.imwrite("object_detect_crop.png", cropImg)
# cv2.waitKey()
(cnts, _) = cv2.findContours(closed.copy(),
cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
# compute the rotated bounding box of the largest contour
rect = cv2.minAreaRect(c)
# .................................................... note: OpenCV 3+ API (cv2.boxPoints)
box = np.int0(cv2.boxPoints(rect))
# draw a bounding box around the detected object and display the image
# cv2.drawContours(image, [box], -1, (0, 255, 0), 3)  # if left out, no green box is drawn
cv2.imshow("Image", image)
cv2.imwrite("contoursImage2.jpg", image)
cv2.waitKey(0)
Xs = [i[0] for i in box]
Ys = [i[1] for i in box]
x1 = min(Xs)
x2 = max(Xs)
y1 = min(Ys)
y2 = max(Ys)
hight = y2 - y1
width = x2 - x1
cropImg = image[y1:y1+hight, x1:x1+width]
print(y1)
print(y1+hight)
print(x1)
print(x1+width)
cv2.imshow('cropImg', cropImg)
cv2.imwrite("object_detect_crop.png", cropImg)
cv2.waitKey(0)
image = cv2.imread("object_detect_crop.png")
cv2.imshow("Original",image)
cv2.waitKey(0)
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
cv2.imshow("Gray",gray)
cv2.waitKey(0)
```
#### File: ML-For-Beginners/105-test-machine-learning-models/m1b_gradient_descent.py
```python
import numpy
class MyModel:
def __init__(self):
'''
Creates a new MyModel
'''
# Straight lines described by two parameters:
# The slop is the angle of the line
self.slope = 0
# The intercept moves the line up or down
self.intercept = 0
# The history of cost per iteration
self.cost_history = []
def predict(self, x):
'''
Estimates the target variable from the value of x
'''
return x * self.slope + self.intercept
def get_summary(self):
'''
Returns a string that summarises the model
'''
return f"y = {self.slope} * x + {self.intercept}"
def cost_function(actual, predicted):
# use the mean squared differences
return numpy.average((actual - predicted)**2)
def calculate_gradient(x, actual, predicted):
"""
    This calculates the gradient for a linear regression
using the Mean Squared Error cost function
"""
# The partial derivatives of MSE are as follows
# You don't need to be able to do this just yet but
# it is important to note these give you the two gradients
# that we need to train our model
error = predicted - actual
grad_intercept = numpy.mean(error) * 2
grad_slope = (x * error).mean() * 2
return grad_intercept, grad_slope
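# For reference, with error = predicted - actual and cost = mean(error ** 2):
#   d(cost)/d(intercept) = 2 * mean(error)        -> grad_intercept above
#   d(cost)/d(slope)     = 2 * mean(x * error)    -> grad_slope above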
def gradient_descent(x, y, learning_rate, number_of_iterations):
"""
Performs gradient descent for a two-parameter function.
learning_rate: Larger numbers follow the gradient more aggressively
number_of_iterations: The maximum number of iterations to perform
"""
model = MyModel()
# set the initial parameter guess to 0
model.intercept = 0
model.slope = 0
model.cost_history = []
last_cost = float('inf')
for i in range(number_of_iterations):
# Calculate the predicted values
predicted = model.predict(x)
# == OPTIMISER ===
# Calculate the gradient
grad_intercept, grad_slope = calculate_gradient(x, y, predicted)
        # Update the estimate of the line
model.slope -= learning_rate * grad_slope
model.intercept -= learning_rate * grad_intercept
estimate = model.predict(x)
cost = cost_function(y, estimate)
        # Update the history of costs
        model.cost_history.append(cost)
# Print the current estimation and cost every 100 iterations
if( i % 100 == 0):
print("Iteration", i, " Current estimate:", model.get_summary(), f"Cost: {cost}")
if (cost + 0.001) >= last_cost:
print("Model training complete after",i, "iterations")
break
last_cost = cost
if i == (number_of_iterations - 1):
print("Maximum number of iterations reached. Stopping training")
# # Print the final model
# print(f"Final estimate:", model.get_summary())
return model
```
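A hedged usage sketch for the trainer above: fit the model on synthetic data and sanity-check calculate_gradient with a finite difference. The data and hyperparameters are illustrative assumptions:

```python
# Hypothetical usage and gradient check for m1b_gradient_descent.
import numpy

from m1b_gradient_descent import MyModel, calculate_gradient, cost_function, gradient_descent

# synthetic data: y = 3x + 2 plus a little noise
rng = numpy.random.default_rng(0)
x = numpy.linspace(0, 1, 100)
y = 3 * x + 2 + rng.normal(0, 0.05, size=x.shape)

model = gradient_descent(x, y, learning_rate=0.1, number_of_iterations=5000)
print(model.get_summary())  # should roughly recover y = 3 * x + 2

# finite-difference check of the analytic intercept gradient at (slope=0, intercept=0)
eps = 1e-6
probe = MyModel()                      # slope = 0, intercept = 0
base_pred = probe.predict(x)           # all zeros
grad_intercept, grad_slope = calculate_gradient(x, y, base_pred)
probe.intercept = eps
numeric = (cost_function(y, probe.predict(x)) - cost_function(y, base_pred)) / eps
print(grad_intercept, numeric)         # the two values should roughly agree
```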
#### File: todo-ml/SCDA_keras/data_loader.py
```python
import os
import numpy as np
import cv2
from PIL import Image
from keras.preprocessing import image
from keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
import pdb
def color_process(data):
"""color preprocessing"""
return data / 128. - 1.
def image_process(img):
"""transform an image to an array, and add a batch axis"""
data = image.img_to_array(img)
data = np.expand_dims(data, axis=0)
return data
class DataLoader(object):
"""The data loader class
datapath: the path to load images
savepath: the path to save npy files
npypath: the path to load npy files
"""
def __init__(self, input_size=(224, 224, 3), datapath=None, savepath=None, npypath=None):
if datapath != None:
# load images
self.input_size = input_size
self.datapath = datapath
self.classes = os.listdir(datapath)
self.le = LabelEncoder()
self.le.fit(self.classes)
print("[INFO] data path is %s"%self.datapath)
print("[INFO] classes are: %s"%" ".join(self.classes))
print("[INFO] loading data...")
self.trainx, self.trainy, self.testx, self.testy = self.load_data()
print("[INFO] data loaded!")
print("[INFO] data statistics: train: %s test: %s"%(self.trainx.shape[0], self.testx.shape[0]))
if npypath != None:
# load npy file
#self.loadnpy()
pass
if savepath != None:
# save data as npy file
#self.savenpy
pass
def load_folder(self, path, label, padding=True):
"""load images from data path with label information."""
imglist = os.listdir(path)
for imagefile in imglist:
print("loading image file %s"%('/'.join([path, imagefile])))
img = Image.open(os.path.join(path, imagefile))
img = np.asarray(img.convert("RGB"))
if padding == True: #padding to keep the h/w ratio
h, w = img.shape[0], img.shape[1]
max_size = max(h, w)
img = cv2.copyMakeBorder(img, (max_size-h)//2, (max_size-h)//2, (max_size-w)//2, (max_size-w)//2, cv2.BORDER_CONSTANT, value = [0,0,0])
img = cv2.resize(img, (self.input_size[0], self.input_size[1]))
datax = image_process(img)
if imglist.index(imagefile)==0:
DataX = datax
else:
DataX = np.vstack((DataX,datax))
DataY = [label] * len(imglist)
DataY = self.le.transform(DataY)
DataY = DataY.reshape(DataY.shape[0], 1)
#DataY = to_categorical(DataY, num_classes=len(self.classes))
return DataX, DataY
def load_data(self, ratio=0.8):
"""load train and test data. depends on specific task
ratio: ratio of train/all
"""
DataX, DataY = self.load_folder(os.path.join(self.datapath, self.classes[0]), self.classes[0])
for class_name in self.classes[1:]:
datax, datay = self.load_folder(os.path.join(self.datapath, class_name), class_name)
DataX = np.vstack((DataX, datax))
DataY = np.vstack((DataY, datay))
indices = [i for i in range(DataX.shape[0])]
np.random.shuffle(indices) # use sklearn?
DataX = DataX[indices]
DataY = DataY[indices]
TrainX = DataX[:int(len(indices) * ratio)]
TrainY = DataY[:int(len(indices) * ratio)]
TestX = DataX[int(len(indices) * ratio):]
TestY = DataY[int(len(indices) * ratio):]
TrainX = color_process(TrainX)
TestX = color_process(TestX)
return TrainX, TrainY, TestX, TestY
def loadnpy(self):
pass
def savenpy(self):
pass
#if __name__ == "__main__":
# loader = DataLoader()
```
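A hedged usage sketch for the loader above; load_data implies one sub-folder per class under datapath, and the path used here is an assumption:

```python
# Hypothetical usage of DataLoader (the data path is an assumption).
from data_loader import DataLoader

loader = DataLoader(input_size=(224, 224, 3), datapath="dataset/images")
print(loader.trainx.shape, loader.trainy.shape)   # e.g. (N_train, 224, 224, 3), (N_train, 1)
print(loader.testx.shape, loader.testy.shape)
print(loader.classes)                             # sub-folder names used as labels
```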
#### File: todo-ml/SCDA_keras/show.py
```python
import cv2
from keras.layers import Lambda, Input, Dense
from keras.models import Model, load_model
from keras import optimizers
import pdb
from keras import backend as K
import numpy as np
from keras.utils import plot_model
from scda import select_aggregate
from keras.applications.vgg16 import VGG16
def show():
test = cv2.imread('test.jpg')
test = cv2.resize(test, (224, 224))
test = test.reshape((1, 224, 224, 3))
cnn = VGG16(weights='imagenet', include_top=False, input_shape=[224, 224, 3], pooling='avg')
cnn.load_weights('vgg_model.h5')
masklayer = Lambda(lambda x: select_aggregate(x, [224, 224]))(cnn.get_layer('block5_pool').output)
model = Model(inputs=cnn.input, outputs=masklayer)
output = model.predict(test)
cropped = ((output * test)[0]).astype('uint8')
cv2.imshow('img', cropped)
cv2.imwrite("cropped.jpg", cropped)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
show()
``` |
{
"source": "jianchengwang/todo-python",
"score": 3
} |
#### File: tutorial/01_hello_world/main.py
```python
from typing import Optional
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
id: Optional[int] = None
name: str
price: float
is_offer: Optional[bool] = None
@app.get('/')
async def read_root():
return {'Hello': 'World'}
@app.get('/items/{item_id}')
async def read_item(item_id: int, q: Optional[str] = None):
return {'item_id': item_id, 'q': q}
@app.put('/items/{item_id}')
async def update_item(item_id: int, item: Item):
item.id = item_id
return item
if __name__ == '__main__':
uvicorn.run('main:app', host='127.0.0.1', port=9000, reload=True)
```
#### File: tutorial/05_request_cookie_header/main.py
```python
from typing import Optional, List
import uvicorn
from fastapi import FastAPI, Header, Cookie
app = FastAPI()
@app.get("/read_header", summary="获取 header 参数")
async def read_header(user_agent: Optional[str] = Header(None)):
return {"User-Agent": user_agent}
@app.get("/header_list", summary="重复的 header")
async def header_list(x_token: Optional[List[str]] = Header(None)):
return {"X-Token values": x_token}
@app.get("/read_cookie", summary="获取 cookie 参数")
async def read_cookie(cc_id: Optional[str] = Cookie(None)):
return {"cc_id": cc_id}
if __name__ == '__main__':
uvicorn.run('main:app', host='127.0.0.1', reload=True, port=9000)
```
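A hedged client-side sketch of exercising these endpoints once the app runs on 127.0.0.1:9000; the header and cookie values are illustrative:

```python
# Hypothetical client calls for the header/cookie endpoints above.
import requests

base = "http://127.0.0.1:9000"

# user_agent: Optional[str] = Header(None) maps to the standard User-Agent header
print(requests.get(f"{base}/read_header", headers={"User-Agent": "demo-client/1.0"}).json())

# duplicate X-Token headers are collected into a list (here just one value)
print(requests.get(f"{base}/header_list", headers={"X-Token": "tok-1"}).json())

# cc_id: Optional[str] = Cookie(None) is read from the request cookies
print(requests.get(f"{base}/read_cookie", cookies={"cc_id": "abc123"}).json())
```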
#### File: tutorial/09_swagger_doc/main.py
```python
import uvicorn
from fastapi import FastAPI
app = FastAPI(version="v1.0.5", title="Hello World OpenAPI", description="这是对 hello world openapi 的描述信息.")
# include_in_schema=False hides this endpoint from the API docs
@app.get("/swag01", include_in_schema=False)
def swag01():
return "success"
# deprecated=True marks this endpoint as deprecated in the API docs; it should no longer be used
@app.get("/swag02", deprecated=True)
async def swag02():
return "success"
@app.get(path='/api/v1/get/user', summary='获取用户', description='这个接口是用来添加用户的', tags=['用户模块'])
def getuser():
return {"code": 0, "msg": "获取成功", "data": None}
@app.post(path='/api/v1/add/user', summary='添加用户', description='这个接口是用来获取用户信息的', tags=['用户模块'])
def adduser():
return {"code": 0, "msg": "添加成功", "data": None}
@app.put(path='/api/v1/updata/user', summary='更新用户', description='这个接口是用来更新用户信息的', tags=['用户模块'])
def updatauser():
return {"code": 0, "msg": "修改成功", "data": None}
@app.put(path='/api/v1/delete/user', summary='删除用户', description='这个接口是用来删除用户信息的', tags=['用户模块'])
def deleteuser():
return {"code": 0, "msg": "删除成功", "data": None}
@app.put(path='/api/v1/add/data', summary='新增数据', description='这个接口是用来新增数据', tags=['数据模块'])
def adddatas():
return {"code": 0, "msg": "删除成功", "data": None}
if __name__ == '__main__':
uvicorn.run('main:app', host='127.0.0.1', reload=True, port=9000)
```
#### File: 1_intro_python/chapter3/exercise.py
```python
def add_numbers(x, y):
return x + y
add_numbers(1, 2)
print(f"The product of 1 and 2 is {add_numbers(1, 2)}")
# Function Scope
x = 1
y = 2
def add_numbers(x, y):
print(f"Inside the function, x = {x} and y = {y}")
return x + y
print(f"Outside the function, x = {x} and y = {y}")
print(f"The product of 5 and 6 is {add_numbers(5, 6)}")
# Positional vs Keyword Arguments
def calculate_numbers(x, y, operation="add"):
if operation == "add":
return x + y
elif operation == "subtract":
return x - y
calculate_numbers(2, 3)
calculate_numbers(2, 3, "subtract")
calculate_numbers(2, 3, operation="subtract")
```
#### File: 2_intermediate_python/chapter3/exercise_part1.py
```python
class Vehicle:
def __init__(self, make, model, fuel="gas"):
self.make = make
self.model = model
self.fuel = fuel
daily_driver = Vehicle("Subaru", "Crosstrek")
# By default, this is how python represents our object:
print(daily_driver)
# The variables we saved to the instance are available like this:
print(f"I drive a {daily_driver.make} {daily_driver.model}. It runs on {daily_driver.fuel}.")
``` |
{
"source": "jianchuanjie/flaskblog",
"score": 2
} |
#### File: blogapp/Main/views.py
```python
from flask import render_template, redirect, flash, url_for, request
from flask import current_app
from ..models import Post
from .. import db
from . import main
@main.route('/')
def index():
page = request.args.get('page', 1, type=int)
pagination = Post.query.order_by(Post.pub_date.desc()).paginate(
page, per_page=current_app.config['POST_PER_PAGE'],
error_out=False)
posts = pagination.items
return render_template('index.html', posts=posts, pagination=pagination)
@main.route('/articles/<article_id>')
def article_details(article_id):
post = Post.query.filter_by(id=article_id).first()
post.num_of_view += 1
db.session.commit()
return render_template('article_details.html', post=post)
@main.route('/about')
def about():
return render_template('about.html')
@main.route('/archives')
def archives():
page = request.args.get('page', 1, type=int)
pagination = Post.query.order_by(Post.pub_date.desc()).paginate(
page, per_page=current_app.config['ARCHIVE_PER_PAGE'],
error_out=False)
posts = pagination.items
print(posts[0].pub_date.date())
return render_template('archives.html', posts=posts, pagination=pagination)
## Error Handler Views
@main.app_errorhandler(403)
def forbidden(e):
return render_template('error.html', error=403), 403
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('error.html', error=404), 404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('error.html', error=500), 500
``` |
{
"source": "Jian-danai/Image-Recovery",
"score": 4
} |
#### File: Jian-danai/Image-Recovery/DataMakeB.py
```python
# Note: first fix the image size, then generate the data according to the rules below
# # Generate data ##########################################################################################
# from PIL import Image
# import random
# #from pylab import *
#
# for index in range(1,2001):############################372 or 2001
# I = Image.open("F:\\DataSet\\Images_rec\\A\\labelA\\dog_" + str(index) + ".jpg")###############
# img = I.convert('L') # rgb->gray
# img = Image.open("F:\\DataSet\\Images_rec\\train_unC\\dog_"+str(index)+".jpg")##############
# list_x = []
# for k in range(0,img.size[0]):
# list_x.append(k)
# for j in range(0,img.size[1]):
# #for jk in range(0, 3):
# slice = random.sample(list_x, int(0.8 * img.size[0]))#################0.8?
# for i in range(0,img.size[0]):
# #data = (img.getpixel((i, j)))
# #print (type(data))
# #data = list(data)
# if i in slice:
# #data = 0
# img.putpixel((i,j),0)#data[0],data[1],data[2]))#255??
# #img = img.convert("RGB")
# img.save("F:\\DataSet\\Images_rec\\A\\labelA\\dog_"+str(index)+".jpg")########################
###########################################################################################################
# Resize the images
from PIL import Image
import os.path
import glob
def convertjpg(jpgfile,outdir,width=296,height=372):
img=Image.open(jpgfile)
try:
new_img=img.resize((width,height),Image.BILINEAR)
new_img.save(os.path.join(outdir,os.path.basename(jpgfile)))
except Exception as e:
print(e)
for jpgfile in glob.glob("/Users/HZK/YBJ/image_recovery/labelB1/*.jpg"):
convertjpg(jpgfile,"/Users/HZK/YBJ/image_recovery/labelB1/")
#####################################################################
``` |
{
"source": "jiandanc/dailycheckin",
"score": 3
} |
#### File: dailycheckin/paofu/paofu.py
```python
import json
import os
import time
import requests
from requests import utils
class PaofuCheckIn:
def __init__(self, check_item):
self.check_item = check_item
@staticmethod
def sign(session):
try:
my_proxies = {"http": "socks5://127.0.0.1:1080", "https": "socks5://127.0.0.1:1080"}
session.proxies = my_proxies
#response = session.post(url="https://paofu.cloud/user/checkin")
#print(response.text)
#current = response.json()
            # visit the user page first, then check in after a short pause
            session.get(url="https://paofu.cloud/user")
            time.sleep(1)
            current = session.post(url="https://paofu.cloud/user/checkin").json()
msg = current["msg"]
except Exception as e:
print(e)
msg = f"签到状态: 签到失败\n错误信息: {e}"
return msg
def main(self):
paofu_cookie = {
item.split("=")[0]: item.split("=")[1] for item in self.check_item.get("paofu_cookie").split("; ")
}
session = requests.session()
requests.utils.add_dict_to_cookiejar(session.cookies, paofu_cookie)
session.headers.update(
{
"Accept": "application/json, text/javascript, */*; q=0.01",
"Referer": "https://paofu.cloud/user",
"sec-ch-ua": r'" Not A;Brand";v="99", "Chromium";v="90", "Microsoft Edge";v="90"',
"sec-ch-ua-mobile": "?1",
"User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Mobile Safari/537.36 Edg/90.0.818.62",
"X-Requested-With": "XMLHttpRequest"}
)
sign_msg = self.sign(session=session)
msg = f"{sign_msg}"
return msg
if __name__ == "__main__":
with open(
os.path.join(os.path.dirname(os.path.dirname(__file__)), "config/config.json"), "r", encoding="utf-8"
) as f:
datas = json.loads(f.read())
_check_item = datas.get("PAOFU_COOKIE_LIST", [])[0]
print(PaofuCheckIn(check_item=_check_item).main())
``` |
{
"source": "jiandequn/code",
"score": 3
} |
#### File: week/other/renameLogTable.py
```python
import sys
import pymysql
reload(sys)
sys.setdefaultencoding('utf-8')
class mysql_init:
def __init__(self, username, password, url, database, port=3306):
self.db = pymysql.connect(url, username, password, database, port, charset='utf8')
def query(self, select):
cursor = self.db.cursor();
        # use the execute() method to run the SQL statement
self.db.ping(reconnect=True)
cursor.execute(select)
return cursor.fetchall()
def execute(self, select):
cursor = self.db.cursor();
# 使用 execute() 方法执行 SQL 查询
self.db.ping(reconnect=True)
cursor.execute(select)
def close(self):
self.db.close()
if __name__ == "__main__":
argArr = sys.argv;
params = {};
for arg in argArr:
arr = str.split(arg, "=");
if len(arr) == 2:
params[arr[0]] = arr[1];
    # validate the configuration parameters
tableArr = [("login_page_log", u"首页访问日志"), ("detail_page_log", u"详情页日志"), ("album_play_log", u"播放记录日志")];
if params.has_key("startDate") and params.has_key("endDate"):
startDate = params["startDate"]
endDate = params["endDate"]
else:
raise Exception("not found params for startDate and endDate;endDate and startDate is must.", "startDate,endDate")
if params.has_key("mysqlUrl"):
mysqlUrl = params["mysqlUrl"]
else:
raise Exception("not found param for mysqlUrl,mysqlUrl is must.", "mysqlUrl")
if params.has_key("mysqlPwd"):
mysqlPwd = params["mysqlPwd"]
else:
raise Exception("not found param for mysqlPwd,mysqlPwd is must.", "mysqlPwd")
if params.has_key("mysqlName"):
mysqlName = params["mysqlName"]
else:
raise Exception("not found param for mysqlName,mysqlName is must.", "mysqlName")
if params.has_key("mysqlDb"):
mysqlDb = params["mysqlDb"]
else:
raise Exception("not found param for mysqlDb,mysqlDb is must.", "mysqlDb")
if params.has_key("mysqlPort"):
mysqlPort = int(params["mysqlPort"])
else:
mysqlPort=3306
m = mysql_init(username=mysqlName, password=<PASSWORD>, url=mysqlUrl, database=mysqlDb, port=mysqlPort)
for at in tableArr:
t = at[0];
tname = at[1];
t_val = m.query("select SUBSTR(min(create_time),1,10),SUBSTR(max(create_time),1,10) from %s" % t);
if len(t_val) == 1 and t_val[0][0] is not None and t_val[0][1] is not None:
print t_val;
sdate = t_val[0][0].replace("-", "");
edate = t_val[0][1].replace("-", "");
renameTable = "%s_%s_%s" % (t, sdate, edate);
            # drop the target table first if it already exists
m.execute("drop table IF EXISTS %s" % renameTable);
            # add a table comment recording the archived date range
renameTableSql = "rename table %s to %s;" % (t, renameTable);
print u"重命名表%s为 %s;" % (t, renameTable)
m.execute(renameTableSql); # 重命名表
m.execute("ALTER TABLE %s COMMENT '%s%s_%s';" % (renameTable, tname, sdate, edate))
createTableSql = "create table %s select * from %s where 1=2" % (t, renameTable);
m.execute(createTableSql); # 创建表结构
m.execute("ALTER TABLE %s COMMENT '%s%s_%s';" % (t, tname, startDate, endDate))
else:
print u"表%s 未查到记录,无需重新创建表" %t
m.close()
```
#### File: report/base/year.py
```python
import abc
from common.tool.action.report.base.builder import BaseBuilder
class MonthBuilder(BaseBuilder):
_metaclass__ = abc.ABCMeta
# def __init__(self, action):
# self.__action = action;
@abc.abstractmethod
def start_month(self):
"""数据按日处理中心"""
raise NotImplementedError
```
#### File: action/report/events_type_log.py
```python
from common.tool.action.report.base.day import DayBuilder
day_step="2";
class EventsTypeLog(DayBuilder):
def start_day(self):
# 需分析文件已Load完;对日期范围内数据数据进行分区和分时间存储
if not self._logic_validate(2): return;
print '开始处理对source_logs进行分类处理'
self._action._sqoopUtils.execScrpit(fileName="log_analysis/import_product_column.sqoop");
self._action._sqoopUtils.execScrpit(fileName="log_analysis/import_clean_user.sqoop")
self._action._hiveUtils.exceFile(filePath="log_analysis/eventsType.hive",
hived={"startDate": self._action._startDate.strftime("%Y-%m-%d"),
"endDate": self._action._endDate.strftime("%Y-%m-%d")});
print 'source_logs进行分类处理已完成';
```
#### File: action/report/retention_user.py
```python
import datetime
from common.tool.action.report.base.day import DayBuilder
from common.utils import Constant
day_step="5,6,7"
class RetentionUser(DayBuilder):
def start_day(self):
if self._logic_validate(5):self.__computeRetentionBy2Day();
elif self._logic_validate(6):self.__computeRetentionBy3Day();
elif self._logic_validate(7):self.__computeAddUserRetentionBy2Day();
def __computeRetentionBy2Day(self):
print '开始处理对2日留存统计'
# 开始日期和结束日期往前推1天
self.__retentionByDay("user_2day_count")
print '对2日留存统计已完成'
return None
def __computeRetentionBy3Day(self):
print '开始处理对3日留存统计'
retentionDayNum = 3;
# 开始日期和结束日期往前推2天
self.__retentionByDay("user_3day_count", retentionDayNum)
print '对3日留存统计已完成'
return None
def __computeAddUserRetentionBy2Day(self):
print '开始处理对新增用户2日留存统计'
retentionDayNum = 2;
# 开始日期和结束日期往前推2天AddUserRetentionByNumDay
self.__retentionByDay("add_user_2day_count", retentionDayNum,
hiveFile="AddUserRetentionByNumDay")
print '对新增用户2日留存统计已完成'
def __retentionByDay(self, update_field="user_2day_count", retentionDayNum=2,
hiveFile="RetentionByNumDay"):
# 开始日期和结束日期往前推2天
self._action._hiveUtils.exce("truncate table %s.app_retention_count_day" % Constant.HIVE_DATABASE) # 清空表
sd = self._action._startDate + datetime.timedelta(days=1 - retentionDayNum); #
ed = sd + datetime.timedelta(days=retentionDayNum - 1)
while ed < self._action._endDate:
startDateStr = sd.strftime("%Y-%m-%d");
endDateStr = ed.strftime("%Y-%m-%d");
self._action._hiveUtils.exceFile(filePath="report/app/retention/%s.hive" % hiveFile,
hived={"startDate": startDateStr, "endDate": endDateStr,
"dayNum": retentionDayNum});
sd = sd + datetime.timedelta(days=1); #
ed = ed + datetime.timedelta(days=1);
# 对留存用户数据进行导入
self._action._sqoopUtils.execScrpit("report/app/retention/exportRetentionUserCount.sqoop", {"count_field": update_field})
```
#### File: common/tool/ConsoleUtils.py
```python
import re
from common.tool import LinuxShell, LocalLinuxCommand
from common.utils import Constant
class ConsoleUtil(object):
def __init__(self):
if Constant.SHELL_FLAG:
self.cmdShell = LinuxShell.SSHCommand();
self.cmdShell.cmd("su hadoop")
self.cmdShell.cmd("cd %s" % Constant.SCRIPT_PATH)
def cmd(self,cmd):
if Constant.SHELL_FLAG:
self.cmdShell.cmd(cmd)
else:
s1= re.sub(r'\\[\n|\r|\t]+', "", cmd)
LocalLinuxCommand.actionLocalCmd(s1);
def close(self):
if Constant.SHELL_FLAG:
self.cmdShell.close()
```
#### File: common/valid/validator.py
```python
import abc
__author__ = "jiandequn"
""""
抽象验证器实现
"""
class Validator(object):
_metaclass__ = abc.ABCMeta
@abc.abstractproperty
def vlidate(self, val):
"""
定义当前操作表
"""
raise NotImplementedError
def vlidateParam(self, key, params={}):
val = '';
if params.has_key(key):
val = params.get(key)
while not self.vlidate(val):
val = raw_input("%s:" % key);
print val;
return self.formatValue(val);
@abc.abstractproperty
def formatValue(self, val):
"""
定义当前操作表
"""
raise NotImplementedError
```
#### File: code/hadoop_script_2.0/initTable.py
```python
from common.tool import HiveTool
from common.tool.Config import config
from common.tool.ConsoleUtils import ConsoleUtil
from common.utils import Constant
def initHiveTable():
conf = config("conf/conf.ini");
mysql_url=conf.getValue("MYSQL", "mysql_url")
mysql_port=conf.getValue("MYSQL", "mysql_port")
mysql_database=conf.getValue("MYSQL", "mysql_database")
username = conf.getValue("MYSQL", "mysql_user");
password = conf.getValue("MYSQL", "mysql_pwd");
url= "jdbc:mysql://%s:%s/%s"%(mysql_url,mysql_port,mysql_database)
hived = {
"jdbc.url":url,
"jdbc.username":username,
"jdbc.password":password,
"hive.database":Constant.HIVE_DATABASE,
"hive.province":Constant.HIVE_PROVINCE
}
console = ConsoleUtil()
hiveUtils = HiveTool.HiveUtil(console)
hiveUtils.exceFile("init_table/base.hive",hived=hived,hive_database="default")
hiveUtils.exceFile("init_table/report.hive", hived=hived)
# hiveUtils.exceFile("init_table/initTable.hive", hived=hived)
# hiveUtils.exceFile("init_table/initTable.hive", hived=hived)
# hiveUtils.exceFile("statistic/log/initLogTable.hive",hived=hived)
if Constant.conf.getValue("OTHER", "yn_task") == "1":
hiveUtils.exceFile("init_table/yunnan.hive", hived=hived)
initHiveTable();
```
#### File: lib/udtf/convertSourceLogs.py
```python
import sys
def getValue(dic, st):
return dic.pop(st, '');
for line in sys.stdin:
# for line in ["2020-01-01 06:59:55 eventsType=auth_product;mac=AC4AFE6D0A28;sn=12053500305080AC4AFE6D0A28;userId=10664624;userType=1;productId=PT20190731100641096;isEffective=1;resourceId=;operateType=auth_product"]:
arr = line.strip().split('\t');
createTime = arr[0];
dic = {i.split("=")[0]: i.split("=")[1] for i in arr[-1].split(";")};
eventsType = getValue(dic, 'eventsType');
mac = getValue(dic, 'mac');
sn = getValue(dic, 'sn');
userId = getValue(dic, 'userId');
userType = getValue(dic, 'userType');
areaCode = getValue(dic, 'areaCode');
parentColumnId = getValue(dic, 'parentColumnId');
ca = getValue(dic, 'ca');
eventsName = getValue(dic, 'eventsName');
# events_name,area_code,mac,sn,user_id,ca,user_type,parent_column_id,create_time,t_data,events_type
dataArr = [];
for k in dic.keys():
dataArr.append('%s=%s' % (k, dic[k]))
print "\t".join(
[eventsName, areaCode, mac,sn, userId, ca, userType, parentColumnId, createTime, ';'.join(dataArr), eventsType])
```
#### File: sx_hadoop_script/tool/LinuxShell.py
```python
__author__ = "jiandq"
import paramiko
# 打开数据库连接
from Config import config
"""mysql初始化"""
class SSHCommand:
def __init__(self,section="SSH"):
conf = config("conf/conf.ini");
host = conf.getValue(section, "host");
username = conf.getValue(section, "username");
password = conf.getValue(section, "password");
port = conf.getValue(section, "port");
#创建SSHClient实例对象
self.transport = paramiko.Transport((host, int(port)))
self.transport.connect(username=username, password=password);
self.ssh = None
self.chan = None
self.sftp = None
def cmd(self,command):
# 创建目录
if self.ssh is None:
self.ssh = paramiko.SSHClient()
self.ssh._transport = self.transport
self.chan = self.ssh.invoke_shell()
self.chan.settimeout(90000)
result = self.chan.recv(1024)
print result
# stdin, stdout, stderr=self.ssh.exec_command(command,bufsize=0)
self.chan.send(command+'\n')
results = self.chan.recv(1024)
# 循环获取数据
while True:
if results:
print results
if results.endswith('# ') or results.endswith('$ '):
break
results = self.chan.recv(1024)
#stdin, stdout, stderr = self.ssh.exec_command("pwd")
def upload(self, local_path, target_path):
#连接,上传
# file_name = self.create_file()
self.sftp = paramiko.SFTPClient.from_transport(self.transport)
# 将location.py 上传至服务器 /tmp/test.py
self.sftp.put(local_path, target_path);
def download(self, remote_path, local_path):
self.sftp = paramiko.SFTPClient.from_transport(self.transport)
try:
self.sftp.get(remote_path, local_path)
except Exception, e:
print e
def close(self):
if not self.chan is None:
self.chan.close();
if not self.sftp is None:
self.sftp.close();
if not self.ssh is None:
self.ssh.close();
if __name__ == "__main__":
SSHCommand().cmd();
``` |
{
"source": "jiando/rf",
"score": 3
} |
#### File: rf/har2rf/pulbiclibrary.py
```python
import json
import requests
import time
import hashlib
import random
import pymysql
__version__ = '0.1'
class PublicLibrary(object):
    def __init__(self):
pass
def getCoding(self, strInput):
u"""
        Get the encoding format of the input string.
"""
if isinstance(strInput, unicode):
return "unicode"
try:
strInput.decode("utf8")
return 'utf8'
except:
pass
try:
strInput.decode("gbk")
return 'gbk'
except:
pass
def tran2UTF8(self, strInput):
"""
        Convert the input to UTF-8.
"""
strCodingFmt = self.getCoding(strInput)
if strCodingFmt == "utf8":
return strInput
elif strCodingFmt == "unicode":
return strInput.encode("utf8")
elif strCodingFmt == "gbk":
return strInput.decode("gbk").encode("utf8")
def tran2GBK(self, strInput):
"""
        Convert the input to GBK.
"""
strCodingFmt = self.getCoding(strInput)
if strCodingFmt == "gbk":
return strInput
elif strCodingFmt == "unicode":
return strInput.encode("gbk")
elif strCodingFmt == "utf8":
return strInput.decode("utf8").encode("gbk")
def md5(self, init_str):
"""
        Return the MD5 hex digest of the input.
"""
m = hashlib.md5()
m.update(init_str)
return m.hexdigest()
def eval_dict(self, strInput):
u"""接收字符串直接转成需要类型,例
| eval dict | str |
"""
strInput = eval(strInput)
return strInput
def random_num(self, num):
"""
        Generate a random number with the given number of digits.
"""
number = ''
for i in random.sample(range(10), int(num)):
number += ''.join(str(i))
return number
def req(
self,
login_msg,
url,
method,
data=None,
headers=None):
u"""专用,有登录状态,例
| run interface test tenant | login_msg,url,method,data,headers
"""
session = requests.Session()
url = self.tran2UTF8(url)
method = self.tran2UTF8(method)
if login_msg:
login_msg = self.eval_dict(login_msg)
md5_pwd = <PASSWORD>(login_msg['<PASSWORD>'])
login_msg['passwd'] = <PASSWORD>
if data:
data = self.eval_dict(data)
if headers:
headers = self.eval_dict(headers)
else:
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
results = 'connection error'
        # log in first
r = session.post('https://xxxxxx.cn/login',
data=json.dumps(login_msg), headers=headers)
print ("*******************************")
print (u"登录状态信息")
print (r.status_code)
print (r.content)
print ("*******************************")
try:
if method == "post":
if isinstance(data, dict):
data = json.dumps(data)
results = session.post(
url, data=data, headers=headers, verify=False)
elif method == "get":
results = session.get(
url, params=data, headers=headers, verify=False)
elif method == 'delete':
results = session.delete(url, headers=headers, verify=False)
return results
except requests.ConnectionError as e:
return e
def con_db(self, sql):
db = pymysql.connect(
host="1.1.5.2",
user="xxx",
passwd="<PASSWORD>",
db="xxx",
charset='utf8')
cursor = db.cursor()
cursor.execute(sql)
data = cursor.fetchone()
db.close()
return data
``` |
{
"source": "jian-en/oppia",
"score": 2
} |
#### File: core/controllers/cron_test.py
```python
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import datetime
import logging
from core import jobs
from core.controllers import cron
from core.domain import cron_services
from core.domain import email_manager
from core.domain import taskqueue_services
from core.platform import models
from core.tests import test_utils
import main_cron
import utils
from mapreduce import model as mapreduce_model
import webtest
(job_models, suggestion_models) = models.Registry.import_models(
[models.NAMES.job, models.NAMES.suggestion])
class SampleMapReduceJobManager(jobs.BaseMapReduceJobManager):
"""Test job that maps over the general suggestion model."""
@classmethod
def entity_classes_to_map_over(cls):
return [suggestion_models.GeneralSuggestionModel]
class CronJobTests(test_utils.GenericTestBase):
def setUp(self):
super(CronJobTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.testapp_swap = self.swap(
self, 'testapp', webtest.TestApp(main_cron.app))
self.email_subjects = []
self.email_bodies = []
def _mock_send_mail_to_admin(email_subject, email_body):
"""Mocks email_manager.send_mail_to_admin() as its not possible to
send mail with self.testapp_swap, i.e with the URLs defined in
main_cron.
"""
self.email_subjects.append(email_subject)
self.email_bodies.append(email_body)
self.send_mail_to_admin_swap = self.swap(
email_manager, 'send_mail_to_admin', _mock_send_mail_to_admin)
def test_send_mail_to_admin_on_job_success(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
with self.testapp_swap, self.send_mail_to_admin_swap:
self.get_html_response('/cron/mail/admin/job_status')
self.assertEqual(self.email_subjects, ['MapReduce status report'])
self.assertEqual(
self.email_bodies, ['All MapReduce jobs are running fine.'])
self.logout()
def test_send_mail_to_admin_on_job_failure(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
job_id = SampleMapReduceJobManager.create_new()
SampleMapReduceJobManager.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_DEFAULT), 1)
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
SampleMapReduceJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
# Increase retries to denote a stuck job.
shard_state_model_class = mapreduce_model.ShardState
recent_job_models = shard_state_model_class.all()
for job_model in recent_job_models:
job_model.retries += 1
job_model.put()
with self.testapp_swap, self.send_mail_to_admin_swap:
self.get_html_response('/cron/mail/admin/job_status')
self.assertEqual(self.email_subjects, ['MapReduce failure alert'])
self.assertEqual(len(self.email_bodies), 1)
self.assertIn(
'5 jobs have failed in the past 25 hours. More information '
'(about at most 50 jobs; to see more, please check the logs)',
self.email_bodies[0])
self.logout()
def test_cron_dashboard_stats_handler(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
with self.testapp_swap:
self.get_html_response('/cron/users/dashboard_stats')
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
all_jobs = job_models.JobModel.get_all_unfinished_jobs(3)
self.assertEqual(len(all_jobs), 1)
self.assertEqual(all_jobs[0].job_type, 'DashboardStatsOneOffJob')
self.logout()
def test_cron_user_deletion_handler(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
with self.testapp_swap:
self.get_html_response('/cron/users/user_deletion')
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
all_jobs = job_models.JobModel.get_all_unfinished_jobs(3)
self.assertEqual(len(all_jobs), 1)
self.assertEqual(all_jobs[0].job_type, 'UserDeletionOneOffJob')
self.logout()
def test_cron_fully_complete_user_deletion_handler(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
with self.testapp_swap:
self.get_html_response('/cron/users/fully_complete_user_deletion')
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
all_jobs = job_models.JobModel.get_all_unfinished_jobs(3)
self.assertEqual(len(all_jobs), 1)
self.assertEqual(
all_jobs[0].job_type, 'FullyCompleteUserDeletionOneOffJob')
self.logout()
def test_cron_exploration_recommendations_handler(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
with self.testapp_swap:
self.get_html_response('/cron/explorations/recommendations')
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
all_jobs = job_models.JobModel.get_all_unfinished_jobs(3)
self.assertEqual(len(all_jobs), 1)
self.assertEqual(
all_jobs[0].job_type, 'ExplorationRecommendationsOneOffJob')
def test_cron_activity_search_rank_handler(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
with self.testapp_swap:
self.get_html_response('/cron/explorations/search_rank')
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
all_jobs = job_models.JobModel.get_all_unfinished_jobs(3)
self.assertEqual(len(all_jobs), 1)
self.assertEqual(all_jobs[0].job_type, 'IndexAllActivitiesJobManager')
def test_clean_data_items_of_completed_map_reduce_jobs(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.warning()."""
observed_log_messages.append(msg % args)
logging_swap = self.swap(logging, 'warning', _mock_logging_function)
recency_msec_swap = self.swap(
cron, 'MAX_MAPREDUCE_METADATA_RETENTION_MSECS', 0)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
job_id = SampleMapReduceJobManager.create_new()
SampleMapReduceJobManager.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_DEFAULT), 1)
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
SampleMapReduceJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_DEFAULT), 0)
with self.testapp_swap, logging_swap, recency_msec_swap:
self.get_html_response('/cron/jobs/cleanup')
self.assertEqual(
observed_log_messages,
[
'1 MR jobs cleaned up.',
'Deletion jobs for auxiliary MapReduce entities kicked off.',
'Deletion jobs for JobModels entities kicked off.'
]
)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_DEFAULT), 1)
self.process_and_flush_pending_mapreduce_tasks()
def test_cannot_clean_data_item_of_jobs_with_existing_running_cleanup_job(
self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.warning()."""
observed_log_messages.append(msg % args)
logging_swap = self.swap(logging, 'warning', _mock_logging_function)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
job_id = cron_services.MapReduceStateModelsCleanupManager.create_new()
cron_services.MapReduceStateModelsCleanupManager.enqueue(job_id)
self.run_but_do_not_flush_pending_mapreduce_tasks()
self.assertEqual(
cron_services.MapReduceStateModelsCleanupManager
.get_status_code(job_id),
jobs.STATUS_CODE_STARTED)
with self.testapp_swap, logging_swap:
self.get_html_response('/cron/jobs/cleanup')
self.assertEqual(
observed_log_messages,
[
'0 MR jobs cleaned up.',
'A previous cleanup job is still running.',
'Deletion jobs for JobModels entities kicked off.'
]
)
def test_cannot_run_job_models_cleanup_with_existing_running_cleanup_job(
self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.warning()."""
observed_log_messages.append(msg % args)
logging_swap = self.swap(logging, 'warning', _mock_logging_function)
self.login(self.ADMIN_EMAIL, is_super_admin=True)
job_id = cron_services.JobModelsCleanupManager.create_new()
cron_services.JobModelsCleanupManager.enqueue(job_id)
self.run_but_do_not_flush_pending_mapreduce_tasks()
self.assertEqual(
cron_services.JobModelsCleanupManager.get_status_code(job_id),
jobs.STATUS_CODE_STARTED)
with self.testapp_swap, logging_swap:
self.get_html_response('/cron/jobs/cleanup')
self.assertEqual(
observed_log_messages,
[
'0 MR jobs cleaned up.',
'Deletion jobs for auxiliary MapReduce entities kicked off.',
'A previous JobModels cleanup job is still running.'
]
)
class JobModelsCleanupManagerTests(test_utils.GenericTestBase):
JOB_1_ID = 'job_1_id'
JOB_2_ID = 'job_2_id'
JOB_3_ID = 'job_3_id'
THIRTEEN_WEEKS = datetime.timedelta(weeks=13)
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = cron_services.JobModelsCleanupManager.create_new()
cron_services.JobModelsCleanupManager.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
stringified_output = (
cron_services.JobModelsCleanupManager.get_output(job_id))
eval_output = [ast.literal_eval(stringified_item) for
stringified_item in stringified_output]
return eval_output
def setUp(self):
super(JobModelsCleanupManagerTests, self).setUp()
self.now_in_millisecs = utils.get_time_in_millisecs(
datetime.datetime.utcnow())
date_thirteen_weeks_ago = (
datetime.datetime.utcnow() - self.THIRTEEN_WEEKS)
self.thirteen_weeks_ago_in_millisecs = utils.get_time_in_millisecs(
date_thirteen_weeks_ago)
def test_delete_job_model_completed_older_than_12_weeks(self):
job_models.JobModel(
id=self.JOB_1_ID,
time_finished_msec=self.thirteen_weeks_ago_in_millisecs,
status_code=job_models.STATUS_CODE_COMPLETED
).put()
output = self._run_one_off_job()
self.assertItemsEqual(
output, [['SUCCESS_DELETED', 1], ['SUCCESS_KEPT', 1]])
self.assertIsNone(job_models.JobModel.get_by_id(self.JOB_1_ID))
def test_delete_job_model_failed_older_than_12_weeks(self):
job_models.JobModel(
id=self.JOB_1_ID,
time_finished_msec=self.thirteen_weeks_ago_in_millisecs,
status_code=job_models.STATUS_CODE_FAILED
).put()
output = self._run_one_off_job()
self.assertItemsEqual(
output, [['SUCCESS_DELETED', 1], ['SUCCESS_KEPT', 1]])
self.assertIsNone(job_models.JobModel.get_by_id(self.JOB_1_ID))
def test_delete_job_model_canceled_older_than_12_weeks(self):
job_models.JobModel(
id=self.JOB_1_ID,
time_finished_msec=self.thirteen_weeks_ago_in_millisecs,
status_code=job_models.STATUS_CODE_CANCELED
).put()
output = self._run_one_off_job()
self.assertItemsEqual(
output, [['SUCCESS_DELETED', 1], ['SUCCESS_KEPT', 1]])
self.assertIsNone(job_models.JobModel.get_by_id(self.JOB_1_ID))
def test_keep_job_model_canceled_younger_than_12_weeks(self):
job_models.JobModel(
id=self.JOB_1_ID,
time_finished_msec=self.now_in_millisecs,
status_code=job_models.STATUS_CODE_CANCELED
).put()
output = self._run_one_off_job()
self.assertEqual(output, [['SUCCESS_KEPT', 2]])
self.assertIsNotNone(job_models.JobModel.get_by_id(self.JOB_1_ID))
``` |
{
"source": "jianershi/algorithm",
"score": 3
} |
#### File: algorithm/leetcode/1550.leetcode.py
```python
from typing import List
class Solution:
def threeConsecutiveOdds(self, arr: List[int]) -> bool:
count = 0
for c in arr:
if c % 2 == 1:
count += 1
else:
count = 0
if count == 3:
return True
return False
```
#### File: algorithm/leetcode/1552.leetcode.py
```python
from typing import List
class Solution:
def maxDistance(self, position: List[int], m: int) -> int:
position = sorted(position)
start, end = 1, position[-1]
while start + 1 < end:
mid = (start + end) // 2
if self.is_ok(mid, position, m):
start = mid
else:
end = mid
if self.is_ok(end, position, m):
return end
return start
def is_ok(self, min_dist, position, m):
i = 1
n = len(position)
m -= 1
last_position = position[0]
while m > 0:
while i < n and position[i] - last_position < min_dist:
i += 1
if i < n:
last_position = position[i]
m -= 1
if m <= 0:
return True
if i == n:
return False
if m <= 0:
return True
return False
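# Illustrative usage (sample values are an assumption, not part of the original file):
# binary search on the answer -- the largest minimum gap for which is_ok can still
# place all m balls between the sorted positions.
if __name__ == "__main__":
    print(Solution().maxDistance([1, 2, 3, 4, 7], 3))  # expected 3 (balls at 1, 4 and 7)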
```
#### File: algorithm/leetcode/1584.leetcode.py
```python
import sys
from typing import List
class Solution:
def find_min_node(self, n, key, mst_set):
min, min_node = sys.maxsize, -1
for i in range(n):
if not mst_set[i] and key[i] < min:
min, min_node = key[i], i
return min_node
def solve(self, n, edges):
mst_set = [False] * n;
key = [sys.maxsize] * n;
parent = [-1] * n;
key[0] = 0
parent[0] = -1
for i in range(n):
u = self.find_min_node(n, key, mst_set)
mst_set[u] = True
for v in range(n):
if (not mst_set[v] and edges[u][v] < key[v]):
parent[v] = u
key[v] = edges[u][v]
res = 0
for i in range(1, n):
res += edges[parent[i]][i]
return res
def minCostConnectPoints(self, points: List[List[int]]) -> int:
n = len(points)
edges = [[0] * n for _ in range(n)]
for i in range(n):
for j in range(n):
edges[i][j] = abs(points[i][0] - points[j][0]) + abs(points[i][1] - points[j][1])
return self.solve(n, edges)
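# Illustrative usage (values taken from the problem's public sample, not part of the
# original file): Prim's algorithm over the dense Manhattan-distance graph.
if __name__ == "__main__":
    print(Solution().minCostConnectPoints([[0, 0], [2, 2], [3, 10], [5, 2], [7, 0]]))  # expected 20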
```
#### File: algorithm/leetcode/1594.1.py
```python
from typing import List
class Solution:
def maxProductPath(self, grid: List[List[int]]) -> int:
n = len(grid)
m = len(grid[0])
MOD = 1000000007
dp_max = [[0] * m for _ in range(n)]
dp_min = [[0] * m for _ in range(n)]
dp_min[0][0] = dp_max[0][0] = grid[0][0]
#initialize top line and left line
for i in range(1, n):
dp_min[i][0] = dp_max[i][0] = dp_max[i - 1][0] * grid[i][0]
for j in range(1, m):
dp_min[0][j] = dp_max[0][j] = dp_max[0][j - 1] * grid[0][j]
for i in range(1, n):
for j in range(1, m):
if grid[i][j] >= 0:
dp_max[i][j] = max(dp_max[i][j - 1], dp_max[i - 1][j]) * grid[i][j]
dp_min[i][j] = min(dp_min[i][j - 1], dp_min[i - 1][j]) * grid[i][j]
else:
dp_max[i][j] = min(dp_min[i][j - 1], dp_min[i - 1][j]) * grid[i][j]
dp_min[i][j] = max(dp_max[i][j - 1], dp_max[i - 1][j]) * grid[i][j]
return dp_max[n - 1][m - 1] % MOD if dp_max[n - 1][m - 1] >= 0 else -1
```
#### File: algorithm/leetcode/55.dp.tle.py
```python
from typing import List
class Solution:
def canJump(self, nums: List[int]) -> bool:
n = len(nums)
dp = [False] * n
dp[0] = True
for j in range(n):
for i in range(j):
if dp[i] and nums[i] + i >= j:
dp[j] = True
break
return dp[n - 1]
```
#### File: algorithm/leetcode/55.greedy.py
```python
from typing import List
class Solution:
def canJump(self, nums: List[int]) -> bool:
n = len(nums)
reach = 0
for i in range(n):
if i > reach:
break
reach = max(reach, nums[i] + i)
return reach >= n - 1
```
#### File: algorithm/lintcode/105.2.py
```python
class Solution:
# @param head: A RandomListNode
# @return: A RandomListNode
def copyRandomList(self, head):
# write your code here
new_list_head, new_list_map = self.copy_node(head) #copy node in order, key: node
self.re_link(new_list_head, head, new_list_map)#relink
return new_list_head
def copy_node(self, head):
if not head:
return head
new_map = {}
dummy = RandomListNode(None)
new_p = dummy
p = head
while p:
new_p.next = RandomListNode(p.label)
new_map[p.label] = new_p.next
new_p = new_p.next
p = p.next
return dummy.next, new_map
def re_link(self, new_head, old_head, new_map):
p = new_head
o_p = old_head
while p:
if o_p.random:
p.random = new_map[o_p.random.label]
p = p.next
o_p = o_p.next
```
#### File: algorithm/lintcode/106.py
```python
class Solution:
"""
@param: head: The first node of linked list.
@return: a tree node
"""
def sortedListToBST(self, head):
# write your code here
# if not head:
# return None
size = self.find_length(head)
left, mid = self.helper(head, size)
return left
def find_length(self, head):
size = 0
p = head
while p:
size += 1
p = p.next
return size
def helper(self, head, length):
if length == 0:
return None, head
left, mid = self.helper(head, length // 2)
root = TreeNode(mid.val)
right, nxt = self.helper(mid.next, length - length // 2 - 1)
root.left = left
root.right = right
return root, nxt
```
#### File: algorithm/lintcode/107.1.py
```python
class Solution:
"""
@param: s: A string
@param: dict: A dictionary of words dict
@return: A boolean
"""
def wordBreak(self, s, dict):
# write your code here
if dict:
max_dict_word_length = max([len(x) for x in dict])
else:
max_dict_word_length = 0
return self.can_break(s, 0, dict, max_dict_word_length, {})
def can_break(self, s, i, dict, max_dict_word_length, memo):
if i in memo:
return memo[i]
if len(s) == i:
return True
for index in range(i, len(s)):
if index - i > max_dict_word_length:
break
if s[i:index + 1] not in dict:
continue
if self.can_break(s, index + 1, dict, max_dict_word_length, memo):
memo[i] = True
return memo[i]
        memo[i] = False
        return memo[i]
```
#### File: algorithm/lintcode/107.dfs.py
```python
class Solution:
"""
@param: s: A string
@param: dict: A dictionary of words dict
@return: A boolean
"""
def wordBreak(self, s, dict):
return self.dfs(s, dict, 0)
def dfs(self, s, word_lists, index):
if index == len(s):
return True
if index > len(s):
return False
for word in word_lists:
if s[index:].startswith(word):
if self.dfs(s, word_lists, index + len(word)):
return True
return False
```
#### File: algorithm/lintcode/117.1.py
```python
import sys
class Solution:
"""
@param A: A list of integers
@return: An integer
"""
def jump(self, A):
# write your code here
if not A:
return -1
n = len(A)
dp = [sys.maxsize] * n
dp[0] = 0
for i in range(1, n):
for j in range(i):
if A[j] >= i - j:
dp[i] = min(dp[i], dp[j] + 1)
break
return dp[n - 1]
```
#### File: algorithm/lintcode/1179.py
```python
from collections import deque
class Solution:
"""
@param M: a matrix
@return: the total number of friend circles among all the students
"""
def findCircleNum(self, M):
# Write your code here
visited = set()
count = 0
for person_id in range(len(M)):
if person_id in visited:
continue
count += 1
self.bfs(person_id, M, visited)
return count
def bfs(self, person_id, M, visited):
queue = deque()
queue.append(person_id)
visited.add(person_id)
while queue:
now_id = queue.popleft()
for friend_id, is_friend in enumerate(M[now_id]):
if friend_id in visited:
continue
if is_friend:
queue.append(friend_id)
visited.add(friend_id)
```
#### File: algorithm/lintcode/1181.py
```python
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: a root of binary tree
@return: return a integer
"""
def diameterOfBinaryTree(self, root):
# write your code here
m_d, _ = self.dfs(root)
return m_d
def dfs(self, root):
if not root:
return 0, 0
left_m_d, left_m_c = self.dfs(root.left)
right_m_d, right_m_c = self.dfs(root.right)
m_d = max(left_m_d, right_m_d, left_m_c + right_m_c)
m_c = max(left_m_c, right_m_c) + 1
return m_d, m_c
```
#### File: algorithm/lintcode/1219.py
```python
import sys
class Solution:
"""
@param houses: positions of houses
@param heaters: positions of heaters
@return: the minimum radius standard of heaters
"""
def findRadius(self, houses, heaters):
# Write your code here
min_radius = -sys.maxsize
heaters.sort()
for house in houses:
min_radius = max(min_radius, self.binary_search(house, heaters))
return min_radius if min_radius != sys.maxsize else -1
def binary_search(self, house, heaters):
start, end = 0, len(heaters) - 1
min_distance = sys.maxsize
while start + 1 < end:
mid = (start + end) // 2
if house < heaters[mid]:
end = mid
if house > heaters[mid]:
start = mid
if house == heaters[mid]:
return 0
min_distance = min(min_distance, abs(house - heaters[start]), abs(heaters[end] - house))
return min_distance
s = Solution()
houses, heaters = [1,2,3,4],[1,4]
print(s.findRadius(houses, heaters))
```
#### File: algorithm/lintcode/12.1.py
```python
class MinStack:
def __init__(self):
# do intialization if necessary
self.stack = []
self.min_stack = []
"""
@param: number: An integer
@return: nothing
"""
def push(self, number):
# write your code here
self.stack.append(number)
if len(self.min_stack) == 0 or number <= self.min_stack[-1]:
self.min_stack.append(number)
"""
@return: An integer
"""
def pop(self):
# write your code here
if self.min_stack and self.stack and self.min_stack[-1] == self.stack[-1]:
self.min_stack.pop()
return self.stack.pop()
"""
@return: An integer
"""
def min(self):
# write your code here
return self.min_stack[-1]
```
#### File: algorithm/lintcode/124.py
```python
class Solution:
"""
@param num: A list of integers
@return: An integer
"""
def longestConsecutive(self, num):
# write your code here
if not num:
return 0
hash = set(num)
max_count = 1
for i_n in num:
if i_n - 1 not in hash:
count = 1
while i_n + 1 in hash:
count += 1
i_n += 1
max_count = max(count, max_count)
return max_count
```
#### File: algorithm/lintcode/1271.1.py
```python
class Solution:
"""
@param nums: the gievn integers
@return: the total Hamming distance between all pairs of the given numbers
"""
def totalHammingDistance(self, nums):
# Write your code here
res = 0
n = len(nums)
ones = [0] * 30
for num in nums:
j = 29
while num > 0:
ones[j] += num & 1
num >>= 1
j -= 1
for i in range(30):
res += ones[i] * (len(nums) - ones[i])
return res
```
#### File: algorithm/lintcode/1305.py
```python
class Solution:
"""
@param num: a non-negative integer
@return: english words representation
"""
def numberToWords(self, num):
# Write your code here
res = ""
if num == 0:
return "Zero"
thousands = ["Billion", "Million", "Thousand"]
n = 1000000000
for i in range(3):
if num // n:
res += self.convertHundred(num // n) + " " + thousands[i]
num %= n
n //= 1000
res += self.convertHundred(num)
return res[1:]
def convertHundred(self, num):
        underTwenty = ["", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Eleven", "Twelve", "Thirteen", "Fourteen", "Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"]
tens = ["", "", "Twenty", "Thirty", "Forty", "Fifty", "Sixty", "Seventy", "Eighty", "Ninety"]
res = ""
if num >= 100:
res += " " + underTwenty[num // 100] + " " + "Hundred"
num %= 100
if num >= 20:
res += " " + tens[num // 10]
num %= 10
if num != 0:
res += " " + underTwenty[num]
return res
```
#### File: algorithm/lintcode/1310.py
```python
class Solution:
"""
@param nums: an array of integers
@return: the product of all the elements of nums except nums[i].
"""
def productExceptSelf(self, nums):
# write your code here
prefix_sum = self.build_prefix_product(nums)
suffix_sum = self.build_prefix_product(nums[::-1])[::-1]
result = [0] * len(nums)
for i in range(len(result)):
result[i] = prefix_sum[i] * suffix_sum[i + 1]
"""
index 0123
^
str 3841
^
        prefix_sum = [1, 3, 24, 96, 96]
prefix_sum[2] = s[0] * s[1] = 3 * 8
index 3210
^
str[::-1] 1483
^
prefix_sum of flipped string = [1, 1, 4, 32, 96]
s[2] = 8
prefix_sum of flipped string [2] = 4
postfix_sum = prefix_sum of flipped string[::-1] = [96, 32, 4, 1, 1]
s[2] = 8
postfix_sum[2 + 1] = 4
that's why there is + 1
"""
return result
def build_prefix_product(self, nums):
prefix_product = [None] * (len(nums) + 1)
prefix_product[0] = 1
for i in range(1, len(nums) + 1):
prefix_product[i] = prefix_product[i - 1] * nums[i - 1]
return prefix_product
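# Quick illustrative check (not part of the original file); for [3, 8, 4, 1] the
# expected answer is [32, 12, 24, 96], matching the walkthrough in the docstring above.
if __name__ == "__main__":
    print(Solution().productExceptSelf([3, 8, 4, 1]))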
```
#### File: algorithm/lintcode/13.1.py
```python
class Solution:
"""
@param source:
@param target:
@return: return the index
"""
"""
    Two loops: the outer loop scans source, the inner one scans target. Break on a mismatch; if the inner loop runs to completion, a match has been found. If the whole source is scanned without one, the target does not occur.
"""
def strStr(self, source, target):
        # edge-case checks
if not target:
return 0
if len(target) > len(source):
return -1
# Write your code here
for index in range(len(source)):
ptr = 0
while ptr < len(target) and index + ptr < len(source):
if target[ptr] != source[index + ptr]:
break
ptr += 1
if ptr == len(target):
return index
return -1
def main():
s = Solution()
print (s.strStr("abcdg","cdg"))
if __name__ == "__main__":
main()
```
#### File: algorithm/lintcode/132.2.py
```python
class TrieNode:
def __init__(self):
self.children = {}
self.is_word = False
self.word = None
class Trie:
def __init__(self):
self.root = TrieNode()
def add(self, words):
for word in words:
node = self.root
for c in word:
node.children[c] = node.children.get(c, TrieNode())
node = node.children[c]
node.is_word = True
node.word = word
    def search_prefix(self, prefix):
        node = self.root
        for c in prefix:
            if c not in node.children:
                return []
            node = node.children[c]
        # collect every word stored under the prefix node (the original returned
        # a non-existent attribute here)
        words, stack = [], [node]
        while stack:
            curr = stack.pop()
            if curr.is_word:
                words.append(curr.word)
            stack.extend(curr.children.values())
        return words
DIRECTIONS = [
(0, 1),
(0, -1),
(-1, 0),
(1, 0)
]
class Solution:
"""
@param board: A list of lists of character
@param words: A list of string
@return: A list of string
"""
def wordSearchII(self, board, words):
# write your code here
if not board or not board[0]:
return []
results = set()
n = len(board)
m = len(board[0])
trie = Trie()
trie.add(words)
for i in range(n):
for j in range(m):
self.search_word(board, i, j, trie, trie.root.children.get(board[i][j]), set([(i,j)]), results)
return list(results)
def search_word(self, board, i, j, trie, node, visited, results):
"""
if a none node get passed down, end current search
"""
if node == None:
return
"""
dfs search on board along with tria. since only after reaching the child, can you get
if it is a word, so that's why it needs to pass down child istead of root.
"""
if node.is_word and node.word not in results:
results.add(node.word)
for delta_x, delta_y in DIRECTIONS: #chec four diretions
next_x, next_y = i + delta_x, j + delta_y
if self.is_valid(board, next_x, next_y, visited):
visited.add((next_x, next_y))
#there may or may not be a children. which is fine. and handled by exit condition of recursion.
self.search_word(board, next_x, next_y, trie, node.children.get(board[next_x][next_y]), visited, results)
visited.remove((next_x, next_y))
def is_valid(self, board, next_x, next_y, visited):
n = len(board)
m = len(board[0])
if (next_x, next_y) in visited:
return False
if not (0 <= next_x < n and 0 <= next_y < m):
return False
return True
s = Solution()
board = ["doaf","agai","dcan"]
words = ["dog","dad","dgdg","can","again"]
print(s.wordSearchII(board, words))
```
#### File: algorithm/lintcode/134.1.py
```python
class LinkedNode:
def __init__(self, key, val):
self.key = key
self.val = val
self.next = None
class LRUCache:
"""
@param: capacity: An integer
"""
def __init__(self, capacity):
# do intialization if necessary
self.capacity = capacity
self.count = 0
self.key_to_prev = {}
self.dummy = LinkedNode(None, None)
self.tail = self.dummy
"""
@param: key: An integer
@return: An integer
"""
def get(self, key):
# write your code here
if key not in self.key_to_prev:
return -1
self.kick_to_back(key)
return self.tail.val
def kick_to_back(self, key):
prev = self.key_to_prev[key]
curr = prev.next
if curr == self.tail:
return
prev.next = curr.next
if prev.next:
self.key_to_prev[prev.next.key] = prev
self.tail.next = curr
self.key_to_prev[key] = self.tail
self.tail = self.tail.next
self.tail.next = None
"""
@param: key: An integer
@param: value: An integer
@return: nothing
"""
def set(self, key, value):
# write your code here
if key in self.key_to_prev:
self.kick_to_back(key)
self.tail.val = value
return
self.tail.next = LinkedNode(key, value)
self.key_to_prev[key] = self.tail
self.tail = self.tail.next
self.count += 1
if self.count > self.capacity:
del self.key_to_prev[self.dummy.next.key]
self.dummy = self.dummy.next
```
#### File: algorithm/lintcode/1375.2.py
```python
class Solution:
"""
@param s: a string
@param k: an integer
@return: the number of substrings there are that contain at least k distinct characters
"""
def kDistinctCharacters(self, s, k):
# Write your code here
n = len(s)
left = 0
count = [0] * 256
distinct_count = 0
substring_count = 0
for right in range(n):
count[ord(s[right])] += 1
if count[ord(s[right])] == 1:
distinct_count += 1
while left <= right and distinct_count >= k:
substring_count += n - right
count[ord(s[left])] -= 1
if count[ord(s[left])] == 0:
distinct_count -= 1
left += 1
return substring_count
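# Illustrative example (not in the original file): for s = "abcabc" and k = 3 the
# answer is 10 -- once the window [left, right] reaches k distinct characters,
# every longer right end also qualifies, hence the "n - right" increment.
# Solution().kDistinctCharacters("abcabc", 3) == 10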
```
#### File: algorithm/lintcode/141.py
```python
class Solution:
"""
@param x: An integer
@return: The sqrt of x
"""
def sqrt(self, x):
# write your code here
start, end = 0, x
while start + 1 < end:
mid = (start + end) // 2
if mid * mid < x:
start = mid
else:
end = mid
if end * end <= x:
return end
return start
```
#### File: algorithm/lintcode/1516.2.py
```python
from collections import deque
class Solution:
"""
@param arr: the arr
@param target: the target
@return: the sum of paths
"""
def xorSum(self, arr, target):
# Write your code here.
if not arr or not arr[0]:
return 0
n = len(arr)
m = len(arr[0])
hit_to_x_y_with_now_sum = {} #(x, y) : sum
queue1 = deque([(0, 0, arr[0][0])])
queue2 = deque([(n - 1, m - 1, arr[n - 1][m - 1])])
QUEUE1_DIRECTION = [(0, 1), (1, 0)]
QUEUE2_DIRECTION = [(0, -1), (-1, 0)]
visited = [[None] * m for _ in range(n)]
visited[0][0] = 0
visited[n - 1][m - 1] = 1
hit = 0
while queue1 or queue2:
if queue1:
hit += self.process_queue(queue1, arr, QUEUE1_DIRECTION, hit_to_x_y_with_now_sum, target, 0, visited)
if queue2:
hit += self.process_queue(queue2, arr, QUEUE2_DIRECTION, hit_to_x_y_with_now_sum, target, 1, visited)
return hit
def process_queue(self, queue, arr, queue_direction, hit_to_x_y_with_now_sum, target, flag, visited):
hit = 0
n = len(arr)
m = len(arr[0])
for _ in range(len(queue)):
x, y, now_sum = queue.popleft()
if x + y == (n + m) // 2:
hit_to_x_y_with_now_sum[(x, y, now_sum, flag)] = hit_to_x_y_with_now_sum.get((x, y, now_sum, flag), 0) + 1
hit += hit_to_x_y_with_now_sum.get((x, y, target ^ now_sum ^ arr[x][y], not flag), 0)
continue
for delta in queue_direction:
nx = x + delta[0]
ny = y + delta[1]
if not self.is_valid(arr, nx, ny):
continue
next_now_sum = now_sum ^ arr[nx][ny]
queue.append((nx, ny, next_now_sum))
visited[nx][ny] = flag
return hit
def is_valid(self, arr, x, y):
n = len(arr)
m = len(arr[0])
return 0 <= x < n and 0 <= y < m
s = Solution()
# arr = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
# target = 0
# arr = [[2,1,5],[7,10,0],[12,6,4]]
# target = 11
arr = [[1,3,3,3],[0,3,3,2],[3,0,1,1]]
target =2
print(s.xorSum(arr, target))
```
#### File: algorithm/lintcode/1565.2.py
```python
import sys
from collections import deque
class Solution:
"""
@param length: the length of board
@param connections: the connections of the positions
@return: the minimum steps to reach the end
"""
def modernLudo(self, length, connections):
transport = {}
for start, end in connections:
transport[start] = transport.get(start, set())
transport[start].add(end)
queue = deque([1])
distance = {1:0}
while queue:
head = queue.popleft()
for end in transport.get(head, set()):
if end in distance and distance[end] < distance[head]:
continue
queue.append(end)
distance[end] = distance[head]
for i in range(1, 7):
next_pos = head + i
if not self.is_valid(next_pos, length, distance):
continue
queue.append(next_pos)
distance[next_pos] = distance[head] + 1
return distance[length]
def is_valid(self, next_pos, length, distance):
if next_pos < 0 or next_pos > length:
return False
if next_pos in distance:
return False
return True
```
#### File: algorithm/lintcode/167.py
```python
from collections import deque
class Solution:
"""
@param l1: the first list
@param l2: the second list
@return: the sum list of l1 and l2
"""
def addLists(self, l1, l2):
# write your code here
s1 = []
s2 = []
while l1:
s1.append(l1.val)
l1 = l1.next
while l2:
s2.append(l2.val)
l2 = l2.next
s = []
carry = 0
i, j = 0, 0
while i <= len(s1) - 1 or j <= len(s2) - 1:
if i >= len(s1):
p1 = 0
if j >= len(s2):
p2 = 0
if i < len(s1):
p1 = s1[i]
i += 1
if j < len(s2):
p2 = s2[j]
j += 1
s.append((p1 + p2 + carry) % 10)
carry = (p1 + p2 + carry) // 10
if carry:
s.append(carry)
dummy = ListNode(0)
tail = dummy
for digit in s:
tail.next = ListNode(digit)
tail = tail.next
return dummy.next
```
#### File: algorithm/lintcode/178.py
```python
class Solution:
"""
@param n: An integer
@param edges: a list of undirected edges
@return: true if it's a valid tree, or false
"""
def validTree(self, n, edges):
# write your code here
if len(edges) != n - 1:
return False
if n == 1 and edges == []:
return True
self.father = {}
self.count = 0
for a, b in edges:
if a not in self.father:
self.father[a] = a
self.count += 1
if b not in self.father:
self.father[b] = b
self.count += 1
if self.union(a, b):
self.count -= 1
return self.count == 1
def union(self, a, b):
a_father = self.find(a)
b_father = self.find(b)
if a_father == b_father:
return False
self.father[a_father] = b_father
return True
def find(self, x):
if self.father[x] == x:
return x
self.father[x] = self.find(self.father[x])
return self.father[x]
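# Illustrative checks (not in the original file):
# validTree(5, [[0, 1], [0, 2], [0, 3], [1, 4]]) -> True   (n - 1 edges, one component)
# validTree(5, [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]) -> False  (edge count != n - 1)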
```
#### File: algorithm/lintcode/1827.py
```python
# class Solution:
# """
# @param steps: steps you can move
# @param arrLen: the length of the array
# @return: Number of Ways to Stay in the Same Place After Some Steps
# """
# def numWays(self, steps, arrLen):
# MOD = 1000000007
# arrLen = min(steps // 2 + 1, arrLen)
# dp = [[0] * arrLen for _ in range(2)]
# # dp1 = [0] * arrLen
# # dp2 = [0] * arrLen
# dp[0][0] = 1
# # dp2[0] = 1
# # dp[0][0] = 1
# for i in range(1, steps + 1):
# for j in range(0, arrLen):
# dp[i % 2][j] = dp[(i - 1) % 2][j]
# if j > 0:
# dp[i % 2][j] = (dp[i % 2][j] % MOD + dp[(i - 1) % 2][j - 1] % MOD) % MOD
# if j < arrLen - 1:
# dp[i % 2][j] = (dp[i % 2][j] % MOD + dp[(i - 1) % 2][j + 1] % MOD) % MOD
# # print (dp[steps % 2][0])
# return dp[steps % 2][0] % MOD
# class Solution:
# """
# @param steps: steps you can move
# @param arrLen: the length of the array
# @return: Number of Ways to Stay in the Same Place After Some Steps
# """
# def numWays(self, steps, arrLen):
# # write your code here
# memo = {} # index, curr_step
# # print(memo)
# return self.dfs(steps, arrLen, 0, steps, memo)
#
# """
# return true if curr_step is 0 and back in origin
# return False otherwise
# recursively go to 3 directions that is within range, need to have counter.
# easisest way is to have global counter, but that is not very nice.
# but is the easiest way.
#
# divide and conquer, the total number of ways is the sum of ways of 3 directions.
#
# """
# def dfs(self, steps, arr_len, index, curr_step, memo):
# MOD = 1000000007
# if not (0 <= index < arr_len):
# return 0
# if (index, curr_step) in memo:
# return memo[(index, curr_step)]
# if curr_step == 0:
# if index == 0:
# memo[(index, curr_step)] = 1
# else:
# memo[(index, curr_step)] = 0
# return memo[(index, curr_step)]
#
# left_total = self.dfs(steps, arr_len, index - 1, curr_step - 1, memo) % MOD
# right_total = self.dfs(steps, arr_len, index + 1, curr_step - 1, memo) % MOD
# center_total = self.dfs(steps, arr_len, index, curr_step - 1, memo) % MOD
# memo[(index, curr_step)] = (left_total + right_total + center_total) % MOD
# return memo[(index, curr_step)]
```
#### File: algorithm/lintcode/1835.dp.py
```python
class Solution:
"""
@param steps: steps you can move
@param arrLen: the length of the array
@return: Number of Ways to Stay in the Same Place After Some Steps
"""
def numWays(self, steps, arrLen):
arrLen = min(steps // 2 + 1, arrLen)
dp = [[0] * arrLen for _ in range(2)]
# dp1 = [0] * arrLen
# dp2 = [0] * arrLen
dp[0][0] = 1
# dp2[0] = 1
# dp[0][0] = 1
for i in range(1, steps + 1):
for j in range(0, arrLen):
dp[i % 2][j] = dp[(i - 1) % 2][j]
if j > 0:
dp[i % 2][j] += dp[(i - 1) % 2][j - 1]
if j < arrLen - 1:
dp[i % 2][j] += dp[(i - 1) % 2][j + 1]
# print (dp[steps % 2][0])
return dp[steps % 2][0]
s = Solution()
steps = 15
arrLen = 1000000
s.numWays(steps, arrLen)
```
#### File: algorithm/lintcode/194.py
```python
class Solution:
"""
@param str: the string
@param dict: the dictionary
@return: return words which are subsequences of the string
"""
def findWords(self, str, dict):
# Write your code here.
if not str or not dict:
return []
n = len(str)
results = []
next_char = self.build_next_char(str)
for word in dict:
i = 0
j = 0
m = len(word)
while i < n and j < m:
i = next_char[i][ord(word[j]) - ord('a')]
if i == n:
break
j += 1
if j == m:
results.append(word)
return results
"""
if not found, default to len(str)
"""
def build_next_char(self, str):
n = len(str)
next_char = [[n] * 26 for _ in range(n + 1)]
for i in range(n - 1, -1, -1):
for j in range(26):
next_char[i][j] = next_char[i + 1][j]
if ord(str[i]) - ord('a') == j:
next_char[i][j] = i
return next_char
s = Solution()
# 0123456789012345678920
str = "bcogtadsjofisdhklasdj"
dict=["book","code","tag"]
print (s.findWords(str, dict))
```
#### File: algorithm/lintcode/22.py
```python
class Solution(object):
# @param nestedList a list, each element in the list
# can be a list or integer, for example [1,2,[1,2]]
# @return {int[]} a list of integer
def flatten(self, nestedList):
# Write your code here
results = []
self.dfs(nestedList, 0, results)
return results
def dfs(self, curr_list, index, results):
if curr_list == []:
return
if index == len(curr_list):
results.append(curr_list[:])
return
for item in curr_list:
if isinstance(item, list):
self.dfs(item, 0, results)
else:
results.append(item)
s = Solution()
nestedList = [[1,1],2,[1,1]]
print(s.flatten(nestedList))
```
#### File: algorithm/lintcode/271.py
```python
from collections import deque
class Solution:
"""
@param str: the prefix notation.
@return: return the postfix notation.
"""
def prefixNotationToPostfixNotation(self, str):
# write your code here.
if not str:
return ""
stack = []
str = str.split(" ")[::-1]
operator = set(["+","-","*","/"])
for i in str:
if i in operator:
op1 = stack.pop()
op2 = stack.pop()
stack.append(op1 + " " + op2 + " " + i)
else:
stack.append(i)
return stack[0]
s = Solution()
str = "+ * A B * C D"
print(s.prefixNotationToPostfixNotation(str))
```
#### File: algorithm/lintcode/291.py
```python
from collections import deque
class Solution:
"""
@param edge: edge[i][0] [1] [2] start point,end point,value
@return: return the second diameter length of the tree
"""
def getSecondDiameter(self, edge):
# write your code here
n = len(edge)
graph = {} #{node: {node:distance}}
for e in edge:
if e[0] not in graph:
graph[e[0]] = {}
graph[e[0]][e[1]] = e[2]
if e[1] not in graph:
graph[e[1]] = {}
graph[e[1]][e[0]] = e[2]
longest_index, _ = self.bfs(edge[0][0], graph)
longest_index, distance1 = self.bfs(longest_index, graph)
_, distance2 = self.bfs(longest_index, graph)
return max(sorted(distance1.values())[-2],sorted(distance2.values())[-2])
def bfs(self, root, graph):
queue = deque([(root, 0)])
distance = {root: 0}
while queue:
now, dist = queue.popleft()
for node in graph[now]:
if node in distance:
continue
queue.append((node, dist + graph[now][node]))
distance[node] = dist + graph[now][node]
max_dist = 0
max_dist_key = None
for key, value in distance.items():
if value > max_dist:
max_dist = value
max_dist_key = key
return max_dist_key, distance
```
#### File: algorithm/lintcode/29.py
```python
class Solution:
"""
@param s1: A string
@param s2: A string
@param s3: A string
@return: Determine whether s3 is formed by interleaving of s1 and s2
"""
def isInterleave(self, s1, s2, s3):
# write your code here
        m = len(s1)
        n = len(s2)
        # lengths must match, otherwise interleaving is impossible (and s3[i + j - 1] below could go out of range)
        if len(s3) != m + n:
            return False
        dp = [[False] * (n + 1) for _ in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 and j == 0:
dp[0][0] = True
continue
dp[i][j] = dp[i - 1][j] and s3[i + j - 1] == s1[i - 1] \
or dp[i][j - 1] and s3[i + j - 1] == s2[j - 1]
return dp[m][n]
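# Illustrative examples (not in the original file): the classic case
# isInterleave("aabcc", "dbbca", "aadbbcbcac") is True, while
# isInterleave("aabcc", "dbbca", "aadbbbaccc") is False.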
```
#### File: algorithm/lintcode/318.py
```python
class Solution:
"""
@param A: A string
@param B: A string
@return: A string array
"""
def characterGrid(self, A, B):
# write your code here.
n = len(A)
m = len(B)
found_index = None, None
for i, c in enumerate(A):
found = False
index = None
for j in range(m):
if B[j] == c:
found = True
index = j
break
if found:
found_index = i, index
break
res = [["."] * n for _ in range(m)]
a, b = found_index
for i in range(n):
res[b][i] = A[i]
for i in range(m):
res[i][a] = B[i]
res2 = []
for line in res:
res2.append("".join(line))
return res2
s = Solution()
A = "BANANA"
B = "APPLE"
print(s.characterGrid(A, B))
```
#### File: algorithm/lintcode/328.py
```python
class Solution:
"""
@param s: a string
@return: an array containing the length of each part
"""
def splitString(self, s):
# write your code here.
map = {}
for i, c in enumerate(s):
map[c] = map.get(c, [])
map[c].append(i)
intermediate = []
for k,v in map.items():
intermediate.append([min(v), max(v)])
intermediate.sort()
res = []
for ran in intermediate:
if (len(res) == 0 or ran[0] > res[-1][1]):
res.append(ran)
else:
res[-1][1] = max(res[-1][1], ran[1])
return [x[1]-x[0]+1 for x in res]
```
#### File: algorithm/lintcode/355.py
```python
import sys
from collections import deque
class Solution:
"""
@param heights: the heights of buildings.
@param k: the vision.
@param x: the energy to spend of the first action.
@param y: the energy to spend of the second action.
@return: the minimal energy to spend.
"""
def shuttleInBuildings(self, heights, k, x, y):
# write your code here.
stack = []
n = len(heights)
first_highest = [-1] * n
for i in range(n):
while stack and heights[stack[-1]] < heights[i]:
idx = stack.pop()
if i - idx <= k:
first_highest[i] = idx
stack.append(i)
dp = [sys.maxsize] * (n)
dp[0] = 0
for i in range(1,n):
dp[i] = min(dp[i],dp[i - 1] + y)
if i >= 2:
dp[i] = min(dp[i],dp[i - 2] + y)
if first_highest[i] != -1:
dp[i] = min(dp[i],dp[first_highest[i]] + x)
return dp[n-1]
```
#### File: algorithm/lintcode/359.py
```python
class Solution:
"""
@param lengths: the lengths of sticks at the beginning.
@return: return the minimum number of cuts.
"""
def makeEquilateralTriangle(self, lengths):
# write your code here.
lengths.sort()
counters = {}
n = len(lengths)
max_count, num = 0, -1
for w in lengths:
if max_count == 2:
if w != num:
return 1
else:
return 0
counters[w] = counters.get(w,0) + 1
if (counters[w] > max_count):
max_count = counters[w]
num = w
if w % 2 == 0 and w // 2 in counters:
return 1
return 2
```
#### File: algorithm/lintcode/360.py
```python
import heapq
class Heap:
def __init__(self):
self.heap = []
self.deleted = {}
self.length = 0
def push(self, element):
heapq.heappush(self.heap, element)
self.length += 1
def pop(self):
self.remove_redundant()
self.length -= 1
return heapq.heappop(self.heap)
def remove(self, element):
self.deleted[element] = self.deleted.get(element, 0) + 1
self.length -= 1
def remove_redundant(self):
while self.heap and self.deleted.get(self.heap[0]):
removed_duplicate = heapq.heappop(self.heap)
self.deleted[removed_duplicate] -= 1
if self.deleted[removed_duplicate] == 0:
del self.deleted[removed_duplicate]
def peek(self):
self.remove_redundant()
return self.heap[0]
def __len__(self):
return self.length
def __repr__(self):
return repr(self.heap)
class Solution:
"""
@param nums: A list of integers
@param k: An integer
@return: The median of the element inside the window at each moving
"""
def medianSlidingWindow(self, nums, k):
# write your code here
max_heap = Heap()
min_heap = Heap()
removed_elements = set()
right = 0
results = []
n = len(nums)
for left in range(n):
while right < n and right - left < k:
if len(max_heap) == 0 or nums[right] <= -max_heap.peek():
max_heap.push(-nums[right])
else:
min_heap.push(nums[right])
self.balance(max_heap, min_heap)
right += 1
if right - left == k:
results.append(-max_heap.peek())
if right >= n:
break;
if nums[left] <= -max_heap.peek():
max_heap.remove(-nums[left])
else:
min_heap.remove(nums[left])
self.balance(max_heap, min_heap)
return results
def balance(self, max_heap, min_heap):
while len(max_heap) < len(min_heap):
max_heap.push(-min_heap.pop())
while len(max_heap) > len(min_heap) + 1:
min_heap.push(-max_heap.pop())
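# Illustrative usage (sample values are an assumption, not from the original file):
# the two heaps with lazy deletion keep the lower median at the top of max_heap.
if __name__ == "__main__":
    print(Solution().medianSlidingWindow([1, 2, 7, 8, 5], 3))  # expected [2, 7, 7]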
```
#### File: algorithm/lintcode/362.py
```python
from collections import deque
class Solution:
"""
@param nums: A list of integers.
@param k: An integer
@return: The maximum number inside the window at each moving.
"""
def maxSlidingWindow(self, nums, k):
# write your code here
result = []
queue = deque()
for i, num in enumerate(nums):
while queue and nums[queue[-1]] < num:
queue.pop()
queue.append(i)
if i >= k - 1:
result.append(nums[queue[0]])
if queue[0] == i - k + 1:
queue.popleft()
return result
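# Illustrative usage (not part of the original file): the deque holds indices of a
# decreasing run of values, so the window maximum is always at queue[0].
if __name__ == "__main__":
    print(Solution().maxSlidingWindow([1, 2, 7, 7, 8], 3))  # expected [7, 7, 8]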
```
#### File: algorithm/lintcode/363.py
```python
import sys
class Solution:
"""
@param heights: a list of integers
@return: a integer
"""
def trapRainWater(self, heights):
# write your code here
stack = []
water_capacity = 0
n = len(heights)
for i in range(n):
curr = heights[i]
while stack and curr >= heights[stack[-1]]:
top = stack.pop()
left = heights[stack[-1]] if stack else heights[top] #cannot hold water for the 1st item
left_index = stack[-1] if stack else -1 #easier to calculate length
length = (i - left_index - 1)
if left < curr:
water_capacity += (left - heights[top]) * length
else:
water_capacity += (curr - heights[top]) * length
stack.append(i)
return (water_capacity)
```
#### File: algorithm/lintcode/374.1.py
```python
DIRECTIONS = [
(0, 1),
(1, 0),
(0, -1),
(-1, 0)
]
from collections import deque
class Solution:
"""
@param matrix: a matrix of m x n elements
@return: an integer list
"""
def spiralOrder(self, matrix):
# write your code here
if not matrix or not matrix[0]:
return []
res = []
self.dfs(matrix, 0, 0, 0, set([(0, 0)]), res)
return res
def dfs(self, matrix, x, y, d, v, res):
res.append(matrix[x][y])
for i in range(4):
delta_x, delta_y = DIRECTIONS[(d + i) % 4]
nx, ny = x + delta_x, y + delta_y
if not self.is_valid(nx, ny, matrix, v):
continue
v.add((nx, ny))
self.dfs(matrix, nx, ny, (d + i) % 4, v, res)
            v.remove((nx, ny))  # set.pop() removes an arbitrary element; undo exactly the cell added above
break
def is_valid(self, x, y, matrix, visited):
n = len(matrix)
m = len(matrix[0])
if not (0 <= x < n and 0 <= y < m):
return False
if (x, y) in visited:
return False
return True
```
#### File: algorithm/lintcode/380.py
```python
class Solution:
"""
@param headA: the first list
@param headB: the second list
@return: a ListNode
"""
def getIntersectionNode(self, headA, headB):
# write your code here
if not headA or not headB:
return None
p1 = headA
while p1.next:
p1 = p1.next
p1.next = headB
slow, fast = headA, headA
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if slow == fast:
break
slow = headA
while slow != fast:
fast = fast.next
slow = slow.next
p1.next = None
return slow
```
#### File: algorithm/lintcode/390.py
```python
import sys
class Solution:
"""
@param: A: An integer matrix
@return: The index of the peak
"""
def findPeakII(self, A):
# write your code here
if not A or not A[0]:
return None
n = len(A)
m = len(A[0])
start_x, end_x = 0, n - 1
"""
为什么不需要考虑mid + 1或者 -1 越界呢?
因为start + 1 < end:
start + 2 <= end
start mid end
这个时候循环还可以进入。因此不会越界。
"""
while start_x + 1 < end_x:
mid_x = (start_x + end_x) // 2
col_i = self.find_max_in_col(A, mid_x)
if A[mid_x][col_i] < A[mid_x + 1][col_i]:
start_x = mid_x
elif A[mid_x][col_i] < A[mid_x - 1][col_i]:
end_x = mid_x
else:
return [mid_x, col_i]
up_col_i = self.find_max_in_col(A, start_x)
down_col_i = self.find_max_in_col(A, end_x)
if A[start_x][up_col_i] > A[end_x][down_col_i]:
return start_x, up_col_i
return end_x, down_col_i
def find_max_in_col(self, A, row_index):
index, max_value = None, -sys.maxsize
for i, v in enumerate(A[row_index]):
if v > max_value:
max_value = v
index = i
return index
```
#### File: algorithm/lintcode/391.1.py
```python
"""
Definition of Interval.
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
"""
import sys
class Event:
TAKEOFF = 1
LANDING = 0
class Solution:
"""
@param airplanes: An interval array
@return: Count of airplanes are in the sky.
"""
def countOfAirplanes(self, airplanes):
if not airplanes:
return 0
list = []
for interval in airplanes:
list.append((interval.start, Event.TAKEOFF))
list.append((interval.end, Event.LANDING))
list.sort()
count = 0
max_count = -sys.maxsize
for time, event in list:
if event == Event.TAKEOFF:
count += 1
else:
count -= 1
max_count = max(max_count, count)
return max_count
```
#### File: algorithm/lintcode/404.1.py
```python
class Solution:
"""
@param A: An integer array
@param start: An integer
@param end: An integer
@return: the number of possible answer
"""
def subarraySumII(self, A, start, end):
# write your code here
n = len(A)
prefix_sum = [0] * n
l = r = 0
count = 0
for j in range(n):
if j == 0:
prefix_sum[j] = A[0]
else:
prefix_sum[j] = prefix_sum[j - 1] + A[j]
"""
.....xxxoooooxxxxx..j
l r
"""
            # treat prefix_sum[l - 1] / prefix_sum[r - 1] as 0 when l or r is 0;
            # otherwise Python's negative indexing silently reads the last prefix sum
            while l <= j and prefix_sum[j] - (prefix_sum[l - 1] if l > 0 else 0) > end:
                l += 1
            while r <= j and prefix_sum[j] - (prefix_sum[r - 1] if r > 0 else 0) >= start:
                r += 1
count += r - l
return count
```
#### File: algorithm/lintcode/406.py
```python
import sys
class Solution:
"""
@param nums: an array of integers
@param s: An integer
@return: an integer representing the minimum size of subarray
"""
def minimumSize(self, nums, s):
# write your code here
prefix_sum = [0]
sum = 0
for i in range(1, len(nums) + 1):
sum += nums[i - 1]
prefix_sum.append(sum)
"""
01234
i j
xxxxxxxxo
"""
minimum_length = sys.maxsize
right = 0
for left in range(len(nums)):
while right < len(nums) and prefix_sum[right + 1] - prefix_sum[left] < s:
right += 1
if right >= len(nums):
break
minimum_length = min(minimum_length, right - left + 1)
return minimum_length if minimum_length != sys.maxsize else -1
```
#### File: algorithm/lintcode/413.py
```python
class Solution:
"""
@param n: the integer to be reversed
@return: the reversed integer
"""
def reverseInteger(self, n):
# write your code here
        res = int(str(abs(n))[::-1])
        # parenthesize the shifts: `1 << 31 - 1` would parse as 1 << 30
        if n > 0 and res <= (1 << 31) - 1:
            return res
        if n < 0 and -res >= -(1 << 31):
            return -res
return 0
```
#### File: algorithm/lintcode/415.1.py
```python
class Solution:
"""
@param s: A string
@return: Whether the string is a valid palindrome
"""
def isPalindrome(self, s):
# write your code here
        # Two pointers: one scans from the front, one from the back; stop when they meet.
        # Non-alphanumeric characters are skipped along the way.
        # Overall time complexity: O(n)
start, end = 0, len(s) - 1
while start < end:
            # skip characters that are neither letters nor digits
while start < end and not s[start].isalpha() and not s[start].isnumeric(): start += 1
while start < end and not s[end].isalpha() and not s[end].isnumeric(): end -= 1
if s[start].lower() != s[end].lower():
return False
start += 1
end -= 1
return True
```
#### File: algorithm/lintcode/419.py
```python
class Solution:
"""
@param s: Roman representation
@return: an integer
"""
def romanToInt(self, s):
# write your code here
CHAR_TO_DIGIT = {
"I": 1,
"V": 5,
"X": 10,
"L": 50,
"C": 100,
"D": 500,
"M": 1000
}
result = []
for i in range(len(s)):
if result and result[-1] < CHAR_TO_DIGIT[s[i]]:
result[-1] = -result[-1]
result.append(CHAR_TO_DIGIT[s[i]])
return sum(result)
s = Solution()
print(s.romanToInt("IV"))
```
#### File: algorithm/lintcode/428.2.py
```python
class Solution:
"""
@param x {float}: the base number
@param n {int}: the power number
@return {float}: the result
"""
def myPow(self, x, n):
# write your code here
if n == 0:
return 1
        if n < 0:
            return 1 / self.myPow(x, -n)
        ans = self.myPow(x, n // 2)
if n % 2 == 0:
return ans * ans
return ans * ans * x
```
#### File: algorithm/lintcode/433.py
```python
from collections import deque
class Solution:
"""
@param grid: a boolean 2D matrix
@return: an integer
"""
def numIslands(self, grid):
# write your code here
"""
由点及面问题
traverse entire 2D Matrix.
for each point, put in BFS(with visited)
"""
island = 0
visited = set()
for row_i in range(len(grid)):
for column_i in range(len(grid[0])):
                if grid[row_i][column_i] and (row_i, column_i) not in visited: # start a BFS from every unvisited land cell; the shared visited set prevents counting an island twice
self.bfs(grid, visited, row_i, column_i)
island += 1
return island
"""
围绕一个点(root)进行BFS搜索
如果隔壁的点访问过则跳过,没访问过驾到队列里展开。
只对是1的点进行操作,0的点默认掠过
"""
def bfs(self, grid, visited, row_i, column_i):
queue = deque([(row_i, column_i)])
visited.add((row_i, column_i))
while (queue):
(head_x, head_y) = queue.popleft()
            for delta_vect_x, delta_vect_y in [(1, 0), (0, -1), (-1, 0), (0, 1)]: # the four neighbouring directions
new_location_index = (head_x + delta_vect_x, head_y + delta_vect_y)
if not (self.check_valid(new_location_index, grid, visited)):
continue
visited.add(new_location_index)
queue.append(new_location_index)
def check_valid(self, location, grid, visited):
if location[0] < 0 or location[0] >= len(grid):
return False
if location[1] < 0 or location[1] >= len(grid[0]):
return False
if location in visited:
return False
# print (location)
return grid[location[0]][location[1]]
```
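A short, hedged check of the BFS island counter above on a hand-made grid (three separate islands; run after the class definition):
```python
grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [0, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1],
]
print(Solution().numIslands(grid))  # 3
```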
#### File: algorithm/lintcode/437.dp.py
```python
# import sys
# class Solution:
# """
# @param pages: an array of integers
# @param k: An integer
# @return: an integer
# """
# def copyBooks(self, pages, K):
# # write your code here
# if not pages:
# return 0
# f = [[sys.maxsize] * len(pages) for _ in range(K + 1)]
# for i in range(len(pages)):
# f[0][i] = sys.maxsize
# for k in range(K + 1):
# f[k][0] = 0
#
# for k in range(K + 1):
# for i in range(len(pages)):
# sum = 0
# for j in range(0, i + 1):
# f[k][i] = min(f[k][i], max(f[k - 1][j], sum))
#
```
#### File: algorithm/lintcode/437.py
```python
class Solution:
"""
@param pages: an array of integers
@param k: An integer
@return: an integer
"""
def copyBooks(self, pages, k):
# write your code here
if not pages:
return 0
start, end = max(pages), sum(pages)
while start + 1 < end:
mid = (start + end) // 2
if self.copiers_needed(pages, mid) > k:
start = mid
else:
end = mid
if self.copiers_needed(pages, start) <= k:
return start
if self.copiers_needed(pages, end) <= k:
return end
return -1
def copiers_needed(self, pages, timelimit):
head_count = 1
pages_on_hand = 0
for page in pages:
if pages_on_hand + page > timelimit:
head_count += 1
pages_on_hand = 0
pages_on_hand += page
return head_count
```
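A hedged usage example for the binary-search-on-answer solution above: with pages [3, 2, 4] and 2 copiers, the best split is [3, 2] and [4], so the slowest copier needs 5 time units.
```python
print(Solution().copyBooks([3, 2, 4], 2))  # 5
```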
#### File: algorithm/lintcode/442.1.py
```python
class TrieNode:
def __init__(self):
self.children = {}
self.is_word = False
class Trie:
def __init__(self):
        # do initialization if necessary
self.root = TrieNode()
"""
@param: word: a word
@return: nothing
"""
def insert(self, word):
# write your code here
node = self.root
for c in word:
node.children[c] = node.children.get(c, TrieNode())
node = node.children[c]
node.is_word = True
"""
@param: word: A string
@return: if the word is in the trie.
"""
def search(self, word):
# write your code here
node = self.find(word)
return node is not None and node.is_word
def find(self, word):
node = self.root
for c in word:
if c not in node.children:
return None
node = node.children[c]
return node
"""
@param: prefix: A string
@return: if there is any word in the trie that starts with the given prefix.
"""
def startsWith(self, prefix):
# write your code here
node = self.root
for c in prefix:
if c not in node.children:
return False
node = node.children[c]
return True
```
#### File: algorithm/lintcode/463.merge_sort.py
```python
class Solution:
"""
@param A: an integer array
@return: nothing
"""
def sortIntegers(self, A):
# write your code here
if not A:
return
temp = [0] * len(A)
self.merge_sort(A, 0, len(A) - 1, temp)
def merge_sort(self, nums, start, end, temp):
if start >= end:
return
self.merge_sort(nums, start, (start + end) // 2, temp)
self.merge_sort(nums, (start + end) // 2 + 1, end, temp)
self.merge_sort_merge(nums, start, end, temp)
def merge_sort_merge(self, nums, start, end, temp):
mid = (start + end) // 2
index = start
left = start
right = mid + 1
while left <= mid and right <= end:
if nums[left] < nums[right]:
temp[index] = nums[left]
left += 1
else:
temp[index] = nums[right]
right += 1
index += 1
while left <= mid:
temp[index] = nums[left]
left += 1
index += 1
while right <= end:
temp[index] = nums[right]
right += 1
index += 1
for index in range(start, end + 1):
nums[index] = temp[index]
```
#### File: algorithm/lintcode/473.py
```python
class TrieNode:
def __init__(self):
self.children = {}
self.is_word = False
class WordDictionary:
def __init__(self):
self.root = TrieNode()
"""
@param: word: Adds a word into the data structure.
@return: nothing
"""
def addWord(self, word):
# write your code here
node = self.root
for c in word:
node.children[c] = node.children.get(c, TrieNode())
node = node.children[c]
node.is_word = True
"""
@param: word: A word could contain the dot character '.' to represent any one letter.
@return: if the word is in the data structure.
"""
def search(self, word):
# write your code here
return self.search_dfs(word, 0, self.root)
"""
@param: word: A word could contain the dot character '.' to represent any one letter.
@param: i: index in word
@param: node: node to start searching
@return: if the word is in the data structure.
"""
def search_dfs(self, word, i, node):
if i == len(word):
return node.is_word
if word[i] != '.':
if word[i] not in node.children:
return False
return self.search_dfs(word, i + 1, node.children[word[i]])
for child in node.children:
if self.search_dfs(word, i + 1, node.children[child]):
return True
return False
```
#### File: algorithm/lintcode/476.dp.py
```python
import sys
class Solution:
"""
@param A: An integer array
@return: An integer
"""
def stoneGame(self, A):
if not A:
return 0
dp = [[sys.maxsize] * len(A) for _ in range(len(A))]
for i in range(len(A)):
dp[i][i] = 0
prefix_sum = self.calculate_prefix_sum(A)
for length in range(1, len(A) + 1):
for start in range(0, len(A) - length + 1):
end = start + length - 1
# curr_range_sum = sum(A[start: end + 1])
if start == 0:
curr_range_sum = prefix_sum[end]
else:
curr_range_sum = prefix_sum[end] - prefix_sum[start - 1]
for k in range(start, end):
dp[start][end] = min(dp[start][end], dp[start][k] + dp[k + 1][end] + curr_range_sum)
# print (dp[0][len(A) - 1])
return (dp[0][len(A) - 1])
def calculate_prefix_sum(self, nums):
if not nums:
return None
prefix_sum = [0] * len(nums)
prefix_sum[0] = nums[0]
for i in range(1, len(nums)):
prefix_sum[i] = (prefix_sum[i - 1] + nums[i])
return prefix_sum
A = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
s = Solution()
print(s.stoneGame(A))
```
#### File: algorithm/lintcode/476.memo.py
```python
import sys
class Solution:
"""
@param A: An integer array
@return: An integer
"""
def stoneGame(self, A):
# write your code here
if not A:
return 0
memo = [[sys.maxsize] * len(A) for _ in range(len(A))]
return (self.memo_search(A, 0, len(A) - 1, memo))
"""
return minimum cost for range A[start: end + 1]
"""
def memo_search(self, nums, start, end, memo):
if memo[start][end] != sys.maxsize:
return memo[start][end]
if start >= end:
return 0
current_step_cost = sum(nums[start: end + 1])
min_cost = sys.maxsize
for k in range(start, end):
left_min_cost = self.memo_search(nums, start, k, memo)
right_min_cost = self.memo_search(nums, k + 1, end, memo)
min_cost = min(min_cost, left_min_cost + right_min_cost + current_step_cost)
memo[start][end] = min_cost
return min_cost
A = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
s = Solution()
print(s.stoneGame(A))
```
#### File: algorithm/lintcode/499.py
```python
class WordCount:
# @param {str} line a text, for example "Bye Bye see you next"
def mapper(self, _, line):
# Write your code here
# Please use 'yield key, value'
word_lists = line.split(" ")
for word in word_lists:
yield word, 1
# @param key is from mapper
# @param values is a set of value with the same key
def reducer(self, key, values):
# Write your code here
# Please use 'yield key, value'
yield key, sum(values)
```
#### File: algorithm/lintcode/507.py
```python
class Solution:
"""
@param: nums: A list of integers
@return: nothing
"""
def wiggleSort(self, nums):
# write your code here
if not nums:
return
n = len(nums)
mean = self.get_mean(nums)
self.three_way_partition(nums, 0, n - 1, mean)
result = [0] * n
result[::2] = nums[n // 2:]
result[1::2] = nums[: n // 2]
nums[:] = result
def get_mean(self, nums):
n = len(nums)
if n % 2 == 1:
return self.quick_select(nums, 0, n - 1, n // 2 + 1)
return (self.quick_select(nums, 0, n - 1, n // 2) + self.quick_select(nums, 0, n - 1, n // 2 + 1)) / 2.0
def three_way_partition(self, nums, start, end, mid):
n = len(nums)
l = i = 0
r = n - 1
while i <= r:
if nums[i] > mid:
nums[i], nums[l] = nums[l], nums[i]
i += 1
l += 1
elif nums[i] < mid:
nums[i], nums[r] = nums[r], nums[i]
r -= 1
else:
i += 1
def quick_select(self, nums, start, end, k):
if start >= end:
return nums[start]
left, right = start, end
pivot = nums[(start + end) // 2]
while left <= right:
while left <= right and nums[left] < pivot:
left += 1
while left <= right and nums[right] > pivot:
right -= 1
if left <= right:
nums[left], nums[right] = nums[right], nums[left]
left += 1
right -= 1
if right - start + 1 >= k:
return self.quick_select(nums, start, right, k)
if left - start + 1 <= k:
return self.quick_select(nums, left, end, k - (left - start))
return nums[right + 1]
```
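A hedged check of the wiggle sort above: after the call the array should satisfy nums[0] < nums[1] > nums[2] < nums[3] and so on. The exact arrangement may vary, so only the wiggle property is asserted:
```python
nums = [1, 5, 1, 1, 6, 4]
Solution().wiggleSort(nums)
assert all(nums[i] < nums[i + 1] if i % 2 == 0 else nums[i] > nums[i + 1]
           for i in range(len(nums) - 1))
print(nums)  # e.g. [1, 5, 1, 6, 1, 4]
```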
#### File: algorithm/lintcode/509.cassandra.py
```python
from YelpHelper import Location, Restaurant, GeoHash, Helper
import heapq
class MiniYelp:
def __init__(self):
# initialize your data structure here.
self.restaurants = {} #restaurant_id, restaurant
self.restaurant_name_2_id = {} #name, restaurant_id
        self.restaurants_2_geolocation_prefix = {} # geohash prefixes stored per restaurant, which makes deletion easier later
        self.geo_location_table = {} # tiered (redis-style) storage: key = geohash prefix of 1-8 characters, value = set of restaurant_ids
self.geohashing_precision = [2500, 630, 78, 20, 2.4, 0.61, 0.076, 0.019] #for prefix 1 - 8 geohashing precision
# @param {str} name
# @param {Location} location
# @return {int} restaurant's id
def add_restaurant(self, name, location):
# Write your code here
if name in self.restaurant_name_2_id:
return self.restaurant_name_2_id[name]
new_restaurant = Restaurant.create(name, location)
self.restaurants[new_restaurant.id] = new_restaurant
self.restaurant_name_2_id[name] = new_restaurant.id
self.create_geo_location_item(location, new_restaurant.id)
return new_restaurant.id
"""
    for each new restaurant, generate geohash prefixes of length 1 - 8 and put them in the geo_location_table;
    the value stored under each prefix is the restaurant_id
@param: location, restaurant_id
@return: None
"""
def create_geo_location_item(self, location, restaurant_id):
geohash_string = GeoHash.encode(location)
        for i in range(1, 9): # calculate geohash prefixes of length 1-8
prefix = geohash_string[0:i]
if prefix not in self.geo_location_table:
self.geo_location_table[prefix] = set()
self.geo_location_table[prefix].add(restaurant_id)
if restaurant_id not in self.restaurants_2_geolocation_prefix:
self.restaurants_2_geolocation_prefix[restaurant_id] = set()
self.restaurants_2_geolocation_prefix[restaurant_id].add(prefix)
"""
using nearest geolocation to find restaurant
"""
def find_neighbors_by_gelocation(self, location, k):
geohash_string = GeoHash.encode(location)
prefix_index_to_search = self.binary_search(0, 7, self.geohashing_precision, k) #find the next largest number
if prefix_index_to_search == -1:
            # the requested distance exceeds the coarsest geohash precision, so the entire table has to be searched; this should rarely happen
return self.search_all_restaurants_for_nearest(location, k)
if geohash_string[0:prefix_index_to_search + 1] in self.geo_location_table:
return self.search_loc_list(geohash_string[0:prefix_index_to_search + 1], location, k)
return []
"""
    Find the next largest value in the geohashing error-precision table.
    We only have to search the range from k up to the next largest error.
    Example: for k = 6.8km we only need to search within error range <= 20km, which corresponds to a geohash prefix of 4 characters.
@param: start, end, nums, target
@return: next bigger index in prefix error table
"""
def binary_search(self, start, end, nums, target):
while start + 1 < end:
mid = (start + end) // 2
if nums[mid] < target:
end = mid
else:
start = mid
if nums[end] > target:
return end
if nums[start] > target:
return start
return -1
"""
if k > lowest precision (2500km) in the geohashing algorithm, that means all restaurants need to be searched.
this should rarely happen
"""
def search_all_restaurants_for_nearest(self, location, k):
result = [] #(dist, restaurant name)
for restaurant_id, restaurant in self.restaurants.items():
curr_dist = Helper.get_distance(restaurant.location, location)
if curr_dist < k:
result.append((curr_dist, restaurant.name))
result.sort()
return [restaurant[1] for restaurant in result]
"""
search only 1 prefix for a location which distance < k
this simulate a single db on a single machine.
@param: prefix: geohashing prefix
@param: location, k
@return: a list of restaurant names
"""
def search_loc_list(self, prefix, location, k):
heap = []
result = []
for restaurant_id in self.geo_location_table[prefix]:
curr_dist = Helper.get_distance(self.restaurants[restaurant_id].location, location)
if curr_dist < k:
heapq.heappush(heap, (curr_dist, self.restaurants[restaurant_id].name))
while heap:
result.append(heapq.heappop(heap)[1])
return result
# @param {int} restaurant_id
# @return nothing
def remove_restaurant(self, restaurant_id):
# Write your code here
if restaurant_id in self.restaurants:
restaurant = self.restaurants[restaurant_id]
del self.restaurant_name_2_id[restaurant.name]
for prefix in self.restaurants_2_geolocation_prefix[restaurant_id]:
self.geo_location_table[prefix].remove(restaurant_id)
del self.restaurants_2_geolocation_prefix[restaurant_id]
del self.restaurants[restaurant_id]
# @param {Location} location
# @param {double} k, distance smaller than k miles
# @return {str[]} a list of restaurant's name and sort by
# distance from near to far.
def neighbors(self, location, k):
# Write your code here
return self.find_neighbors_by_gelocation(location, k)
```
#### File: algorithm/lintcode/521.1.py
```python
class Solution:
"""
@param nums: an array of integers
@return: the number of unique integers
"""
def deduplication(self, nums):
if not nums:
return 0
n = len(nums)
nums.sort()
j = 0
for i in range(n):
while j < n and nums[j] == nums[i]:
j += 1
if j >= n:
break
nums[i + 1] = nums[j]
return i + 1
```
#### File: algorithm/lintcode/535.py
```python
class Solution:
"""
@param root: The root of binary tree.
@return: The maximum amount of money you can rob tonight
"""
def houseRobber3(self, root):
# write your code here
m_not_in_p, m_in_p = self.dfs(root)
return max(m_not_in_p, m_in_p)
def dfs(self, root):
if not root:
return 0, 0
m_not_in_p_left, m_in_p_left = self.dfs(root.left)
m_not_in_p_right, m_in_p_right = self.dfs(root.right)
m_in_p = m_not_in_p_left + m_not_in_p_right + root.val
m_not_in_p = max(m_not_in_p_left, m_in_p_left) + max(m_not_in_p_right, m_in_p_right)
return m_not_in_p, m_in_p
```
#### File: algorithm/lintcode/537.py
```python
class NGram:
# @param {int} n a integer
# @param {str} string a string
def mapper(self, _, n, string):
# Write your code here
# Please use 'yield key, value' here
left, right = 0, n -1
m = len(string)
while right < m:
yield string[left: right + 1], 1
left += 1
right += 1
# @param key is from mapper
# @param values is a set of value with the same key
def reducer(self, key, values):
# Write your code here
# Please use 'yield key, value' here
count = 0
for _ in values:
count += 1
yield key, count
```
#### File: algorithm/lintcode/56.1.py
```python
class Solution:
"""
@param numbers: An array of Integer
@param target: target = numbers[index1] + numbers[index2]
@return: [index1 + 1, index2 + 1] (index1 < index2)
"""
def twoSum(self, numbers, target):
# write your code here
v = {}
for i, n in enumerate(numbers):
if target - n in v:
return [v[target - n], i]
v[n] = i
```
#### File: algorithm/lintcode/566.py
```python
class GFSClient(BaseGFSClient):
"""
@param: chunkSize: An integer
"""
def __init__(self, chunkSize):
        # do initialization if necessary
super().__init__()
self.chunk_size = chunkSize
self.filename_2_number_of_chunks = {}
"""
@param: filename: a file name
@return: conetent of the file given from GFS
"""
def read(self, filename):
        # write your code here
if filename not in self.filename_2_number_of_chunks:
return None
number_of_chunks = self.filename_2_number_of_chunks[filename]
result = ""
for chunk_index in range(number_of_chunks):
result += super().readChunk(filename, chunk_index)
return result
"""
@param: filename: a file name
@param: content: a string
@return: nothing
"""
def write(self, filename, content):
# write your code here
number_of_chunks = len(content) // self.chunk_size if len(content) % self.chunk_size == 0 else len(content) // self.chunk_size + 1
self.filename_2_number_of_chunks[filename] = number_of_chunks
for chunk_index in range(number_of_chunks):
super().writeChunk(filename, chunk_index, content[chunk_index * self.chunk_size: self.chunk_size * (chunk_index + 1)])
```
#### File: algorithm/lintcode/585.py
```python
class Solution:
"""
@param nums: a mountain sequence which increase firstly and then decrease
@return: then mountain top
"""
def mountainSequence(self, nums):
start, end = 0, len(nums) - 1
while start + 1 < end:
mid = start + (end - start) // 2
if nums[mid] > nums[mid + 1]:
end = mid
else:
start = mid
return max(nums[start], nums[end])
```
#### File: algorithm/lintcode/587.py
```python
class Solution:
"""
@param nums: an array of integer
@param target: An integer
@return: An integer
"""
def twoSum6(self, nums, target):
# write your code here
nums = sorted(nums)
count = 0
hash = set()
visited = set()
for i in range(len(nums)):
if target - nums[i] in hash and nums[i] not in visited:
count += 1
visited.add(nums[i])
hash.add(nums[i])
return count
s = Solution()
nums = [1,1]
target = 2
print(s.twoSum6(nums, target))
```
#### File: algorithm/lintcode/589.py
```python
class ConnectingGraph:
"""
@param: n: An integer
"""
def __init__(self, n):
        # do initialization if necessary
self.fathers = {}
for i in range(n + 1):
self.fathers[i] = i
"""
@param: a: An integer
@param: b: An integer
@return: nothing
"""
def connect(self, a, b):
# write your code here
a_father = self.find(a)
b_father = self.find(b)
if a_father == b_father:
return
self.fathers[a_father] = b_father
# def find(self, x):
# j = x
# while self.fathers[j] != j:
# j = self.fathers[j]
# while (x != j):
# fx = self.fathers[x]
# self.fathers[x] = j
# x = fx
# return j
#recursion find
def find(self, x):
if self.fathers[x] == x:
return x
self.fathers[x] = self.find(self.fathers[x])
return self.fathers[x]
"""
@param: a: An integer
@param: b: An integer
@return: A boolean
"""
def query(self, a, b):
# write your code here
return self.find(a) == self.find(b)
```
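A brief, hedged usage example for the union-find structure above (path compression happens inside find; run after the class definition):
```python
graph = ConnectingGraph(5)
graph.connect(1, 2)
print(graph.query(1, 3))  # False
graph.connect(2, 4)
print(graph.query(1, 4))  # True
```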
#### File: algorithm/lintcode/591.py
```python
class ConnectingGraph3:
"""
@param a: An integer
@param b: An integer
@return: nothing
"""
def __init__(self, n):
# initialize your data structure here.
self.father = {}
self.count = 0
for i in range(1, n + 1):
self.father[i] = i
self.count += 1
def connect(self, a, b):
# write your code here
a_father = self.find(a)
b_father = self.find(b)
if a_father == b_father:
return
self.father[a_father] = b_father
self.count -= 1
def find(self, x):
j = x
while self.father[j] != j:
j = self.father[j]
while x != j:
xf = self.father[x]
self.father[x] = j
x = xf
return j
"""
@return: An integer
"""
def query(self):
# write your code here
return self.count
```
#### File: algorithm/lintcode/604.py
```python
class Solution:
"""
@param nums: a list of integers.
@param k: length of window.
@return: the sum of the element inside the window at each moving.
"""
def winSum(self, nums, k):
n = len(nums)
right = 0
curr_sum = 0
result = []
for left in range(n):
while right < n and right - left < k:
curr_sum += nums[right]
right += 1
result.append(curr_sum)
if right >= n:
break
curr_sum -= nums[left]
return result
```
#### File: algorithm/lintcode/610.py
```python
class Solution:
"""
@param nums: an array of Integer
@param target: an integer
@return: [num1, num2] (num1 < num2)
"""
def twoSum7(self, nums, target):
# write your code here
if not nums or len(nums) < 2:
return (-1, -1)
n = len(nums)
target = abs(target)
#nums[right] - nums[left] = target
        for i in range(n - 1):  # the last element has no candidate to its right
result = self.binary_search(nums, i + 1, n - 1, target + nums[i])
if result != -1:
return nums[i], result
return (-1, -1)
def binary_search(self, nums, start, end, target):
while start + 1 < end:
mid = (start + end) // 2
if nums[mid] < target:
start = mid
else:
end = mid
if nums[start] == target:
return nums[start]
if nums[end] == target:
return nums[end]
return -1
```
#### File: algorithm/lintcode/627.Longest.Palindrome.py
```python
class Solution:
"""
@param s: a string which consists of lowercase or uppercase letters
@return: the length of the longest palindromes that can be built
"""
"""
按最大到小排序,排序完成以后只插入偶数,插入完以后插入最大的奇数
"""
def longestPalindrome(self, s):
# write your code here
dict = {}
# count = 0
# {dict[char]:dict[char] + 1 for char in s if dict[char] else dict[char]:1}
length = 0
print(s)
for char in s:
dict[char] =0
print(dict)
for char in s:
dict[char] += 1
print(dict)
max_odd = 0
for (char,count) in dict.items():
if count % 2 == 0:
length += count
else:
if count > max_odd:
max_odd = count
return length+max_odd
def main():
s = Solution()
print(s.longestPalindrome("NTrQdQGgwtxqRTSBOitAXUkwGLgUHtQOmYMwZlUxqZysKpZxRoehgirdMUgy"))
if __name__ == "__main__":
main()
```
#### File: algorithm/lintcode/645.py
```python
class Solution:
# @param {int} n a party with n people
# @return {int} the celebrity's label or -1
def findCelebrity(self, n):
# Write your code here
if not n or n == 0:
return -1
candidate = 0
for i in range(n):
# if Celebrity.knows(i, candidate):
# pass
if not Celebrity.knows(i, candidate):
candidate = i
for i in range(n):
if i == candidate:
continue
if not Celebrity.knows(i, candidate) or Celebrity.knows(candidate, i):
return -1
return candidate
```
#### File: algorithm/lintcode/654.py
```python
class Solution:
"""
@param A: a sparse matrix
@param B: a sparse matrix
@return: the result of A * B
"""
def multiply(self, A, B):
# write your code here
if not A or not B:
return -1
m = len(A)
n = len(A[0])
p = len(B[0])
row_vector = [
[
(j, A[i][j])
for j in range(n)
if A[i][j] != 0
]
for i in range(m)
]
col_vector = [
[
(i, B[i][j])
for i in range(n)
if B[i][j] != 0
]
for j in range(p)
]
result = [
[
self.multi(row, col)
for col in col_vector
]
for row in row_vector
]
return result
def multi(self, row, col):
i = j = 0
sum = 0
while i < len(row) and j < len(col):
index_row, val_row = row[i]
index_col, val_col = col[j]
if index_row < index_col:
i += 1
elif index_row > index_col:
j += 1
else:
sum += val_row * val_col
i += 1
j += 1
return sum
```
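A hedged example of the sparse-matrix multiplication above, using a small test case (only the non-zero entries take part in the inner products):
```python
A = [[1, 0, 0],
     [-1, 0, 3]]
B = [[7, 0, 0],
     [0, 0, 0],
     [0, 0, 1]]
print(Solution().multiply(A, B))  # [[7, 0, 0], [-7, 0, 3]]
```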
#### File: algorithm/lintcode/656.py
```python
class Solution:
"""
@param num1: a non-negative integers
@param num2: a non-negative integers
@return: return product of num1 and num2
"""
def multiply(self, num1, num2):
# write your code here
num1_lens = len(num1)
num2_lens = len(num2)
num3_lens = num1_lens + num2_lens
num3 = [0] * num3_lens
for i in range(num1_lens - 1, -1, -1):
for j in range(num2_lens - 1, -1, -1):
num3[i + j + 1] += (ord(num1[i]) - ord('0')) * (ord(num2[j]) - ord('0'))
for i in range(num3_lens - 2, -1, -1):
num3[i] += num3[i + 1] // 10
num3[i + 1] %= 10
        # find the first non-zero position in the result array
while i < num3_lens and num3[i] == 0:
i += 1
#when everything is 0
if i == num3_lens:
return "0"
result = ""
return result.join([chr(x + ord('0')) for x in num3[i:]])
```
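A hedged sanity check for the digit-by-digit string multiplication above (run after the class definition):
```python
s = Solution()
print(s.multiply("123", "45"))  # "5535"
print(s.multiply("0", "52"))    # "0"
```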
#### File: algorithm/lintcode/660.1.py
```python
class Solution:
def __init__(self):
self.last_buffer = [None] * 4
self.next_read = 0
self.count_in_buffer = 0
# @param {char[]} buf destination buffer
# @param {int} n maximum number of characters to read
# @return {int} the number of characters read
def read(self, buf, n):
# Write your code here
count = self.readBuf(buf, 0, n)
while count < n:
cnt = Reader.read4(self.last_buffer)
self.count_in_buffer += cnt
if cnt == 0:
return count
count += self.readBuf(buf, count, n - count)
return count
def readBuf(self, buf, i, n):
count = 0
while self.count_in_buffer and count < n:
buf[i] = (self.last_buffer[self.next_read % 4])
self.next_read = (self.next_read + 1) % 4
self.count_in_buffer -= 1
count += 1
i+=1
return count
```
#### File: algorithm/lintcode/660.py
```python
class Solution:
def __init__(self):
self.buf = [None] * 4
self.next_w = 0
self.next_r = 0
# @param {char[]} buf destination buffer
# @param {int} n maximum number of characters to read
# @return {int} the number of characters read
def read(self, buf, n):
# Write your code here
i = 0
while i < n:
if self.next_r == self.next_w:
self.next_r, self.next_w = 0, Reader.read4(self.buf)
if self.next_w == self.next_r:
break
buf[i], i, self.next_r = self.buf[self.next_r], i + 1, self.next_r + 1
return i
```
#### File: algorithm/lintcode/761.py
```python
class Solution:
"""
@param arr: an array of non-negative integers
@return: minimum number of elements
"""
def minElements(self, arr):
# write your code here
if not arr:
return -1
arr = sorted(arr)
n = len(arr)
prefix_sum = [0] * (n + 1)
for i in range(1, n + 1):
prefix_sum[i] = prefix_sum[i - 1] + arr[i - 1]
now_sum = 0
count = 0
for i in range(n - 1, -1, -1):
now_sum += arr[i]
count += 1
if now_sum > prefix_sum[i]:
return count
return -1
```
#### File: algorithm/lintcode/802.2.py
```python
class Solution:
"""
@param board: the sudoku puzzle
@return: nothing
"""
def solveSudoku(self, board):
# write your code here
self.dfs(board, self.build_used(board))
def build_used(self, board):
used = {
"row": [set() for _ in range(9)],
"col": [set() for _ in range(9)],
"box": [set() for _ in range(9)]
}
for i in range(9):
for j in range(9):
if board[i][j] != 0:
self.mark_used(i, j, board, used, unmark=False)
return used
def mark_used(self, i, j, board, used, unmark=False):
if unmark == False:
used["row"][i].add(board[i][j])
used["col"][j].add(board[i][j])
used["box"][i // 3 * 3 + j // 3].add(board[i][j])
else:
used["row"][i].remove(board[i][j])
used["col"][j].remove(board[i][j])
used["box"][i // 3 * 3 + j // 3].remove(board[i][j])
"""
def searching start at the least choice position
"""
def dfs(self, board, used):
x, y, choices = self.get_least_choice_position(board, used)
        if x is None: # every empty cell has been filled, same as reaching index == 81
return True
for num in choices:
board[x][y] = num
self.mark_used(x, y, board, used)
if self.dfs(board, used):
return True
self.mark_used(x, y, board, used, unmark=True)
board[x][y] = 0
return False #if there is no choices, that means cannot reach end
def get_least_choice_position(self, board, used):
x, y, choices = None, None, [None] * 10
for i in range(9):
for j in range(9):
if board[i][j] != 0:
continue
choices_at_pos = []
for num in range(1, 10):
if self.is_valid(num, i, j, board, used):
choices_at_pos.append(num)
if len(choices_at_pos) < len(choices):
x, y, choices = i, j, choices_at_pos
return x, y, choices
def is_valid(self, num, x, y, board, used):
if num in used["row"][x] or num in used["col"][y] or num in used["box"][x // 3 * 3 + y // 3]:
return False
return True
```
#### File: algorithm/lintcode/815.py
```python
class Solution:
"""
@param n: an integer, denote the number of courses
@param p: a list of prerequisite pairs
@return: return an integer,denote the number of topologicalsort
"""
def topologicalSortNumber(self, n, p):
# Write your code here
map = self.build_map(n, p)
in_degrees = self.build_indegree(map)
visited = set()
total_ways = [0]
self.dfs(n, map, in_degrees, 0, total_ways, visited)
return total_ways[0]
def build_map(self, n, p): #prerequisite->course
map = {x:set() for x in range(n)}
for course, prerequisite in p:
map[prerequisite].add(course)
return map
def build_indegree(self, map):
in_degrees = {x:0 for x in map}
for course, prerequisites in map.items():
for neighbour in prerequisites:
in_degrees[neighbour] = in_degrees.get(neighbour, 0) + 1
return in_degrees
def dfs(self, n, map, in_degrees, course_count, total_ways, visited):
if course_count == n:
total_ways[0] += 1
return
for node in in_degrees:
if node in visited:
continue
if in_degrees[node] == 0:
visited.add(node)
for neighbour in map[node]:
in_degrees[neighbour] -= 1
self.dfs(n, map, in_degrees, course_count + 1, total_ways, visited)
for neighbour in map[node]:
in_degrees[neighbour] += 1
visited.remove(node)
```
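A hedged usage example for the topological-ordering counter above: with 3 courses where courses 1 and 2 both require course 0, the valid orders are 0-1-2 and 0-2-1.
```python
print(Solution().topologicalSortNumber(3, [[1, 0], [2, 0]]))  # 2
```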
#### File: algorithm/lintcode/832.py
```python
class Solution:
"""
@param nums: the sorted matrix
@return: the number of Negative Number
"""
def countNumber(self, nums):
# Write your code here
count = 0
if not nums:
return count
n = len(nums)
for row in nums:
if len(row) > 0 and row[0] >= 0:
continue
count += self.binary_search(row, len(row))
return count
def binary_search(self, nums, m):
start, end = 0, m - 1
left, right = start, end
while left + 1 < right:
mid = (left + right) // 2
if nums[mid] < 0:
left = mid
elif nums[mid] > 0:
right = mid
else:
right = mid
if nums[right] < 0:
return right + 1
if nums[left] < 0:
return left + 1
return 0
```
#### File: algorithm/lintcode/833.1.py
```python
"""
Definition of Interval.
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
"""
import sys
class Solution:
"""
@param logs: Sequence of processes
@param queries: Sequence of queries
@return: Return the number of processes
"""
def numberOfProcesses(self, logs, queries):
# Write your code here
new_log = []
for interval in logs:
new_log.append((interval.start, 0))
new_log.append((interval.end, 1))
for i in queries:
new_log.append((i, 2))
new_log.sort()
curr_sum = 0
query_result = {}
for i, status in new_log:
if status == 0:
curr_sum += 1
elif status == 1:
curr_sum -= 1
if status == 2:
query_result[i] = curr_sum
return [query_result[x] for x in queries]
```
#### File: algorithm/lintcode/833.2.py
```python
import sys
class Solution:
"""
@param logs: Sequence of processes
@param queries: Sequence of queries
@return: Return the number of processes
"""
def numberOfProcesses(self, logs, queries):
# Write your code here
new_log = []
for interval in logs:
new_log.append((interval.start, 1))
new_log.append((interval.end, -1))
for i in queries:
new_log.append((i, 0))
new_log.sort(key=lambda x: (x[0], -abs(x[1])))
curr_sum = 0
query_result = {}
for i, delta in new_log:
curr_sum += delta
if delta == 0:
query_result[i] = curr_sum
return [query_result[x] for x in queries]
```
#### File: algorithm/lintcode/838.py
```python
class Solution:
"""
@param nums: a list of integer
@param k: an integer
@return: return an integer, denote the number of continuous subarrays whose sum equals to k
"""
def subarraySumEqualsK(self, nums, k):
# write your code here
seen = {0:1}
nowSum = 0
res = 0
for num in nums:
nowSum += num
if nowSum - k in seen:
res += seen[nowSum - k]
seen[nowSum] = seen.get(nowSum, 0) + 1
return res
```
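A hedged check of the prefix-sum / hash-map counter above (run after the class definition):
```python
print(Solution().subarraySumEqualsK([1, 1, 1], 2))         # 2
print(Solution().subarraySumEqualsK([2, 1, -1, 1, 2], 3))  # 4
```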
#### File: algorithm/lintcode/859.1.py
```python
import heapq
class MaxStack:
def __init__(self):
        # do initialization if necessary
self.stack = []
self.max_heap = []
self.deleted = set()
self.id = 0
"""
@param: number: An integer
@return: nothing
"""
def push(self, x): #O(logn)
# write your code here
item = (-x, -self.id)
self.stack.append(item)
heapq.heappush(self.max_heap, item)
self.id += 1
"""
@return: An integer
"""
def pop(self): #O(logn)
# write your code here
self.clean_up_stack()
if self.stack:
top = self.stack.pop()
self.deleted.add(top)
return -top[0]
"""
@return: An integer
"""
def top(self): #O(1)
# write your code here
self.clean_up_stack()
if len(self.stack) > 0:
return -self.stack[-1][0]
"""
@return: An integer
"""
def peekMax(self): #O(logn)
# write your code here
self.clean_up_heap()
if self.max_heap:
max_val, _ = self.max_heap[0]
return -max_val
"""
@return: An integer
"""
def popMax(self): #O(logn)
# write your code here
self.clean_up_heap()
if self.max_heap:
top = heapq.heappop(self.max_heap)
self.deleted.add(top)
return -top[0]
def clean_up_stack(self):
while self.stack and self.stack[-1] in self.deleted:
self.deleted.remove(self.stack.pop())
def clean_up_heap(self):
while self.max_heap and self.max_heap[0] in self.deleted:
self.deleted.remove(heapq.heappop(self.max_heap))
```
#### File: algorithm/lintcode/89.py
```python
class Solution:
"""
@param A: An integer array
@param k: A positive integer (k <= length(A))
@param target: An integer
@return: An integer
"""
def kSum(self, A, K, target):
# write your code here
dp = [[[0] * (target + 1) for _ in range(K + 1)] for _ in range(len(A))]
for t in range(target + 1):
if A[0] == t:
dp[0][1][t] = 1
for i in range(len(A)):
dp[i][0][0] = 1
for i in range(1, len(A)):
for k in range(1, K + 1):
for t in range(target + 1):
dp[i][k][t] = dp[i - 1][k][t]
if t >= A[i]:
dp[i][k][t] += dp[i - 1][k - 1][t - A[i]]
return (dp[len(A) - 1][K][target])
s = Solution()
A = [1,2,3,4]
K = 2
target=5
print(s.kSum(A,K,target))
```
#### File: algorithm/lintcode/902.py
```python
class Solution:
"""
@param root: the given BST
@param k: the given k
@return: the kth smallest element in BST
"""
def kthSmallest(self, root, k):
# write your code here
k_index, smallest_k = self.find_kth_smallest_index(root, 0, k)
return smallest_k
"""
@param root: current node
@param target_k: target
@return current_index
@return smallest_k if already otherwise None
直接用in-order traversal
分治法:如果左子树已经找到了,就返回左子树的结果,如果没有,就+1 然后看等不等于k, 如果等于就返回本颗子树,如果右边子树已经找到了就返回右边子树找到的结果。假如都没有,就返回右边子树的index因为inorder traversal右边子树返回的index才是当前已经搜索过的个数。这个方法因为遍历了所有的点,所以时间复杂度O(N).
更好的方法是用lower bound, upper bound, 时间复杂度是O(h), 因为只需要找2次树的高度就够了。(https://www.jiuzhang.com/solution/closest-binary-search-tree-value/#tag-highlight-lang-java)
"""
def find_kth_smallest_index(self, root, current_index, target_index):
if root is None:
return current_index, None
k_index_left, smallest_k_left = self.find_kth_smallest_index(root.left, current_index, target_index)
if k_index_left == target_index:
return k_index_left, smallest_k_left
current_index = k_index_left + 1
if current_index == target_index:
return current_index, root.val
k_index_right, smallest_k_right = self.find_kth_smallest_index(root.right, current_index, target_index)
if k_index_right == target_index:
return k_index_right, smallest_k_right
return k_index_right, smallest_k_right
```
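A hedged usage example for the in-order counting solution above, assuming a minimal TreeNode class with val/left/right (the node class is not part of the original file):
```python
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

#       3
#      / \
#     1   4
#      \
#       2
root = TreeNode(3, TreeNode(1, right=TreeNode(2)), TreeNode(4))
print(Solution().kthSmallest(root, 2))  # 2
```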
#### File: algorithm/lintcode/949.fast_power.class.py
```python
import sys
class Matrix:
def __init__(self, m, n, mod=sys.maxsize, unit_matrix=False): #create m * n matrix
self.m = m
self.n = n
self.mod = mod
self.mat = [[0] * n for _ in range(m)]
if unit_matrix:
self.set_unit_matrix()
def __mul__(self, matrix):
result = Matrix(self.m, matrix.n)
result.mod = self.mod
if matrix.m != self.n:
return -1
p = matrix.n
for i in range(self.m):
for j in range(matrix.n):
for k in range(self.n):
result.mat[i][j] += ((self.mat[i][k] % self.mod) * (matrix.mat[k][j] % self.mod)) % self.mod
result.mat[i][j] %= self.mod
return result
def set_unit_matrix(self): #m * m unit matrix
for i in range(self.m):
self.mat[i][i] = 1
class Solution:
"""
@param n: an integer
@return: return an int
"""
def lastFourDigitsOfFn(self, n):
MOD = 10000
# write your code here
if n == 0:
return 0
if n == 1:
return 1
base_matrix = Matrix(2, 2, MOD)
base_matrix.mat = [[1, 1],[1, 0]]
result_matrix = Matrix(2,2, MOD, unit_matrix=True)
power = n - 1
        while power > 0: # iterative fast power (binary exponentiation); see lintcode 140 Fast Power
if power & 1:
result_matrix = result_matrix * base_matrix
base_matrix = base_matrix * base_matrix
power >>= 1
return result_matrix.mat[0][0]
s = Solution()
print(s.lastFourDigitsOfFn(5531354))
```
#### File: algorithm/lintcode/976.2.py
```python
class Solution:
"""
@param A: a list
@param B: a list
@param C: a list
@param D: a list
@return: how many tuples (i, j, k, l) there are such that A[i] + B[j] + C[k] + D[l] is zero
"""
def fourSumCount(self, A, B, C, D):
# Write your code here
hash_map = {}
for i in A:
for j in B:
hash_map[i + j] = hash_map.get(i + j, 0) + 1
count = 0
for i in C:
for j in D:
count += hash_map.get(- (i + j), 0)
return count
s = Solution()
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
print(s.fourSumCount(A,B,C,D))
``` |
{
"source": "Jianfei2333/difference_equation",
"score": 3
} |
#### File: difference_equation/two-dimension/integration.py
```python
from numpy import *
raster_count = 50
# 积分计算函数
# 输入参数:
# function u 被积函数
# num l 积分下界
# num r 积分上界
def rectangle (u,l,r):
X = linspace(l,r,raster_count+1)
step = abs(r-l)/raster_count
height = u(X[:-1])
res = 0
for item in height:
res += item*step
return res
# 二重积分计算函数
# 输入参数:
# function u 被积函数 双参数(x,y)
# num l1 x坐标下界
# num r1 x坐标上界
# num l2 y坐标下界
# num r2 y坐标上界
def double_rec(u,l1,r1,l2,r2):
Y = linspace(l2,r2,raster_count+1)
step2 = abs(r2-l2)/raster_count
res = 0
for item in Y:
f_in_Oxz = lambda y: lambda x: u(x,y)
res += step2*rectangle(f_in_Oxz(item),l1,r1)
return (res)
``` |
{
"source": "Jianfei2333/pytorch-adversarial-training",
"score": 3
} |
#### File: Jianfei2333/pytorch-adversarial-training/attacker.py
```python
import torch
import torch.nn as nn
class LinfPGD(nn.Module):
"""Projected Gradient Decent(PGD) attack.
Can be used to adversarial training.
"""
def __init__(self, model, epsilon=8/255, step=2/255, iterations=20, criterion=None, random_start=True, targeted=False):
super(LinfPGD, self).__init__()
# Arguments of PGD
self.device = next(model.parameters()).device
self.model = model
self.epsilon = epsilon
self.step = step
self.iterations = iterations
self.random_start = random_start
self.targeted = targeted
self.criterion = criterion
if self.criterion is None:
self.criterion = lambda model, input, target: nn.functional.cross_entropy(model(input), target)
# Model status
self.training = self.model.training
def project(self, perturbation):
# Clamp the perturbation to epsilon Lp ball.
return torch.clamp(perturbation, -self.epsilon, self.epsilon)
def compute_perturbation(self, adv_x, x):
# Project the perturbation to Lp ball
perturbation = self.project(adv_x - x)
# Clamp the adversarial image to a legal 'image'
perturbation = torch.clamp(x+perturbation, 0., 1.) - x
return perturbation
def onestep(self, x, perturbation, target):
# Running one step for
adv_x = x + perturbation
adv_x.requires_grad = True
atk_loss = self.criterion(self.model, adv_x, target)
self.model.zero_grad()
atk_loss.backward()
grad = adv_x.grad
# Essential: delete the computation graph to save GPU ram
adv_x.requires_grad = False
if self.targeted:
adv_x = adv_x.detach() - self.step * torch.sign(grad)
else:
adv_x = adv_x.detach() + self.step * torch.sign(grad)
perturbation = self.compute_perturbation(adv_x, x)
return perturbation
def _model_freeze(self):
for param in self.model.parameters():
param.requires_grad=False
def _model_unfreeze(self):
for param in self.model.parameters():
param.requires_grad=True
def random_perturbation(self, x):
perturbation = torch.rand_like(x).to(device=self.device)
perturbation = self.compute_perturbation(x+perturbation, x)
return perturbation
def attack(self, x, target):
x = x.to(self.device)
target = target.to(self.device)
self.training = self.model.training
self.model.eval()
self._model_freeze()
perturbation = torch.zeros_like(x).to(self.device)
if self.random_start:
perturbation = self.random_perturbation(x)
with torch.enable_grad():
for i in range(self.iterations):
perturbation = self.onestep(x, perturbation, target)
self._model_unfreeze()
if self.training:
self.model.train()
return x + perturbation
class L2PGD(nn.Module):
"""Projected Gradient Decent(PGD) attack.
Can be used to adversarial training.
"""
def __init__(self, model, epsilon=5, step=1, iterations=20, criterion=None, random_start=True, targeted=False):
super(L2PGD, self).__init__()
# Arguments of PGD
self.device = next(model.parameters()).device
self.model = model
self.epsilon = epsilon
self.step = step
self.iterations = iterations
self.random_start = random_start
self.targeted = targeted
self.criterion = criterion
if self.criterion is None:
self.criterion = lambda model, input, target: nn.functional.cross_entropy(model(input), target)
# Model status
self.training = self.model.training
def project(self, perturbation):
# Clamp the perturbation to epsilon Lp ball.
return perturbation.renorm(p=2, dim=0, maxnorm=self.epsilon)
def compute_perturbation(self, adv_x, x):
# Project the perturbation to Lp ball
perturbation = self.project(adv_x - x)
# Clamp the adversarial image to a legal 'image'
perturbation = torch.clamp(x+perturbation, 0., 1.) - x
return perturbation
def onestep(self, x, perturbation, target):
# Running one step for
adv_x = x + perturbation
adv_x.requires_grad = True
atk_loss = self.criterion(self.model, adv_x, target)
self.model.zero_grad()
atk_loss.backward()
grad = adv_x.grad
g_norm = torch.norm(grad.view(x.shape[0], -1), p=2, dim=1).view(-1, *([1]*(len(x.shape)-1)))
grad = grad / (g_norm + 1e-10)
# Essential: delete the computation graph to save GPU ram
adv_x.requires_grad = False
if self.targeted:
adv_x = adv_x.detach() - self.step * grad
else:
adv_x = adv_x.detach() + self.step * grad
perturbation = self.compute_perturbation(adv_x, x)
return perturbation
def _model_freeze(self):
for param in self.model.parameters():
param.requires_grad=False
def _model_unfreeze(self):
for param in self.model.parameters():
param.requires_grad=True
def random_perturbation(self, x):
perturbation = torch.rand_like(x).to(device=self.device)
perturbation = self.compute_perturbation(x+perturbation, x)
return perturbation
def attack(self, x, target):
x = x.to(self.device)
target = target.to(self.device)
self.training = self.model.training
self.model.eval()
self._model_freeze()
perturbation = torch.zeros_like(x).to(self.device)
if self.random_start:
perturbation = self.random_perturbation(x)
with torch.enable_grad():
for i in range(self.iterations):
perturbation = self.onestep(x, perturbation, target)
self._model_unfreeze()
if self.training:
self.model.train()
return x + perturbation
```
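A minimal, hedged usage sketch for the LinfPGD attacker above; the toy linear classifier and the random batch are assumptions used only for illustration:
```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
attacker = LinfPGD(model, epsilon=8/255, step=2/255, iterations=10)

x = torch.rand(4, 3, 32, 32)          # images assumed to be scaled into [0, 1]
y = torch.randint(0, 10, (4,))
adv_x = attacker.attack(x, y)

# the adversarial images stay inside the L-inf ball and the valid pixel range
assert (adv_x - x).abs().max() <= 8/255 + 1e-6
assert adv_x.min() >= 0 and adv_x.max() <= 1
```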
#### File: Jianfei2333/pytorch-adversarial-training/runner.py
```python
from tqdm.auto import tqdm
from utils import AverageMeter
import torch
from utils import collect
class DistRunner():
def __init__(self, epochs, model, train_loader, test_loader, criterion, optimizer, scheduler, attacker, device):
self.device = device
self.epochs = epochs
self.eval_interval = 20
self.model = model
self.train_loader = train_loader
self.test_loader = test_loader
self.criterion = criterion
self.optimizer = optimizer
self.scheduler = scheduler
self.attacker = attacker
self.desc = lambda status, progress: f"{status}: {progress}"
def clean_step(self, progress):
self.model.train()
loss_meter = AverageMeter()
pbar = tqdm(total=len(self.train_loader), leave=False, desc=self.desc("Clean train", progress))
for batch_idx, (data, target) in enumerate(self.train_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
pbar.set_postfix_str("Loss {:.4f}".format(loss.item()))
loss_meter.update(loss.item())
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
pbar.update(1)
pbar.close()
return loss_meter.report()
def adv_step(self, progress):
self.model.train()
loss_meter = AverageMeter()
pbar = tqdm(total=len(self.train_loader), leave=False, desc=self.desc("Adv train", progress))
for batch_idx, (data, target) in enumerate(self.train_loader):
data, target = data.to(self.device), target.to(self.device)
data = self.attacker.attack(data, target)
output = self.model(data)
loss = self.criterion(output, target)
pbar.set_postfix_str("Loss {:.4f}".format(loss.item()))
loss_meter.update(loss.item())
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
pbar.update(1)
pbar.close()
return loss_meter.report()
def clean_eval(self, progress):
self.model.eval()
accuracy_meter = AverageMeter()
loss_meter = AverageMeter()
with torch.no_grad():
pbar = tqdm(total=len(self.test_loader), leave=False, desc=self.desc("Clean eval", progress))
for batch_idx, (data, target) in enumerate(self.test_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
loss_meter.update(loss.item())
pred = output.argmax(dim=1)
true_positive = (pred == target).sum().item()
total = pred.shape[0]
accuracy_meter.update(true_positive, total)
pbar.update(1)
pbar.close()
return (loss_meter.report(), accuracy_meter.sum, accuracy_meter.count)
def adv_eval(self, progress):
self.model.eval()
accuracy_meter = AverageMeter()
loss_meter = AverageMeter()
with torch.no_grad():
pbar = tqdm(total=len(self.test_loader), leave=False, desc=self.desc("Adv eval", progress))
for batch_idx, (data, target) in enumerate(self.test_loader):
data, target = data.to(self.device), target.to(self.device)
data = self.attacker.attack(data, target)
output = self.model(data)
loss = self.criterion(output, target)
loss_meter.update(loss.item())
pred = output.argmax(dim=1)
true_positive = (pred == target).sum().item()
total = pred.shape[0]
accuracy_meter.update(true_positive, total)
pbar.update(1)
pbar.close()
return (loss_meter.report(), accuracy_meter.sum, accuracy_meter.count)
def train(self, adv=True):
(avg_loss, acc_sum, acc_count) = self.adv_eval("Adv init")
avg_loss = collect(avg_loss, self.device)
avg_acc = collect(acc_sum, self.device, mode='sum') / collect(acc_count, self.device, mode='sum')
if torch.distributed.get_rank() == 0:
tqdm.write("Eval (Adver) init, Loss avg. {:.4f}, Acc. {:.4f}".format(avg_loss, avg_acc))
(avg_loss, acc_sum, acc_count) = self.clean_eval("Clean init")
avg_loss = collect(avg_loss, self.device)
avg_acc = collect(acc_sum, self.device, mode='sum') / collect(acc_count, self.device, mode='sum')
if torch.distributed.get_rank() == 0:
tqdm.write("Eval (Clean) init, Loss avg. {:.4f}, Acc. {:.4f}".format(avg_loss, avg_acc))
for epoch_idx in range(self.epochs):
if adv:
avg_loss = self.adv_step("{}/{}".format(epoch_idx, self.epochs))
else:
avg_loss = self.clean_step("{}/{}".format(epoch_idx, self.epochs))
avg_loss = collect(avg_loss, self.device)
if torch.distributed.get_rank() == 0:
if adv:
tqdm.write("Adv training procedure {} (total {}), Loss avg. {:.4f}".format(epoch_idx, self.epochs, avg_loss))
else:
tqdm.write("Clean training procedure {} (total {}), Loss avg. {:.4f}".format(epoch_idx, self.epochs, avg_loss))
if self.scheduler is not None:
self.scheduler.step()
if epoch_idx % self.eval_interval == (self.eval_interval-1):
avg_loss, acc_sum, acc_count = self.adv_eval("{}/{}".format(epoch_idx, self.epochs))
avg_loss = collect(avg_loss, self.device)
avg_acc = collect(acc_sum, self.device, mode='sum') / collect(acc_count, self.device, mode='sum')
if torch.distributed.get_rank() == 0:
tqdm.write("Eval (Adver) {}/{}, Loss avg. {:.4f}, Acc. {:.4f}".format(epoch_idx, self.epochs, avg_loss, avg_acc))
avg_loss, acc_sum, acc_count = self.clean_eval("{}/{}".format(epoch_idx, self.epochs))
avg_loss = collect(avg_loss, self.device)
avg_acc = collect(acc_sum, self.device, mode='sum') / collect(acc_count, self.device, mode='sum')
if torch.distributed.get_rank() == 0:
tqdm.write("Eval (Clean) {}/{}, Loss avg. {:.4f}, Acc. {:.4f}".format(epoch_idx, self.epochs, avg_loss, avg_acc))
tqdm.write("Finish training on rank {}!".format(torch.distributed.get_rank()))
```
#### File: Jianfei2333/pytorch-adversarial-training/utils.py
```python
import torch
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
# self.val = 0
self.sum = 0.
self.count = 0
def update(self, val, n=1):
self.sum += val
self.count += n
def report(self):
return (self.sum / self.count)
def collect(x, device, mode='mean'):
xt = torch.tensor([x]).to(device)
torch.distributed.all_reduce(xt, op=torch.distributed.ReduceOp.SUM)
# print(xt.item())
xt = xt.item()
if mode == 'mean':
xt /= torch.distributed.get_world_size()
return xt
def get_device_id():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
return args.local_rank
``` |
{
"source": "Jianfei2333/pytorch-GAN",
"score": 3
} |
#### File: Jianfei2333/pytorch-GAN/dataset.py
```python
import glob
import os
import torchvision.transforms as T
from PIL import Image
from torch.utils.data import Dataset
class CelebA(Dataset):
"""Dataset CelebA
Class number: 1
Train data number: 202599
Test data number: 0
"""
def __init__(self, dataroot, n_data=None, transform=None, train=True):
# Initial parameters
self.dataroot = os.path.join(dataroot, "CelebA")
self.train = train
if n_data:
self.n_data = n_data
if transform: # Set default transforms if no transformation provided.
self.transform = transform
else:
self.transform = T.Compose([
T.Resize((64, 64)),
T.ToTensor(),
T.Normalize((.5, .5, .5), (.5, .5, .5))
])
# Metadata of dataset
classes = ["face"]
self.class_num = len(classes)
self.classes = classes
self.class_to_idx = {"face": 0}
self.idx_to_class = {0: "face"}
# Split file and image path list.
if self.train:
if n_data:
self.img_paths = glob.glob(os.path.join(self.dataroot, 'data', '*.jpg'))[:n_data]
else:
self.img_paths = glob.glob(os.path.join(self.dataroot, 'data', '*.jpg'))
self.targets = [0] * len(self.img_paths)
else:
self.img_paths = []
self.targets = []
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
img_path = self.img_paths[idx]
img = Image.open(img_path)
img_tensor = self.transform(img)
target = self.targets[idx]
return (img_tensor, target)
def __repr__(self):
repr = """CelebA Dataset:
\tRoot location: {}
\tSplit: {}
\tClass num: {}
\tData num: {}""".format(self.dataroot, 'Train' if self.train else 'Test', self.class_num, self.__len__())
return repr
```
#### File: Jianfei2333/pytorch-GAN/model.py
```python
import torch.nn as nn
class DCGAN(nn.Module):
def __init__(self, nz, ngf, ndf, nch):
super(DCGAN, self).__init__()
# Dimension of latent vector z.
self.nz = nz
# Dimension of generator feature map.
self.ngf = ngf
# Dimension of discriminator feature map.
self.ndf = ndf
# Dimension of generator output image channel.
self.nch = nch
# Generator
self.generator = nn.Sequential(
nn.ConvTranspose2d(nz, ngf*8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf*8),
nn.ReLU(True),
nn.ConvTranspose2d(ngf*8, ngf*4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf*4),
nn.ReLU(True),
nn.ConvTranspose2d(ngf*4, ngf*2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf*2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf*2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.ConvTranspose2d(ngf, nch, 4, 2, 1, bias=False),
nn.Tanh()
)
# Discriminator
self.discriminator = nn.Sequential(
nn.Conv2d(nch, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf, ndf*2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf*2),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf*2, ndf*4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf*4),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf*4, ndf*8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf*8),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf*8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
# Initialize the weights.
self.generator.apply(self._init_weights)
self.discriminator.apply(self._init_weights)
@staticmethod
def _init_weights(m):
""" Initialize the weights of Convolution and Batch Normalization layers with normal distribution.
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
def dis_run(self, x):
# Run the discriminator
# Input: Image tensor x (b, nch, h, w)
return self.discriminator(x)
def gen_run(self, z):
# Run the generator
        # Input: latent vector z of shape (b, nz, 1, 1)
return self.generator(z)
``` |
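A quick smoke test for the `DCGAN` module above. The generator consumes the latent vector as a 4-D tensor of shape `(b, nz, 1, 1)`; the hyper-parameter values below are common DCGAN defaults and an assumption, not taken from the repository.
```python
# Hypothetical smoke test; nz/ngf/ndf/nch values are assumptions (common DCGAN defaults).
import torch
from model import DCGAN  # assumes model.py above is importable

model = DCGAN(nz=100, ngf=64, ndf=64, nch=3)
z = torch.randn(8, 100, 1, 1)   # one latent vector per sample
fake = model.gen_run(z)         # -> (8, 3, 64, 64), values in [-1, 1] due to Tanh
score = model.dis_run(fake)     # -> (8, 1, 1, 1), values in (0, 1) due to Sigmoid
print(fake.shape, score.shape)
```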
{
"source": "Jianfei2333/torch-utils",
"score": 2
} |
#### File: torch-utils/datasets/KTH.py
```python
import glob
import os
import torchvision.transforms as T
from PIL import Image
from torch.utils.data import Dataset
class KTH(Dataset):
def __init__(self, dataroot, transform=None, train=True):
self.dataroot = os.path.join(dataroot, "KTH")
self.train = train
if transform:
self.transform = transform
else:
self.transform = T.Compose([
T.RandomHorizontalFlip(),
T.RandomRotation((0, 30)),
T.Resize((256, 256)),
T.RandomResizedCrop((224, 224)),
T.ToTensor(),
T.Normalize((.485, .456, .406), (.229, .224, .225))
])
        # Derive class names from the sub-directories of the training split.
classes = [i.split('/')[-1] for i in glob.glob(os.path.join(self.dataroot, 'data', 'train', '*'))]
self.class_num = len(classes)
self.classes = classes
self.class_to_idx = {item: idx for (idx, item) in enumerate(classes)}
self.idx_to_class = {idx: item for (idx, item) in enumerate(classes)}
if train:
self.img_paths = glob.glob(os.path.join(self.dataroot, 'data', 'train', '*', '*'))
self.targets = [self.class_to_idx[i.split('/')[-2]] for i in self.img_paths]
else:
self.img_paths = glob.glob(os.path.join(self.dataroot, 'data', 'test', '*', '*'))
self.targets = [self.class_to_idx[i.split('/')[-2]] for i in self.img_paths]
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
img_path = self.img_paths[idx]
img = Image.open(img_path).convert('RGB')
img_tensor = self.transform(img)
target = self.targets[idx]
return (img_tensor, target)
def __repr__(self):
repr = """KTH Dataset:
\tRoot location: {}
\tSplit: {}
\tClass num: {}
\tData num: {}""".format(self.dataroot, 'Train' if self.train else 'Test', self.class_num, self.__len__())
return repr
if __name__ == '__main__':
data = KTH(os.environ["DATAROOT"])
print(data)
testdata = KTH(os.environ["DATAROOT"], train=False)
print(testdata)
``` |
{
"source": "JianfeiHu/pai",
"score": 2
} |
#### File: job-exporter/src/collector.py
```python
import re
import datetime
import logging
import threading
import subprocess
import time
import copy
from prometheus_client import make_wsgi_app, Counter, Gauge, Histogram
from prometheus_client.core import GaugeMetricFamily
import network
import utils
import docker_inspect
import docker_stats
import nvidia
logger = logging.getLogger(__name__)
##### collector will generate the following metrics
# Document about these metrics is in `` # TODO
iteration_counter = Counter("collector_iteration_count", "total number of iteration",
["name"])
def gen_docker_daemon_counter():
return GaugeMetricFamily("docker_daemon_count",
"count of docker daemon",
labels=["error"])
def gen_gpu_util_gauge():
return GaugeMetricFamily("nvidiasmi_utilization_gpu",
"gpu core utilization of card",
labels=["minor_number"])
def gen_gpu_mem_util_gauge():
return GaugeMetricFamily("nvidiasmi_utilization_memory",
"gpu memory utilization of card",
labels=["minor_number"])
class ResourceGauges(object):
def __init__(self):
self.task_labels = [
"container_env_PAI_TASK_INDEX",
"container_label_PAI_CURRENT_TASK_ROLE_NAME",
"container_label_PAI_HOSTNAME",
"container_label_PAI_JOB_NAME",
"container_label_PAI_USER_NAME"
]
self.service_labels = ["name"]
self.task_labels_gpu = copy.deepcopy(self.task_labels)
self.task_labels_gpu.append("minor_number")
self.gauges = {}
self.add_task_and_service_gauge("{0}_cpu_percent",
"how much percent of cpu this {0} used")
self.add_task_and_service_gauge("{0}_mem_usage_byte",
"how much memory this {0} used")
self.add_task_and_service_gauge("{0}_mem_usage_percent",
"how much percent of memory this {0} used")
self.add_task_and_service_gauge("{0}_mem_limit_byte",
"how much memory this {0} are constrained to")
self.add_task_and_service_gauge("{0}_net_in_byte",
"how much network inbound this task used")
self.add_task_and_service_gauge("{0}_net_out_byte",
"how much network outbound this {0} used")
self.add_task_and_service_gauge("{0}_block_in_byte",
"how much block inbound this {0} used")
self.add_task_and_service_gauge("{0}_block_out_byte",
"how much block outbound this {0} used")
self.add_gauge("task_gpu_percent",
"how much percent of gpu core this task used",
self.task_labels_gpu)
self.add_gauge("task_gpu_mem_percent",
"how much percent of gpu memory this task used",
self.task_labels_gpu)
def add_task_and_service_gauge(self, name_tmpl, desc_tmpl):
self.add_gauge(
name_tmpl.format("task"),
desc_tmpl.format("task"),
self.task_labels)
self.add_gauge(
name_tmpl.format("service"),
desc_tmpl.format("service"),
self.service_labels)
def add_gauge(self, name, desc, labels):
self.gauges[name] = GaugeMetricFamily(name, desc, labels=labels)
def add_value(self, metric_name, labels, val):
if metric_name not in self.gauges:
raise RuntimeError(
"{0} not found in gauges, all gauge names is {1}".format(
metric_name, ",".join(self.gauges.keys())))
gauge = self.gauges[metric_name]
# because prometheus library requires label provided as array, we
# preprocess the labels and check any missing labels
label_array = [None] * len(gauge._labelnames)
for k, v in labels.items():
try:
index = gauge._labelnames.index(k)
label_array[index] = v
except ValueError:
logger.warning("unknown label %s with value %s for metrics %s",
k, v, metric_name)
continue
for i, label_val in enumerate(label_array):
if label_val is None:
logger.error(
"not provided %s as label value for metric %s, ignore this metric",
gauge._labelnames[i], metric_name)
return
gauge.add_metric(label_array, val)
def as_array(self):
return self.gauges.values()
#####
class AtomicRef(object):
""" a thread safe way to store and get object,
should not modify data get from this ref """
def __init__(self):
self.data = None
self.lock = threading.RLock()
def get_and_set(self, new_data):
data = None
with self.lock:
data, self.data = self.data, new_data
return data
def get(self):
with self.lock:
return self.data
class Collector(object):
""" collector is a model running in thread and responsible for collecting
some metrics, we use thread because we do not want to let hanging in one
collector can not have impact on other collectors. This is base class,
real collector should inhernit this class and implement collect_impl,
metrics are returned as an array."""
def __init__(self, name, sleep_time, atomic_ref, iteration_counter):
self.name = name
self.sleep_time = sleep_time
self.atomic_ref = atomic_ref
self.iteration_counter = iteration_counter
histogram_key = "collector_%s_iteration_lantecy_seconds" % self.name
histogram_desc = "latency for execute one interation of %s collector (seconds)" % \
self.name
self.collector_histogram = Histogram(histogram_key, histogram_desc)
logger.debug("init %s with sleep_time %d", self.name, self.sleep_time)
def collect(self):
while True:
logger.debug("collecting metrics from %s", self.name)
with self.collector_histogram.time():
self.iteration_counter.labels(name=self.name).inc()
try:
self.atomic_ref.get_and_set(self.collect_impl())
except Exception as e:
logger.exception("%s collector get an exception", self.name)
logger.debug("finished collect metrcis from %s, will sleep for %s",
self.name, self.sleep_time)
time.sleep(self.sleep_time)
def collect_impl(self):
""" implementations are expected to return an array of
prometheus_client's metrics or None on exception """
pass
def instantiate_collector(name, sleep_time, collector_class, *args):
""" test cases helper fn to instantiate a collector """
atomic_ref = AtomicRef()
return atomic_ref, collector_class(name, sleep_time, atomic_ref, iteration_counter, *args)
def make_collector(name, sleep_time, collector_class, *args):
""" other module should use this fn to init a collector, this fn start a thread
to run the collector and return an atomic_ref so outside world can get metrics
collected by this collector """
atomic_ref, instance = instantiate_collector(name, sleep_time, collector_class, *args)
t = threading.Thread(
target=instance.collect,
name=name,
args=(),
daemon=True)
t.start()
return atomic_ref
class DockerCollector(Collector):
cmd_histogram = Histogram("cmd_docker_active_latency_seconds",
"Command call latency for checking docker daemon activeness (seconds)")
cmd_timeout = 1 # 99th latency is 0.01s
def collect_impl(self):
cmd = ["docker", "info"]
error = "ok"
try:
out = utils.exec_cmd(cmd,
histogram=DockerCollector.cmd_histogram,
timeout=DockerCollector.cmd_timeout)
logger.debug("output for docker info is %s", out)
except subprocess.CalledProcessError as e:
logger.exception("command '%s' return with error (code %d): %s",
cmd, e.returncode, e.output)
            error = str(e)
except subprocess.TimeoutExpired as e:
logger.warning("check docker active timeout")
error = "timeout"
except Exception as e:
            error = str(e)
counter = gen_docker_daemon_counter()
counter.add_metric([error], 1)
return [counter]
class GpuCollector(Collector):
cmd_histogram = Histogram("cmd_nvidia_smi_latency_seconds",
"Command call latency for nvidia-smi (seconds)")
cmd_timeout = 3 # 99th latency is 0.97s
def __init__(self, name, sleep_time, atomic_ref, iteration_counter, gpu_info_ref):
Collector.__init__(self, name, sleep_time, atomic_ref, iteration_counter)
self.gpu_info_ref = gpu_info_ref
def collect_impl(self):
gpu_info = nvidia.nvidia_smi(GpuCollector.cmd_histogram,
GpuCollector.cmd_timeout)
logger.debug("get gpu_info %s", gpu_info)
self.gpu_info_ref.get_and_set(gpu_info)
if gpu_info is not None:
core_utils = gen_gpu_util_gauge()
mem_utils = gen_gpu_mem_util_gauge()
for minor, info in gpu_info.items():
core_utils.add_metric([minor], info["gpu_util"])
mem_utils.add_metric([minor], info["gpu_mem_util"])
return [core_utils, mem_utils]
return None
class ContainerCollector(Collector):
stats_histogram = Histogram("cmd_docker_stats_latency_seconds",
"Command call latency for docker stats (seconds)")
stats_timeout = 20
# 99th latency may larger than 10s,
# Because prometheus's largest bucket for recording histogram is 10s,
# we can not get value higher than 10s.
inspect_histogram = Histogram("cmd_docker_inspect_latency_seconds",
"Command call latency for docker inspect (seconds)")
inspect_timeout = 1 # 99th latency is 0.042s
iftop_histogram = Histogram("cmd_iftop_latency_seconds",
"Command call latency for iftop (seconds)")
iftop_timeout = 10 # 99th latency is 7.4s
lsof_histogram = Histogram("cmd_lsof_latency_seconds",
"Command call latency for lsof (seconds)")
lsof_timeout = 2 # 99th latency is 0.5s
pai_services = list(map(lambda s: "k8s_" + s, [
"rest-server",
"pylon",
"webportal",
"grafana",
"prometheus",
"alertmanager",
"watchdog",
"end-to-end-test",
"yarn-frameworklauncher",
"hadoop-jobhistory-service",
"hadoop-name-node",
"hadoop-node-manager",
"hadoop-resource-manager",
"hadoop-data-node",
"zookeeper",
"node-exporter",
"job-exporter",
"yarn-exporter",
"nvidia-drivers"
]))
def __init__(self, name, sleep_time, atomic_ref, iteration_counter, gpu_info_ref,
stats_info_ref, interface):
Collector.__init__(self, name, sleep_time, atomic_ref, iteration_counter)
self.gpu_info_ref = gpu_info_ref
self.stats_info_ref = stats_info_ref
self.network_interface = network.try_to_get_right_interface(interface)
logger.info("found %s as potential network interface to listen network traffic",
self.network_interface)
    # k8s will prepend "k8s_" to the pod name. There will also be a container
    # name prefixed with "k8s_POD_", which is a docker container used to
    # construct the network & pid namespace for a specific container. These
    # containers prefixed with "k8s_POD" consume nothing.
def collect_impl(self):
all_conns = network.iftop(self.network_interface,
ContainerCollector.iftop_histogram,
ContainerCollector.iftop_timeout)
# set it to None so if nvidia-smi hangs till next time we get,
# we will get None
gpu_infos = self.gpu_info_ref.get_and_set(None)
stats_obj = docker_stats.stats(ContainerCollector.stats_histogram,
ContainerCollector.stats_timeout)
self.stats_info_ref.get_and_set(stats_obj)
logger.debug("all_conns is %s, gpu_info is %s, stats_obj is %s",
all_conns, gpu_infos, stats_obj)
return self.collect_container_metrics(stats_obj, gpu_infos, all_conns)
@staticmethod
def parse_from_labels(labels):
gpu_ids = []
other_labels = {}
for key, val in labels.items():
if "container_label_GPU_ID" == key:
s2 = val.replace("\"", "").split(",")
for id in s2:
if id:
gpu_ids.append(id)
else:
other_labels[key] = val
return gpu_ids, other_labels
@classmethod
def infer_service_name(cls, container_name):
""" try to infer service name from container_name, if it's container not belongs
to pai service, will return None """
if container_name.startswith("k8s_POD_"):
# this is empty container created by k8s for pod
return None
# TODO speed this up, since this is O(n^2)
for service_name in cls.pai_services:
if container_name.startswith(service_name):
return service_name[4:] # remove "k8s_" prefix
return None
def process_one_container(self, container_id, stats, gpu_infos, all_conns, gauges):
container_name = utils.walk_json_field_safe(stats, "name")
pai_service_name = ContainerCollector.infer_service_name(container_name)
inspect_info = docker_inspect.inspect(container_id,
ContainerCollector.inspect_histogram,
ContainerCollector.inspect_timeout)
pid = utils.walk_json_field_safe(inspect_info, "pid")
inspect_labels = utils.walk_json_field_safe(inspect_info, "labels")
logger.debug("%s has pid %s, labels %s, service_name %s",
container_name, pid, inspect_labels, pai_service_name)
if not inspect_labels and pai_service_name is None:
logger.debug("%s is ignored", container_name)
return # other container, maybe kubelet or api-server
# get network consumption, since all our services/jobs running in host
# network, and network statistic from docker is not specific to that
# container. We have to get network statistic by ourselves.
lsof_result = network.lsof(pid,
ContainerCollector.lsof_histogram,
ContainerCollector.lsof_timeout)
net_in, net_out = network.get_container_network_metrics(all_conns,
lsof_result)
if logger.isEnabledFor(logging.DEBUG):
debug_info = utils.exec_cmd(
"ps -o cmd fp {0} | tail -n 1".format(pid),
shell=True)
logger.debug("pid %s with cmd `%s` has lsof result %s, in %d, out %d",
pid, debug_info.strip(), lsof_result, net_in, net_out)
if pai_service_name is None:
gpu_ids, container_labels = ContainerCollector.parse_from_labels(inspect_info["labels"])
container_labels.update(inspect_info["env"])
if gpu_infos:
for id in gpu_ids:
labels = copy.deepcopy(container_labels)
labels["minor_number"] = id
gauges.add_value("task_gpu_percent",
labels, gpu_infos[id]["gpu_util"])
gauges.add_value("task_gpu_mem_percent",
labels, gpu_infos[id]["gpu_mem_util"])
gauges.add_value("task_cpu_percent", container_labels, stats["CPUPerc"])
gauges.add_value("task_mem_usage_byte", container_labels, stats["MemUsage_Limit"]["usage"])
gauges.add_value("task_mem_limit_byte", container_labels, stats["MemUsage_Limit"]["limit"])
gauges.add_value("task_net_in_byte", container_labels, net_in)
gauges.add_value("task_net_out_byte", container_labels, net_out)
gauges.add_value("task_block_in_byte", container_labels, stats["BlockIO"]["in"])
gauges.add_value("task_block_out_byte", container_labels, stats["BlockIO"]["out"])
gauges.add_value("task_mem_usage_percent", container_labels, stats["MemPerc"])
else:
labels = {"name": pai_service_name}
gauges.add_value("service_cpu_percent", labels, stats["CPUPerc"])
gauges.add_value("service_mem_usage_byte", labels, stats["MemUsage_Limit"]["usage"])
gauges.add_value("service_mem_limit_byte", labels, stats["MemUsage_Limit"]["limit"])
gauges.add_value("service_mem_usage_percent", labels, stats["MemPerc"])
gauges.add_value("service_net_in_byte", labels, net_in)
gauges.add_value("service_net_out_byte", labels, net_out)
gauges.add_value("service_block_in_byte", labels, stats["BlockIO"]["in"])
gauges.add_value("service_block_out_byte", labels, stats["BlockIO"]["out"])
def collect_container_metrics(self, stats_obj, gpu_infos, all_conns):
if stats_obj is None:
logger.warning("docker stats returns None")
return None
gauges = ResourceGauges()
for container_id, stats in stats_obj.items():
try:
self.process_one_container(container_id, stats, gpu_infos, all_conns, gauges)
except Exception:
logger.exception("error when trying to process container %s with name %s",
container_id, utils.walk_json_field_safe(stats, "name"))
return gauges.as_array()
class ZombieCollector(Collector):
logs_histogram = Histogram("cmd_docker_logs_latency_seconds",
"Command call latency for docker logs (seconds)")
logs_timeout = 1 # 99th latency is 0.04s
zombie_container_count = Gauge("zombie_container_count",
"number of zombie container found for this node",
["type"])
class ZombieRecorder(object):
def __init__(self, type):
self.type = type
self.zombies = {} # key is container id, value is enter zombie time
            # When we first meet a zombie container, we only record the time of
            # that encounter and wait an extra decay_time before reporting it as
            # a zombie. At the time of recording, the zombie has just been
            # produced and has not been recycled yet, so we wait 5 minutes to
            # avoid flagging normal, short-lived zombies.
self.decay_time = datetime.timedelta(minutes=5)
def update(self, zombie_ids, now):
""" feed in new zombie ids and get count of decayed zombie """
# remove all records not exist anymore
for z_id in list(self.zombies.keys()):
if z_id not in zombie_ids:
logger.debug("pop zombie %s that not exist anymore", z_id)
self.zombies.pop(z_id)
count = 0
for current in zombie_ids:
if current in self.zombies:
enter_zombie_time = self.zombies[current]
if now - enter_zombie_time > self.decay_time:
count += 1
else:
logger.debug("new zombie %s", current)
self.zombies[current] = now
ZombieCollector.zombie_container_count.labels(self.type).set(count)
return count # for test
def __len__(self):
return len(self.zombies)
def __init__(self, name, sleep_time, atomic_ref, iteration_counter, stats_info_ref):
Collector.__init__(self, name, sleep_time, atomic_ref, iteration_counter)
self.stats_info_ref = stats_info_ref
self.type1_zombies = ZombieCollector.ZombieRecorder("job_exit_hangs")
self.type2_zombies = ZombieCollector.ZombieRecorder("residual_job")
self.yarn_pattern = u"container_\w{3}_[0-9]{13}_[0-9]{4}_[0-9]{2}_[0-9]{6}"
self.yarn_container_reg = re.compile(u"^" + self.yarn_pattern + "$")
self.job_container_reg = re.compile(u"^.+(" + self.yarn_pattern + u")$")
def update_zombie_count_type1(self, exited_containers, now):
""" this fn will generate zombie container count for the first type,
exited_containers is container id set of which we believe exited """
return self.type1_zombies.update(exited_containers, now)
def update_zombie_count_type2(self, stats, now):
""" this fn will generate zombie container count for the second type """
names = set([info["name"] for info in stats.values()])
job_containers = {} # key is original name, value is corresponding yarn_container name
yarn_containers = set()
zombie_ids = set()
for name in names:
if re.match(self.yarn_container_reg, name) is not None:
yarn_containers.add(name)
elif re.match(self.job_container_reg, name) is not None:
match = re.match(self.job_container_reg, name)
value = match.groups()[0]
job_containers[name] = value
else:
pass # ignore
for job_name, yarn_name in job_containers.items():
if yarn_name not in yarn_containers:
zombie_ids.add(job_name)
return self.type2_zombies.update(zombie_ids, now)
def docker_logs(self, container_id, tail="all"):
try:
return utils.exec_cmd(
["docker", "logs", "--tail", str(tail), str(container_id)],
histogram=ZombieCollector.logs_histogram,
stderr=subprocess.STDOUT, # also capture stderr output
timeout=ZombieCollector.logs_timeout)
except subprocess.TimeoutExpired as e:
logger.warning("docker log timeout")
except subprocess.CalledProcessError as e:
logger.warning("docker logs returns %d, output %s", e.returncode, e.output)
except Exception:
logger.exception("exec docker logs error")
return ""
def is_container_exited(self, container_id):
logs = self.docker_logs(container_id, tail=50)
if re.search(u"USER COMMAND END", logs):
return True
return False
def update_zombie_count(self, stats):
"""
There are two types of zombie:
        1. container which output "USER COMMAND END" but did not exit for a long period of time
2. yarn container exited but job container didn't
"""
if stats is None:
logger.warning("docker stats is None")
return
exited_containers = set(filter(self.is_container_exited, stats.keys()))
now = datetime.datetime.now()
self.update_zombie_count_type1(exited_containers, now)
self.update_zombie_count_type2(stats, now)
def collect_impl(self):
# set it to None so if docker-stats hangs till next time we get,
# we will get None
stats_info = self.stats_info_ref.get_and_set(None)
self.update_zombie_count(stats_info)
``` |
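The collectors above all build on the same prometheus_client pattern: construct `GaugeMetricFamily` objects inside a `collect()`-style callable and let a registry expose them. Below is a self-contained sketch of that pattern, independent of the PAI-specific helpers such as `docker_stats` or `nvidia`; the metric name and label values are made up for illustration.
```python
# Minimal sketch of the custom-collector pattern used above.
from prometheus_client import generate_latest
from prometheus_client.core import GaugeMetricFamily, REGISTRY


class DemoCollector(object):
    def collect(self):
        # One metric family, labelled per GPU card, mirroring the gauges above.
        gauge = GaugeMetricFamily(
            "demo_gpu_utilization",
            "made-up gpu core utilization per card",
            labels=["minor_number"])
        gauge.add_metric(["0"], 37.0)
        gauge.add_metric(["1"], 81.0)
        yield gauge


REGISTRY.register(DemoCollector())
print(generate_latest(REGISTRY).decode("utf-8"))
```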
{
"source": "JianfengXu/crosswalk-test-suite",
"score": 2
} |
#### File: wrt-packertool2-android-tests/packertool2/versionCodetest.py
```python
import unittest
import os
import sys
import commands
import comm
class TestPackertoolsFunctions(unittest.TestCase):
def test_manifest_versionCode(self):
comm.setUp()
comm.clear_versionCode()
targetDir = comm.ConstPath + "/../testapp/example/"
        manifestPath = targetDir + "manifest.json"
os.chdir(targetDir)
versionCode = " --app-versionCode=11"
versionCodeBase = ""
cmd = "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s --project-dir=test" % \
            (comm.Pck_Tools, comm.ARCH, comm.MODE, manifestPath)
comm.versionCode(cmd, versionCode, versionCodeBase, self)
comm.clear_versionCode()
def test_manifest_no_versionCode(self):
comm.setUp()
comm.clear_versionCode()
targetDir = comm.ConstPath + "/../testapp/example/"
        manifestPath = targetDir + "manifest.json"
os.chdir(targetDir)
versionCode = ""
versionCodeBase = ""
cmd = "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s --project-dir=test" % \
            (comm.Pck_Tools, comm.ARCH, comm.MODE, manifestPath)
comm.versionCode(cmd, versionCode, versionCodeBase, self)
comm.clear_versionCode()
def test_manifest_versionCodeBase(self):
comm.setUp()
comm.clear_versionCode()
targetDir = comm.ConstPath + "/../testapp/example/"
        manifestPath = targetDir + "manifest.json"
os.chdir(targetDir)
versionCode = ""
versionCodeBase = ""
versionCodeBase = " --app-versionCodeBase=1234567"
cmd = "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s --project-dir=test --app-version=1.0.0" % \
            (comm.Pck_Tools, comm.ARCH, comm.MODE, manifestPath)
comm.versionCode(cmd, versionCode, versionCodeBase, self)
comm.clear_versionCode()
def test_manifest_version_versionCode(self):
comm.setUp()
comm.clear_versionCode()
targetDir = comm.ConstPath + "/../testapp/example/"
        manifestPath = targetDir + "manifest.json"
os.chdir(targetDir)
versionCode = ""
versionCodeBase = ""
cmd = "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s --project-dir=test --app-version=1.0.0.0" % \
            (comm.Pck_Tools, comm.ARCH, comm.MODE, manifestPath)
packstatus = commands.getstatusoutput(cmd)
errorinfo1 = "Error"
errorinfo2 = "--app-versionCode"
errorinfo3 = "--app-versionCodeBase"
        self.assertNotEqual(0, packstatus[0])
self.assertIn(errorinfo1, packstatus[1])
self.assertIn(errorinfo2, packstatus[1])
self.assertIn(errorinfo3, packstatus[1])
comm.clear_versionCode()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JianFeng-Ye/clean-up-low-pixels-images",
"score": 4
} |
#### File: JianFeng-Ye/clean-up-low-pixels-images/main.py
```python
import os
import argparse
from PIL import Image
def _is_image(ext):
return ext.lower().endswith((".bmp", ".png", ".jpg", ".jpeg"))
def _parse_path(path):
path = os.path.normpath(path)
if os.path.exists(path):
return path
    raise argparse.ArgumentTypeError("\"%s\" does not exist" % (path,))
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("path", type=_parse_path, help="file path for scan")
parser.add_argument("--lowest-pixels", type=int, default=1000, help="lowest pixels for scan")
return parser.parse_args()
def main():
args = _parse_args()
print("-------- result --------")
scan_file_path = args.path
lowest_pixels = args.lowest_pixels
for root, dirs, files in os.walk(scan_file_path, topdown=False):
remove_list = []
for filename in files:
path = os.path.join(root, filename)
if not _is_image(os.path.splitext(filename)[1]):
s = input("remove this file(%s): [y/n](default: y)" % (path,))
if s == "n":
continue
remove_list.append(path)
else:
with Image.open(path) as img:
if img.size[0] < lowest_pixels or img.size[1] < lowest_pixels:
remove_list.append(path)
for path in remove_list:
os.remove(path)
remove_length = len(remove_list)
if len(files) == remove_length:
_empty_flag = True
for filename in dirs:
if os.path.exists(os.path.join(root, filename)):
_empty_flag = False
break
if _empty_flag:
os.rmdir(root)
print("%s %d files removed." % (root, remove_length))
if __name__ == "__main__":
main()
``` |
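The script's core decision is the Pillow size check inside the `os.walk` loop. Here is a tiny self-contained sketch of that check on an in-memory image; no files are touched, and the 1000-pixel threshold mirrors the script's default.
```python
# Illustration of the size check only; nothing is removed here.
from PIL import Image

lowest_pixels = 1000                   # same default as --lowest-pixels
img = Image.new("RGB", (640, 480))     # stand-in for a scanned photo
too_small = img.size[0] < lowest_pixels or img.size[1] < lowest_pixels
print(img.size, "-> would be removed" if too_small else "-> kept")
```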
{
"source": "jian-frank-cao/spike",
"score": 3
} |
#### File: spike/TwitterMonitor/ConnectTwitterAPI.py
```python
import time
import json
import requests
import uuid
import multiprocessing
from datetime import datetime, timedelta
from requests import HTTPError, ConnectionError
from TwitterAPI import TwitterAPI, TwitterConnectionError, TwitterRequestError
## Define class ---------------------------------------------------------------
class ConnectTwitterAPI:
"""Object that connects Twitter API (Stream, REST, Lab-COVID19)
Functions:
StartMonitor(input_dict, api_type, outlet_type)
"""
def __init__(self, consumer_key, consumer_secret,
access_token_key, access_token_secret):
if (not consumer_key or not consumer_secret or not
access_token_key or not access_token_secret):
            raise ValueError('CONSUMER KEY&SECRET, ACCESS KEY&SECRET are needed.')
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token_key = access_token_key
self.access_token_secret = access_token_secret
def GetBearerToken(self, key, secret): # might not be necessary if use TwitterAPI
response = requests.post("https://api.twitter.com/oauth2/token",
auth=(key, secret),
data={'grant_type': 'client_credentials'},
headers={"User-Agent": "BEBOBEBOP"})
if response.status_code != 200:
print(response.status_code)
print(response.text)
raise Exception("Bearer token error")
body = response.json()
print("Bearer token is ready.")
return body['access_token']
def _get_ready(self):
# check input
if not any([x == self.api_type for
x in ['stream_v1', 'rest_v1', 'lab_covid19']]): # add stream_v2, rest_v2
raise Exception('API ' + self.api_type +
' is not currently supported')
if not any([x == self.outlet_type for
x in ['local', 'local_count']]): # add pubsub, kinesis, oracle stream
raise Exception('OUTLET ' + self.outlet_type +
' is not currently supported')
# authorization
if self.api_type == 'stream_v1':
self.twitter_api = TwitterAPI(self.consumer_key,
self.consumer_secret,
self.access_token_key,
self.access_token_secret)
print('oAuth1 is ready.')
if any(x == self.api_type for x in ['rest_v1', 'lab_covid19']):
self.twitter_api = TwitterAPI(self.consumer_key,
self.consumer_secret,
auth_type='oAuth2')
print('oAuth2 is ready.')
if any(x == self.api_type for x in ['stream_v2', 'rest_v2']): # modify this to use TwitterAPI
self.bearer_token = self.GetBearerToken(self.consumer_key,
self.consumer_secret)
# prepare requests
if self.api_type == 'stream_v1':
if any(x not in self.input_dict for x in
['keywords']):
raise ValueError('KEYWORDS is needed.')
self.resource = 'statuses/filter'
self.params = {'track': self.input_dict['keywords']} # add more rules in the params as needed
if self.api_type == 'rest_v1':
if any(x not in self.input_dict for x in
['keywords', 'max_id', 'since_id']):
raise ValueError('KEYWORDS, MAX_ID, and SINCE_ID are needed.')
keywords = '(' + ') OR ('.join(self.input_dict['keywords']) + ')'
self.resource = 'search/tweets'
self.params = {'q': keywords,
'max_id': self.input_dict['max_id'],
'since_id': self.input_dict['since_id'],
'count': 100,
'tweet_mode': 'extended'}
if 'tweets_per_qry' in self.input_dict:
self.params['count'] = self.input_dict['tweets_per_qry']
self.tweet_downloaded = 0
if self.api_type == 'lab_covid19':
if any(x not in self.input_dict for x in
['partition']):
raise ValueError('PARTITION is needed.')
self.params = {'partition': self.input_dict['partition']}
self.resource = 'labs/1/tweets/stream/covid19'
# prepare outlet
if not hasattr(self, 'pipe_in') or not hasattr(self, 'pipe_out'):
self.pipe_in, self.pipe_out = multiprocessing.Pipe()
if ('local' in self.outlet_type and
not hasattr(self, 'tweets')):
self.tweets = []
self.tweet_count = 0
if self.outlet_type == 'local_count':
self.tweets_per_file = 15000
if 'tweets_per_file' in self.input_dict:
self.tweets_per_file = self.input_dict['tweets_per_file']
if self.outlet_type == 'local':
self.minutes_per_file = timedelta(minutes = 15)
if 'minutes_per_file' in self.input_dict:
self.minutes_per_file = timedelta(
minutes = float(self.input_dict['minutes_per_file']))
if any(x not in self.input_dict for x in
['file_prefix', 'download_path']):
raise ValueError('FILE_PREFIX, DOWNLOAD_PATH are needed.')
def _request_stream_v1(self):
self.response = self.twitter_api.request(self.resource,
self.params)
if 'stream_v1' in self.api_type:
print('Connected to Stream API v1.1.')
print('First 10 keywords: ' +
', '.join(self.input_dict['keywords'][:10]) + '.')
else:
print(('Connected to Lab API COVID19 partition ' +
str(self.input_dict['partition'])))
print('Collecting tweets...')
for tweet in self.response:
if 'text' in tweet:
self.pipe_in.send(tweet)
elif 'disconnect' in tweet:
event = tweet['disconnect']
if event['code'] in [2,5,6,7]:
raise Exception(event['reason']) # something needs to be fixed before re-connecting
else:
                    print(('Disconnect Code: ' + str(event['code']) +
                           '. Reason: ' + event['reason']))
return(True) # temporary interruption, re-try request
return(True) # stream stopped with no reason, re-try request
def _request_rest_v1(self):
self.response = self.twitter_api.request(self.resource,
self.params)
if ('statuses' not in self.response.json()):
raise TypeError ('"statuses" not in response.json().')
tweets = self.response.json()['statuses']
n_tweets = len(tweets)
for tweet in tweets:
self.pipe_in.send(tweet)
if n_tweets == 0:
print('No more tweets found.')
self.pipe_in.send("FINISHED")
return(False)
self.tweet_downloaded += n_tweets
print('Downloaded {} tweets.'.format(self.tweet_downloaded))
self.input_dict['max_id'] = tweet['id'] - 1
return(True)
def _collect_tweets(self):
last_error = None
go = True
while go:
retry = False
try:
if any(x in self.api_type for
x in ['stream_v1', 'lab_covid19']):
go = self._request_stream_v1()
if 'rest_v1' in self.api_type:
go = self._request_rest_v1()
except IOError as ioe:
print('[Caught IOError]\n' + str(ioe))
retry = True
except HTTPError as he:
print('[Caught HTTPError]\n' + str(he))
retry = True
except ConnectionError as ce:
print('[Caught ConnectionError]\n' + str(ce))
retry = True
except TypeError as te:
print('[Caught TypeError]\n' + str(te))
retry = True
except TwitterConnectionError as tce:
print('[Caught TwitterConnectionError]\n' + str(tce))
retry = True
except TwitterRequestError as tre:
print('[Caught TwitterRequestError]\n' + str(tre))
retry = True
# retry strategy
if not retry:
if 'rest_v1' in self.api_type:
time.sleep(2.1)
continue
print(self.response.headers)
self.response.close()
if not last_error:
last_error = datetime.now()
error_count = 0
if datetime.now() - last_error > timedelta(seconds = 900):
error_count = 0
wait = min(0.25 * 2**error_count, 30)
last_error = datetime.now()
error_count += 1
print('Wait {} seconds before retrying...'.format(wait))
time.sleep(wait)
def _save_locally(self):
if self.outlet_type == 'local' and not hasattr(self, 'file_timer'):
self.file_timer = datetime.now() + self.minutes_per_file
print('Start saving tweets into local TXT files...')
while True:
tweet = self.pipe_out.recv()
if tweet == "FINISHED":
return(False)
self.tweets.append(tweet)
self.tweet_count += 1
# determine if the file is ready
file_is_ready = False
if 'count' in self.outlet_type:
file_is_ready = self.tweet_count >= self.tweets_per_file
else:
file_is_ready = datetime.now() >= self.file_timer
# file is not ready, continue adding tweets
if not file_is_ready:
continue
# save file
if self.outlet_type == 'local':
self.file_timer = datetime.now() + self.minutes_per_file
tweet_time = self.tweets[-1]['created_at']
time_format = '%a %b %d %H:%M:%S %z %Y'
if 'v2' in self.api_type:
tweet_time = tweet_time[:-5]
time_format = '%Y-%m-%dT%H:%M:%S'
file_time = datetime.strptime(tweet_time,
time_format)
file_name = (self.input_dict['file_prefix'] +
file_time.strftime("-%Y-%m-%d-%H-%M-%S-") +
str(uuid.uuid4()) +
'.txt')
with open(self.input_dict['download_path'] +
file_name, 'w') as file:
file.write(json.dumps(self.tweets))
# confirmation message
if 'count' in self.outlet_type:
print(file_name + ' is saved.')
else:
print('{} ----- {} tweets'.format(str(file_time),
str(len(self.tweets))))
# check stall warnings
            if ('warning' in tweet and
                    'percent_full' in tweet['warning']):
if tweet['warning']['percent_full'] > 0: # change threshold when debugging is done.
print('Warning: the queue is ' +
str(tweet['warning']['percent_full']) + '% full.')
# clean self.tweets
self.tweets = []
self.tweet_count = 0
return(True) # stopped with no reason, re-trying
def _tweet_outlet(self):
time.sleep(0.25)
go = True
while go:
if 'local' in self.outlet_type:
go = self._save_locally() # find errors that may occur
"""
retry = False
try:
if 'local' in self.outlet_type:
go = self._save_locally()
except: # find errors that may occur
retry = True
pass
# retry strategy # find better retry strategy
if not retry:
continue
print('Tweet outlet failed, resetting...')
time.sleep(0.25)"""
def StartMonitor(self, input_dict,
api_type = 'stream_v1',
outlet_type = 'local'):
"""Start the monitor
Parameters:
input_dict (dict): dict of input parameters
(parameters, start_id, end_id, etc).
api_type (str): type of API: stream, REST, lab-covid19
outlet_type (str): type of outlet: local disk, pubsub,
kinesis, Oracle stream.
Returns:
None
"""
if not input_dict or not api_type or not outlet_type:
raise ValueError('INPUT_DICT, API_TYPE and ' +
'OUTLET_TYPE are needed.')
# get ready
self.input_dict = input_dict
self.api_type = api_type.lower()
self.outlet_type = outlet_type.lower()
self._get_ready()
# start monitor
self.tweets_in = multiprocessing.Process(target = self._collect_tweets,
args=())
self.tweets_out = multiprocessing.Process(target = self._tweet_outlet,
args=())
self.tweets_in.start()
self.tweets_out.start()
# finish up
self.tweets_in.join()
self.tweets_out.join()
self.pipe_in.close()
self.pipe_out.close()
``` |
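A sketch of how `StartMonitor` might be invoked for the filtered stream with local TXT output. The credential strings, keyword list and download path are placeholders, so this will not authenticate as written.
```python
# Hypothetical invocation; credentials, keywords and paths are placeholders.
from ConnectTwitterAPI import ConnectTwitterAPI  # assumes the module above is importable

monitor = ConnectTwitterAPI(
    consumer_key="YOUR_CONSUMER_KEY",
    consumer_secret="YOUR_CONSUMER_SECRET",
    access_token_key="YOUR_ACCESS_TOKEN",
    access_token_secret="YOUR_ACCESS_SECRET")

input_dict = {
    "keywords": ["covid", "vaccine"],
    "file_prefix": "covid-stream",
    "download_path": "/tmp/tweets/",   # must exist; file name is appended directly
    "minutes_per_file": 15,
}
monitor.StartMonitor(input_dict, api_type="stream_v1", outlet_type="local")
```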
{
"source": "Jianfu-She/PySpider",
"score": 3
} |
#### File: 05 - ProxyPool/proxypool/error.py
```python
class PoolEmptyError(Exception):
def __init__(self):
Exception.__init__(self)
def __str__(self):
        return repr('Proxy pool is exhausted')
``` |
{
"source": "Jianf-Wang/GRCNN",
"score": 2
} |
#### File: Jianf-Wang/GRCNN/imagenet_data.py
```python
import os
from PIL import Image
import numpy as np
import torch
import torch.utils.data
import random
import pickle as pickle
class Imagenet_D(torch.utils.data.Dataset):
def __init__(self, imagenet_root, img_list_pkl, transform, label_list, mode='train', cutout=True):
self.img_list = img_list_pkl
if mode == 'train':
self.dir = os.path.join(imagenet_root, 'train')
else:
self.dir = os.path.join(imagenet_root, 'val')
self.cutout = cutout
self.transform = transform
self.img_label_list = []
self.label_list = label_list
img_list_all = pickle.load(open(self.img_list, 'rb'))
i = 0
for ele in img_list_all:
label = ele[0].split('/')[-2]
self.label_list[label] = i
i+=1
for ele in img_list_all:
label = ele[0].split('/')[-2]
for ele_1 in ele:
name = ele_1.split('/')[-1]
path_ = os.path.join(label, name)
path_total = os.path.join(self.dir, path_)
self.img_label_list.append([path_total, self.label_list[label]])
def __len__(self):
return len(self.img_label_list)
def __getitem__(self, idx):
img = self.img_label_list[idx][0]
label = self.img_label_list[idx][1]
image = Image.open(img).convert('RGB')
image = self.transform(image)
return image, label
```
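`Imagenet_D` above expects `img_list_pkl` to be a pickled list of per-class lists of image paths, from which it re-derives the class label via the parent directory name (`ele.split('/')[-2]`). Below is a sketch of constructing such a pickle for two made-up classes; the paths and class names are assumptions about the expected layout, not taken from the repository.
```python
# Sketch of the pickled image-list structure Imagenet_D appears to expect.
import pickle

img_list = [
    ["/data/imagenet/train/n01440764/img_0001.JPEG",
     "/data/imagenet/train/n01440764/img_0002.JPEG"],   # one list per class
    ["/data/imagenet/train/n01443537/img_0001.JPEG"],
]
with open("img_list.pkl", "wb") as f:
    pickle.dump(img_list, f)

# The dataset would then be built roughly as:
#   ds = Imagenet_D("/data/imagenet", "img_list.pkl", transform, label_list={}, mode="train")
# where each label is recovered from the parent directory name ("n01440764", ...).
```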
#### File: Jianf-Wang/GRCNN/losses.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class LabelSmoothLoss(nn.Module):
def __init__(self, smoothing=0.1):
super(LabelSmoothLoss, self).__init__()
self.smoothing = smoothing
def forward(self, input, target):
log_prob = F.log_softmax(input, dim=-1)
weight = input.new_ones(input.size()) * self.smoothing / (input.size(-1) - 1.)
weight.scatter_(-1, target.unsqueeze(-1), (1. - self.smoothing))
loss = (-weight * log_prob).sum(dim=-1).mean()
return loss
def cross_entropy_loss(input, target, size_average=True, smooth=0.1):
input = F.log_softmax(input, dim=1)
target_1 = (target > 0).int().long()
target_2 = (1. - target_1)
target_1 = ((target_1 * smooth)/target_1.sum(dim=1, keepdim=True)).float()
target_2 = ((target_2 * smooth)/target_2.sum(dim=1, keepdim=True)).float()
target = target - target_1 + target_2
loss = -torch.sum(input * target)
if size_average:
return loss / input.size(0)
else:
return loss
class CrossEntropyLossSmooth(object):
def __init__(self, size_average=True):
self.size_average = size_average
def __call__(self, input, target):
return cross_entropy_loss(input, target, self.size_average)
``` |
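A small sanity check for `LabelSmoothLoss` above: with `smoothing=0` it should coincide with standard cross-entropy, while `smoothing=0.1` gives a slightly different value. This is a sketch assuming `losses.py` above is importable.
```python
# Sanity-check sketch for LabelSmoothLoss.
import torch
import torch.nn.functional as F
from losses import LabelSmoothLoss  # assumes losses.py above is on the path

torch.manual_seed(0)
logits = torch.randn(4, 10)              # batch of 4, 10 classes
target = torch.randint(0, 10, (4,))

plain = F.cross_entropy(logits, target)
no_smooth = LabelSmoothLoss(smoothing=0.0)(logits, target)
smooth = LabelSmoothLoss(smoothing=0.1)(logits, target)

print(torch.allclose(plain, no_smooth))  # expected: True
print(plain.item(), smooth.item())
```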
{
"source": "jiang1997/mmaction2",
"score": 2
} |
#### File: test_pipelines/test_loadings/test_sampling.py
```python
import copy
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_equal
from mmaction.datasets.pipelines import (AudioFeatureSelector,
DenseSampleFrames, SampleAVAFrames,
SampleFrames, SampleProposalFrames,
UntrimmedSampleFrames)
from .base import BaseTestLoading
class TestSampling(BaseTestLoading):
def test_sample_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames'
]
with pytest.warns(UserWarning):
# start_index has been deprecated
config = dict(
clip_len=3, frame_interval=1, num_clips=5, start_index=1)
SampleFrames(**config)
# Sample Frame with tail Frames
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=3, frame_interval=1, num_clips=5, keep_tail_frames=True)
sample_frames = SampleFrames(**config)
sample_frames(video_result)
sample_frames(frame_result)
# Sample Frame with no temporal_jitter
# clip_len=3, frame_interval=1, num_clips=5
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=3, frame_interval=1, num_clips=5, temporal_jitter=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 15
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 15
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={3}, '
f'frame_interval={1}, '
f'num_clips={5}, '
f'temporal_jitter={False}, '
f'twice_sample={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={False})')
# Sample Frame with no temporal_jitter
# clip_len=5, frame_interval=1, num_clips=5,
# out_of_bound_opt='repeat_last'
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=5,
frame_interval=1,
num_clips=5,
temporal_jitter=False,
out_of_bound_opt='repeat_last')
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={5}, '
f'frame_interval={1}, '
f'num_clips={5}, '
f'temporal_jitter={False}, '
f'twice_sample={False}, '
f'out_of_bound_opt=repeat_last, '
f'test_mode={False})')
def check_monotonous(arr):
length = arr.shape[0]
for i in range(length - 1):
if arr[i] > arr[i + 1]:
return False
return True
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 25
frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])
for i in range(5):
assert check_monotonous(frame_inds[i])
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 25
frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])
for i in range(5):
assert check_monotonous(frame_inds[i])
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame with temporal_jitter
# clip_len=4, frame_interval=2, num_clips=5
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4, frame_interval=2, num_clips=5, temporal_jitter=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 20
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 20
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={2}, '
f'num_clips={5}, '
f'temporal_jitter={True}, '
f'twice_sample={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={False})')
# Sample Frame with no temporal_jitter in test mode
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 24
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 24
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'temporal_jitter={False}, '
f'twice_sample={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={True})')
# Sample Frame with no temporal_jitter in test mode
# clip_len=3, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=3,
frame_interval=1,
num_clips=6,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 18
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 18
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame with no temporal_jitter to get clip_offsets
# clip_len=1, frame_interval=1, num_clips=8
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 6
config = dict(
clip_len=1,
frame_interval=1,
num_clips=8,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 8
assert_array_equal(sample_frames_results['frame_inds'],
np.array([1, 2, 2, 3, 4, 5, 5, 6]))
# Sample Frame with no temporal_jitter to get clip_offsets
# clip_len=1, frame_interval=1, num_clips=8
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 6
config = dict(
clip_len=1,
frame_interval=1,
num_clips=8,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 8
assert_array_equal(sample_frames_results['frame_inds'],
np.array([1, 2, 2, 3, 4, 5, 5, 6]))
# Sample Frame with no temporal_jitter to get clip_offsets zero
# clip_len=6, frame_interval=1, num_clips=1
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 5
config = dict(
clip_len=6,
frame_interval=1,
num_clips=1,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 6
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 6
assert_array_equal(sample_frames_results['frame_inds'],
[1, 2, 3, 4, 5, 1])
# Sample Frame with no temporal_jitter to get avg_interval <= 0
# clip_len=12, frame_interval=1, num_clips=20
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 30
config = dict(
clip_len=12,
frame_interval=1,
num_clips=20,
temporal_jitter=False,
test_mode=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 240
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 240
assert np.max(sample_frames_results['frame_inds']) <= 30
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame with no temporal_jitter to get clip_offsets
# clip_len=1, frame_interval=1, num_clips=8
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 6
config = dict(
clip_len=1,
frame_interval=1,
num_clips=8,
temporal_jitter=False,
test_mode=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert sample_frames_results['start_index'] == 0
assert len(sample_frames_results['frame_inds']) == 8
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 8
assert_array_equal(sample_frames_results['frame_inds'],
np.array([1, 2, 3, 3, 4, 5, 5, 6]))
# Sample Frame with no temporal_jitter to get clip_offsets zero
# clip_len=12, frame_interval=1, num_clips=2
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 10
config = dict(
clip_len=12,
frame_interval=1,
num_clips=2,
temporal_jitter=False,
test_mode=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 24
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 24
assert np.max(sample_frames_results['frame_inds']) <= 10
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame using twice sample
# clip_len=12, frame_interval=1, num_clips=2
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 40
config = dict(
clip_len=12,
frame_interval=1,
num_clips=2,
temporal_jitter=False,
twice_sample=True,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 48
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 48
assert np.max(sample_frames_results['frame_inds']) <= 40
assert np.min(sample_frames_results['frame_inds']) >= 1
def test_dense_sample_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames'
]
# Dense sample with no temporal_jitter in test mode
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
temporal_jitter=False,
test_mode=True)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 240
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 240
assert repr(dense_sample_frames) == (
f'{dense_sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'sample_range={64}, '
f'num_sample_positions={10}, '
f'temporal_jitter={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={True})')
# Dense sample with no temporal_jitter
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4, frame_interval=1, num_clips=6, temporal_jitter=False)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 24
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 24
# Dense sample with no temporal_jitter, sample_range=32 in test mode
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
sample_range=32,
temporal_jitter=False,
test_mode=True)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 240
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 240
# Dense sample with no temporal_jitter, sample_range=32
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
sample_range=32,
temporal_jitter=False)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 24
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 24
assert repr(dense_sample_frames) == (
f'{dense_sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'sample_range={32}, '
f'num_sample_positions={10}, '
f'temporal_jitter={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={False})')
# Dense sample with no temporal_jitter, sample_range=1000 to check mod
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
sample_range=1000,
temporal_jitter=False)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 24
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 24
# Dense sample with no temporal_jitter in test mode
# sample_range=32, num_sample_positions=5
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
num_sample_positions=5,
sample_range=32,
temporal_jitter=False,
test_mode=True)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 120
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 120
assert repr(dense_sample_frames) == (
f'{dense_sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'sample_range={32}, '
f'num_sample_positions={5}, '
f'temporal_jitter={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={True})')
def test_untrim_sample_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames'
]
frame_result = dict(
frame_dir=None,
total_frames=100,
filename_tmpl=None,
modality='RGB',
start_index=0,
label=1)
video_result = copy.deepcopy(self.video_results)
config = dict(clip_len=1, frame_interval=16, start_index=0)
sample_frames = UntrimmedSampleFrames(**config)
sample_frames_results = sample_frames(frame_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 6
assert_array_equal(sample_frames_results['frame_inds'],
np.array([8, 24, 40, 56, 72, 88]))
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'frame_interval={16})')
config = dict(clip_len=1, frame_interval=16, start_index=0)
sample_frames = UntrimmedSampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
frame_inds = np.array(list(range(8, 300, 16)))
assert len(sample_frames_results['frame_inds']) == frame_inds.shape[0]
assert_array_equal(sample_frames_results['frame_inds'], frame_inds)
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'frame_interval={16})')
config = dict(clip_len=1, frame_interval=16)
sample_frames = UntrimmedSampleFrames(**config)
frame_result_ = copy.deepcopy(frame_result)
frame_result_['start_index'] = 1
sample_frames_results = sample_frames(frame_result_)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 6
assert_array_equal(sample_frames_results['frame_inds'],
np.array([8, 24, 40, 56, 72, 88]) + 1)
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'frame_interval={16})')
config = dict(clip_len=3, frame_interval=16, start_index=0)
sample_frames = UntrimmedSampleFrames(**config)
sample_frames_results = sample_frames(frame_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 18
assert_array_equal(
sample_frames_results['frame_inds'],
np.array([
7, 8, 9, 23, 24, 25, 39, 40, 41, 55, 56, 57, 71, 72, 73, 87,
88, 89
]))
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={3}, '
f'frame_interval={16})')
def test_sample_ava_frames(self):
target_keys = [
'fps', 'timestamp', 'timestamp_start', 'shot_info', 'frame_inds',
'clip_len', 'frame_interval'
]
config = dict(clip_len=32, frame_interval=2)
sample_ava_dataset = SampleAVAFrames(**config)
ava_result = sample_ava_dataset(results=self.ava_results)
assert assert_dict_has_keys(ava_result, target_keys)
assert ava_result['clip_len'] == 32
assert ava_result['frame_interval'] == 2
assert len(ava_result['frame_inds']) == 32
assert repr(sample_ava_dataset) == (
f'{sample_ava_dataset.__class__.__name__}('
f'clip_len={32}, '
f'frame_interval={2}, '
f'test_mode={False})')
# add test case in Issue #306
config = dict(clip_len=8, frame_interval=8)
sample_ava_dataset = SampleAVAFrames(**config)
ava_result = sample_ava_dataset(results=self.ava_results)
assert assert_dict_has_keys(ava_result, target_keys)
assert ava_result['clip_len'] == 8
assert ava_result['frame_interval'] == 8
assert len(ava_result['frame_inds']) == 8
assert repr(sample_ava_dataset) == (
f'{sample_ava_dataset.__class__.__name__}('
f'clip_len={8}, '
f'frame_interval={8}, '
f'test_mode={False})')
def test_sample_proposal_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames', 'start_index'
]
# test error cases
with pytest.raises(TypeError):
proposal_result = copy.deepcopy(self.proposal_results)
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=('error', 'error'),
aug_ratio=0.5,
temporal_jitter=False)
sample_frames = SampleProposalFrames(**config)
sample_frames(proposal_result)
# test normal cases
# Sample Frame with no temporal_jitter
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 9
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=False)
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=train)')
# Sample Frame with temporal_jitter
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 9
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=True)
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={True}, '
f'mode=train)')
# Sample Frame with no temporal_jitter in val mode
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 9
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=False,
mode='val')
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=val)')
# Sample Frame with no temporal_jitter in test mode
# test_interval=2
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['out_proposals'] = None
proposal_result['total_frames'] = 10
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
test_interval=2,
temporal_jitter=False,
mode='test')
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 5
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={2}, '
f'temporal_jitter={False}, '
f'mode=test)')
# Sample Frame with no temporal_jitter to get clip_offsets zero
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 3
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=False)
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=train)')
# Sample Frame with no temporal_jitter to
# get clip_offsets zero in val mode
# clip_len=1, frame_interval=1
# body_segments=4, aug_segments=(2, 2)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 3
config = dict(
clip_len=1,
frame_interval=1,
body_segments=4,
aug_segments=(2, 2),
aug_ratio=0.5,
temporal_jitter=False,
mode='val')
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 16
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={4}, '
f'aug_segments={(2, 2)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=val)')
def test_audio_feature_selector(self):
target_keys = ['audios']
# test frame selector with 2 dim input
inputs = copy.deepcopy(self.audio_feature_results)
inputs['frame_inds'] = np.arange(0, self.audio_total_frames,
2)[:, np.newaxis]
inputs['num_clips'] = 1
inputs['length'] = 1280
audio_feature_selector = AudioFeatureSelector()
results = audio_feature_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert repr(audio_feature_selector) == (
f'{audio_feature_selector.__class__.__name__}('
f'fix_length={128})')
``` |
{
"source": "jiang1hong2xie/111",
"score": 2
} |
#### File: 111/utils/general.py
```python
import os
import numpy as np
import time
import logging
import sys
import subprocess
import shlex
from shutil import copyfile
import json
from threading import Timer
from os import listdir
from os.path import isfile, join
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.pdf']
DEVNULL = open(os.devnull, "w")
def allow_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def get_img_list(img_path):
return [img_dir for img_dir in os.listdir(img_path) if allow_image_file(img_dir)]
def run(cmd, timeout_sec):
"""Run cmd in the shell with timeout"""
proc = subprocess.Popen(cmd, shell=True)
def kill_proc(p): return p.kill()
timer = Timer(timeout_sec, kill_proc, [proc])
try:
timer.start()
stdout, stderr = proc.communicate()
finally:
timer.cancel()
def run_call(cmd, timeout_sec):
"""Run cmd in the shell with timeout"""
    assert isinstance(cmd, list), 'cmd must be a list'
proc = subprocess.call(cmd, stdout=DEVNULL, stderr=DEVNULL, timeout=timeout_sec)
return proc
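# Hedged usage sketch for the two timeout helpers above. The shell command and the
# 5-second timeout are made-up illustration values, not something this project uses;
# the function is only defined here and is never called at import time.
def _timeout_helpers_demo():
    run('echo hello', timeout_sec=5)  # killed by the Timer if it runs longer than 5 s
    return run_call(['echo', 'hello'], timeout_sec=5)  # returns the subprocess exit code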
def get_logger(filename):
"""Return instance of logger"""
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
logging.basicConfig(format='%(message)s', level=logging.INFO)
handler = logging.FileHandler(filename)
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter(
'%(asctime)s:%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
return logger
def init_dir(dir_name):
"""Creates directory if it does not exists"""
if dir_name is not None:
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def init_file(path_file, mode="a"):
"""Makes sure that a given file exists"""
with open(path_file, mode) as f:
pass
def get_files(dir_name):
files = [f for f in listdir(dir_name) if isfile(join(dir_name, f))]
return files
def delete_file(path_file):
try:
os.remove(path_file)
except Exception:
pass
``` |
{
"source": "jiang7369/darts-clone-python",
"score": 3
} |
#### File: darts-clone-python/test/test_darts.py
```python
import tempfile
import unittest
import pickle
from dartsclone import DoubleArray
class DoubleArrayTest(unittest.TestCase):
"""test class of double array
"""
def test_darts_no_values(self):
keys = ['test', 'テスト', 'テストケース']
darts = DoubleArray()
darts.build(sorted([key.encode() for key in keys]))
self.assertEqual(1, darts.exact_match_search('テスト'.encode(), pair_type=False))
self.assertEqual(0, darts.common_prefix_search('testcase'.encode(), pair_type=False)[0])
self.assertEqual(0, darts.exact_match_search('test'.encode(), pair_type=False))
self.assertEqual(2, darts.common_prefix_search('テストケース'.encode(), pair_type=False)[1])
def test_darts_with_values(self):
keys = ['test', 'テスト', 'テストケース']
darts = DoubleArray()
darts.build(sorted([key.encode() for key in keys]), values=[3, 5, 1])
self.assertEqual(5, darts.exact_match_search('テスト'.encode(), pair_type=False))
self.assertEqual(3, darts.common_prefix_search('testcase'.encode(), pair_type=False)[0])
self.assertEqual(1, darts.exact_match_search('テストケース'.encode(), pair_type=False))
self.assertEqual(1, darts.common_prefix_search('テストケース'.encode(), pair_type=False)[1])
def test_darts_save(self):
keys = ['test', 'テスト', 'テストケース']
darts = DoubleArray()
darts.build(sorted([key.encode() for key in keys]), values=[3, 5, 1])
with tempfile.NamedTemporaryFile('wb') as output_file:
darts.save(output_file.name)
output_file.flush()
darts.clear()
darts.open(output_file.name)
self.assertEqual(5, darts.exact_match_search('テスト'.encode(), pair_type=False))
self.assertEqual(3, darts.common_prefix_search('testcase'.encode(), pair_type=False)[0])
def test_darts_pickle(self):
keys = ['test', 'テスト', 'テストケース']
darts = DoubleArray()
darts.build(sorted([key.encode() for key in keys]), values=[3, 5, 1])
with tempfile.NamedTemporaryFile('wb') as output_file:
pickle.dump(darts, output_file)
output_file.flush()
with open(output_file.name, 'rb') as input_file:
darts = pickle.load(input_file)
self.assertEqual(5, darts.exact_match_search('テスト'.encode(), pair_type=False))
self.assertEqual(3, darts.common_prefix_search('testcase'.encode(), pair_type=False)[0])
def test_darts_array(self):
keys = ['test', 'テスト', 'テストケース']
darts = DoubleArray()
darts.build(sorted([key.encode() for key in keys]), values=[3, 5, 1])
array = darts.array()
darts = DoubleArray()
darts.set_array(array)
self.assertEqual(5, darts.exact_match_search('テスト'.encode(), pair_type=False))
self.assertEqual(3, darts.common_prefix_search('testcase'.encode(), pair_type=False)[0])
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jianganbai/Collision-Classification-and-Matching",
"score": 2
} |
#### File: Collision-Classification-and-Matching/task2/t2_set_within_class_match.py
```python
import numpy as np
import copy
def loss_func(sf0, sf1, bf0, bf1, confi):
    # Larger weights mean more importance; the base weights balance the influence of the two features
    weight0_base = 3 # base weight for intensity: a small gap earns a bonus, a large gap a penalty
    weight1_base = 1 # base weight for the collision edge: the weight is tiny when confidence = 0
    # loss term for feature 0 (intensity)
    # first row: difference intervals, second row: extra weights
feat0_loss_ref = np.array([[0, 0.05, 0.1, 0.7, 0.9, 1], [0.4, 0.7, 1, 2, 5, 100]])
d0 = abs(sf0-bf0)
for i in range(1, feat0_loss_ref.shape[1]):
start = feat0_loss_ref[0, i-1]
end = feat0_loss_ref[0, i]
if (start <= d0) and (d0 <= end):
loss0 = d0*weight0_base*feat0_loss_ref[1, i-1]
break
    # loss term for feature 1 (collision edge)
    unknown_weight = 0.2
    d1 = abs(sf1-bf1) % 2 # the edge distance differs by at most 2
loss1 = d1*weight1_base*max(confi, unknown_weight)
loss = loss0 + loss1
return loss
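# Hedged worked example of loss_func. The feature values below are hypothetical and only
# illustrate how the two weighted terms combine; nothing in the matching code calls this helper.
def _loss_func_demo():
    # Intensity difference 0.04 falls in the first band of feat0_loss_ref (extra weight 0.4),
    # so loss0 = 0.04 * 3 * 0.4; the edge indices agree, so loss1 = 0.
    return loss_func(sf0=0.50, sf1=1, bf0=0.54, bf1=1, confi=1.0)  # ≈ 0.048 (up to float rounding)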
def traverse(small_mat, big_mat, hit_confi, small_type, small_num, level,
num_remain, current_match, current_loss, loss_min, best_match):
    # no audio left to match, or no video left to match
if (level >= small_num) or (len(num_remain) == 0):
if current_loss < loss_min:
best_match = copy.deepcopy(current_match)
loss_min = current_loss
return best_match, loss_min
    # more matching is still possible
else:
        # match the smaller set against the larger set
for big_idx in num_remain:
sf0 = small_mat[level, 0]
sf1 = small_mat[level, 1]
bf0 = big_mat[big_idx, 0]
bf1 = big_mat[big_idx, 1]
            if small_type == 'audio': # video is the larger set
                confi = hit_confi[big_idx]
            else: # equal size, or audio is the larger set
confi = hit_confi[level]
addition_loss = loss_func(sf0, sf1, bf0, bf1, confi)
loss_new = current_loss + addition_loss
if loss_new >= loss_min:
continue
            else: # this step still stays below loss_min
level_new = level+1
num_remain_new = copy.deepcopy(num_remain)
num_remain_new.remove(big_idx)
match_new = copy.deepcopy(current_match)
match_new[level] = big_idx
                # recurse
best_match, loss_min = traverse(small_mat, big_mat,
hit_confi, small_type,
small_num, level_new,
num_remain_new, match_new,
loss_new, loss_min,
best_match)
return best_match, loss_min
def set_within_class_match(video_feat_dict, audio_feat_dict):
    '''Match objects of one class within a test group, using the 2-D features already extracted.
    Feature dim 0: (max inter-frame displacement of the video, max amplitude of the audio)
    Feature dim 1: (collision edge predicted from the video, collision edge predicted from the audio)
    Args:
        video_feat_dict (dict): {'video folder name': [max displacement, [collision-edge index at max speed, confidence]]}
        audio_feat_dict (dict): {'audio name': [max short-time power, collision-edge index of the most salient collision]}
    Returns:
        match_result : {'audio name': 'matched video folder name'}
        unmatched : {'video' or 'audio': [names]}; None when nothing is left unmatched
    '''
    max_intense = 1 # normalization scale for feature 0
    # if either dict is empty, return immediately
    if not bool(video_feat_dict): # video is empty
        if not bool(audio_feat_dict): # audio is empty
match_result = {}
unmatched = None
else:
match_result = {}
unmatched = {'audio': list(audio_feat_dict.keys())}
return match_result, unmatched
    if not bool(audio_feat_dict): # audio is empty
match_result = {}
unmatched = {'video': list(video_feat_dict.keys())}
return match_result, unmatched
    # convert to matrix form, dropping the file names
video_feat_list = []
hit_confi = []
for video_feat in video_feat_dict.values():
video_feat_list.append([video_feat[0], video_feat[1][0]])
        hit_confi.append(video_feat[1][1]) # confidence of the collision edge
audio_feat_list = []
for audio_feat in audio_feat_dict.values():
audio_feat_list.append(audio_feat)
video_feat_mat = np.array(video_feat_list, dtype='float64')
audio_feat_mat = np.array(audio_feat_list, dtype='float64')
    # normalize feature 0 to max_intense
video_1_max = max(video_feat_mat[:, 0].max(), 1)
video_feat_mat[:, 0] = video_feat_mat[:, 0]/video_1_max*max_intense
audio_1_max = max(audio_feat_mat[:, 0].max(), 1)
audio_feat_mat[:, 0] = audio_feat_mat[:, 0]/audio_1_max*max_intense
    # identify the smaller and the larger sample set
video_num = video_feat_mat.shape[0]
audio_num = audio_feat_mat.shape[0]
    if video_num > audio_num: # audio is the smaller set
small_mat = audio_feat_mat
big_mat = video_feat_mat
small_type = 'audio'
    else: # equal size, or video is the smaller set
small_mat = video_feat_mat
big_mat = audio_feat_mat
small_type = 'equal_or_video'
    # pre-order traversal via recursion
small_num = small_mat.shape[0]
big_num = big_mat.shape[0]
num_remain = list(range(0, big_num))
current_match = -1*np.ones(small_num, dtype='int')
loss_min = 10000
best_match, _ = traverse(small_mat=small_mat, big_mat=big_mat,
hit_confi=hit_confi, small_type=small_type,
small_num=small_num, level=0,
num_remain=num_remain, current_match=current_match,
current_loss=0, loss_min=loss_min,
best_match=current_match)
    # assemble the result dicts
all_video_name = list(video_feat_dict.keys())
all_audio_name = list(audio_feat_dict.keys())
match_result = {} # {'audio_0001': 'video_0034'}
    unmatched = None # {'video' or 'audio': [names]}
    if video_num > audio_num: # more videos than audios
        # collect all matched pairs
for audio_counter, audio_name in enumerate(all_audio_name):
video_idx = best_match[audio_counter]
            video_name = all_video_name[video_idx] # folder name of the matched video
single_match = {audio_name: video_name}
match_result.update(single_match)
        # collect all unmatched videos
unmatched_video = []
for i in range(0, len(video_feat_dict)):
            if not (i in best_match): # this video was not matched
video_name = all_video_name[i]
unmatched_video.append(video_name)
if len(unmatched_video) > 0:
unmatched = {'video': unmatched_video}
    else: # no more videos than audios
        # collect all matched pairs, indexed by audio
for video_counter, video_name in enumerate(all_video_name):
audio_name = all_audio_name[best_match[video_counter]]
single_match = {audio_name: video_name}
match_result.update(single_match)
unmatched_audio = []
for i in range(0, len(audio_feat_dict)):
            if not (i in best_match): # this audio was not matched
audio_name = all_audio_name[i]
unmatched_audio.append(audio_name)
if len(unmatched_audio) > 0:
unmatched = {'audio': unmatched_audio}
return match_result, unmatched
def main():
video_feats = {'video00': [0, [1, 1]], 'video01': [1, [0, 1]], 'video2': [0.7, [1, 1]], 'video3': [0.8, [1, 1]]}
audio_feats = {'audio0': [1, 0], 'audio1': [0.8, 1]}
set_within_class_match(video_feats, audio_feats)
input()
if __name__ == '__main__':
main()
```
#### File: Collision-Classification-and-Matching/task3/t3_audio_CNN_3_test.py
```python
import os
import numpy as np
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import t2_audio_process
np.set_printoptions(threshold=np.inf)
class AudioDataset_test(Dataset):
def __init__(self, csv_file, transform=None):
self.data = np.load(csv_file)
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
data = np.reshape(self.data[idx, 0:], (4, 40, 1))
data = torch.tensor(data)
data = data.type(torch.FloatTensor)
return data
class RCNet(nn.Module):
def __init__(self, num_classes=10):
super(RCNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(in_channels=4, out_channels=16, kernel_size=1, stride=1, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=1, stride=1),
nn.Conv2d(16, 32, kernel_size=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(32 * 5 * 4, num_classes),
)
def forward(self, x):
x = self.features(x)
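        # Shape note (assuming the (4, 40, 1) inputs built by AudioDataset_test above):
        # features() maps (N, 4, 40, 1) -> (N, 32, 20, 1), i.e. 640 values per sample,
        # which matches the 32 * 5 * 4 (= 640) used by the flatten and Linear layer below.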
x = x.view(x.size(0), 32 * 5 * 4)
x = self.classifier(x)
return x
def test_rcnet(model, test_iter, device):
result = []
model.eval()
for X in test_iter:
X = X.to(device)
output = model(X)
_, preds = torch.max(output, 1)
obj = preds[0].item()
result.append(obj)
return result
def set_audio_classify(set_dir):
    '''Classify the audio files of one task2 test group and return the results as a dict.
    Args:
        set_dir (str): e.g. './dataset/task2/test/0'
    Returns:
        dict: e.g. {'audio_0000': 2, 'audio_0001': 3}
    '''
batch_size = 1
# load model
    model = torch.load('t1_cnn.pth') # use the same network parameters as task 1
###########################################################################
set_num = set_dir[-1]
data_name = 't3_'+set_num+'_audio_feats.npy'
    t2_audio_process.audio_process(set_dir, data_name) # compute the MFCC features
###########################################################################
test_dataset = AudioDataset_test(data_name)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    result_list = test_rcnet(model, test_dataloader, device) # list of predictions for all audio files
    # assemble into a dict
result_dict = {}
counter = 0
for file_name in os.listdir(set_dir):
file_pth = set_dir + '/' + file_name
        if not os.path.isdir(file_pth): # names of the individual audio files
result_dict.update({file_name: result_list[counter]})
counter += 1
return result_dict
def main():
set_dir = './dataset/task2/test/0'
result_dict = set_audio_classify(set_dir)
return result_dict
if __name__ == '__main__':
main()
``` |
{
"source": "jiangbestone/DetectRccn",
"score": 2
} |
#### File: jiangbestone/DetectRccn/trainval_net.py
```python
import argparse
import pdb
import time
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.sampler import Sampler
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
from lib.roi_data_layer import evaluate
from models.rcnn import Model
from lib import google_utils
from lib.data.tools.ds_unitls import *
from lib.data.tools.factory import *
from lib.data.imagenet import *
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int,
default=300)
parser.add_argument('--batch-size', type=int,
default=8)
parser.add_argument('--net', type=str,
default='lib/data/tools/datanet.yml', help='vgg.yaml, res101.yaml')
parser.add_argument('--cfgs', type=str,
default='cfgs/dataset.yml',
help='*dataset.yml')
parser.add_argument('--cuda', dest='cuda',
                        help='whether to use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
                        help='whether to use a large image scale',
action='store_true')
parser.add_argument('--img-size', nargs='+', type=int,
default=[640, 640],
help='train,test sizes')
parser.add_argument('--weights', type=str, default='',
help='weights path')
parser.add_argument('--bs', dest='batch_size',
help='batch_size',
default=1, type=int)
parser.add_argument('--cag', dest='class_agnostic',
help='whether to perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--device', default='',
help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
args = parser.parse_args()
return args
mixed_precision = True
try:
from apex import amp
except:
print('Apex recommended for faster mixed precision training: https://github.com/NVIDIA/apex')
mixed_precision = False # not installed
wdir = 'weights' + os.sep # weights dir
os.makedirs(wdir, exist_ok=True)
last = wdir + 'last.pt'
best = wdir + 'best.pt'
results_file = 'results.txt'
class sampler(Sampler):
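    # Yields dataset indices in contiguous blocks of batch_size, shuffling only the order of
    # the blocks (not the items inside them); any leftover tail indices are appended unshuffled.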
def __init__(self, train_size, batch_size):
self.num_data = train_size
self.num_per_batch = int(train_size / batch_size)
self.batch_size = batch_size
self.range = torch.arange(0,batch_size).view(1, batch_size).long()
self.leftover_flag = False
if train_size % batch_size:
self.leftover = torch.arange(self.num_per_batch*batch_size, train_size).long()
self.leftover_flag = True
def __iter__(self):
rand_num = torch.randperm(self.num_per_batch).view(-1,1) * self.batch_size
self.rand_num = rand_num.expand(self.num_per_batch, self.batch_size) + self.range
self.rand_num_view = self.rand_num.view(-1)
if self.leftover_flag:
self.rand_num_view = torch.cat((self.rand_num_view, self.leftover),0)
return iter(self.rand_num_view)
def __len__(self):
return self.num_data
def detect1Image(im0, imgsz, model, device, conf_thres, iou_thres, aug = False):
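    # Pipeline: letterbox-resize the frame to imgsz, convert BGR to RGB and HWC to CHW,
    # normalise to [0, 1], run the model (optionally with test-time augmentation), apply NMS
    # with conf_thres / iou_thres, then rescale the surviving boxes back to the original image
    # size and return (boxes, scores) as numpy arrays.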
img = letterbox(im0, new_shape=imgsz)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(device)
img = img.float() # uint8 to fp16/32
img /= 255.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
pred = model(img, augment=aug)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres)
boxes = []
scores = []
for i, det in enumerate(pred): # detections per image
# save_path = 'draw/' + image_id + '.jpg'
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Write results
for *xyxy, conf, cls in det:
boxes.append([int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])])
scores.append(conf)
return np.array(boxes), np.array(scores)
def train():
epochs = opt.epochs
batch_size = opt.batch_size
weights = opt.weights # initial training weights
# Configure
init_seeds(1)
#load dataset yaml
with open(opt.cfgs) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict['train']
test_path = data_dict['val']
nc = int(data_dict['nc'])
# Remove previous results
for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
os.remove(f)
# Create model
model = Model(opt.net, nc=data_dict['nc']).to(device)
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]
# Optimizer
nbs = 64
accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
param['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
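    # accumulate ~= 64 / batch_size, so gradients are summed over several mini-batches and the
    # optimizer steps roughly once per 64 images regardless of the mini-batch size.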
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
if v.requires_grad:
if '.bias' in k:
pg2.append(v) # biases
elif '.weight' in k and '.bn' not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
optimizer = optim.SGD(pg0, lr=param['init_lr'], momentum=param['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': param['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.9 + 0.1 # cosine
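    # The lambda above follows a cosine curve from 1.0 at epoch 0 toward 0.1 at the end of
    # training, so the scheduled learning rate decays smoothly to 10% of its initial value.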
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
print('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Load Model
google_utils.attempt_download(weights)
start_epoch, best_fitness = 0, 0.0
if weights.endswith('.pt'): # pytorch format
ckpt = torch.load(weights, map_location=device) # load checkpoint
# load model
try:
ckpt['model'] = {k: v for k, v in ckpt['model'].float().state_dict().items()
if model.state_dict()[k].shape == v.shape} # to FP32, filter
model.load_state_dict(ckpt['model'], strict=False)
except KeyError as e:
s = "%s is not compatible with %s. This may be due to model differences or %s may be out of date. " \
"Please delete or update %s and try again, or use --weights '' to train from scratch." \
% (opt.weights, opt.net, opt.weights, opt.weights)
raise KeyError(s) from e
# load optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# load results
if ckpt.get('training_results') is not None:
with open(results_file, 'w') as file:
file.write(ckpt['training_results']) # write results.txt
# epochs
start_epoch = ckpt['epoch'] + 1
if epochs < start_epoch:
print('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(opt.weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt
if mixed_precision:
model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
scheduler.last_epoch = start_epoch - 1 # do not move
if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
dist.init_process_group(backend='nccl', # distributed backend
init_method='tcp://127.0.0.1:9999', # init method
world_size=1, # number of nodes
rank=0) # node rank
model = torch.nn.parallel.DistributedDataParallel(model)
# pip install torch==1.4.0+cu100 torchvision==0.5.0+cu100 -f https://download.pytorch.org/whl/torch_stable.html
# Trainloader
dataloader, dataset = DataLoadModule(train_path, imgsz, batch_size, gs, opt,
hyp=param, augment=True, cache=False, rect=False)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Correct your labels or your model.' % (mlc, nc, opt.net)
# Testloader
testloader = DataLoadModule(test_path, imgsz_test, batch_size, gs, opt,
hyp=param, augment=False, cache=False, rect=True)[0]
# Model parameters
param['cls'] *= nc / 80.
model.nc = nc
model.hyp = param
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
model.names = data_dict['names']
# Class frequency
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
if tb_writer:
plot_labels(labels)
tb_writer.add_histogram('classes', c, 0)
check_anchors(dataset, model=model, thr=param['anchor_t'], imgsz=imgsz)
# Exponential moving average
ema = torch_utils.ModelEMA(model)
# Start training
t0 = time.time()
nb = len(dataloader) # number of batches
n_burn = max(3 * nb, 1e3) # burn-in iterations, max(3 epochs, 1k iterations)
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
print('Image sizes %g train, %g test' % (imgsz, imgsz_test))
print('Using %g dataloader workers' % dataloader.num_workers)
print('Starting training for %g epochs...' % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n) # rand weighted idx
mloss = torch.zeros(4, device=device) # mean losses
print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
pbar = tqdm(enumerate(dataloader), total=nb) # progress bar
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device).float() / 255.0 # uint8 to float32, 0 - 255 to 0.0 - 1.0
# Burn-in
if ni <= n_burn:
xi = [0, n_burn] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [0.9, param['momentum']])
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(pred, targets.to(device), model)
if not torch.isfinite(loss):
print('WARNING: non-finite loss, ending training ', loss_items)
return results
# Backward
if mixed_precision:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# Optimize
if ni % accumulate == 0:
optimizer.step()
optimizer.zero_grad()
ema.update(model)
# Print
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.4g' * 6) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
pbar.set_description(s)
# Plot
if ni < 3:
f = 'train_batch%g.jpg' % ni # filename
result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
if tb_writer and result is not None:
tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
scheduler.step()
# mAP
ema.update_attr(model)
final_epoch = epoch + 1 == epochs
no_test = False
if not no_test or final_epoch: # Calculate mAP
results, maps, times = evaluate.evaluate(opt.cfgs,
batch_size=batch_size,
imgsz=imgsz_test,
save_json=final_epoch and opt.cfgs.endswith(os.sep ),
model=ema.ema,
single_cls=False,
dataloader=testloader)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
bucket = ''
if len(name) and bucket:
os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (bucket, name))
# Tensorboard
if tb_writer:
tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
with open(results_file, 'r') as f: # create checkpoint
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': f.read(),
'model': ema.ema,
'optimizer': None if final_epoch else optimizer.state_dict()}
# Save last, best and delete
torch.save(ckpt, last)
if (best_fitness == fi) and not final_epoch:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
# Strip optimizers
    n = ('_' if len(name) and not name.isnumeric() else '') + name # opt defines no --name option; use the module-level name
fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith('.pt') # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
            os.system('gsutil cp %s gs://%s/weights' % (f2, bucket)) if bucket and ispt else None # upload (opt defines no --bucket option)
# Finish
    plot_results() # save as results.png (this script defines no --evolve option)
print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
dist.destroy_process_group() if device.type != 'cpu' and torch.cuda.device_count() > 1 else None
torch.cuda.empty_cache()
return results
if __name__ == '__main__':
check_git_status()
opt = parse_args()
ress = False
name = ''
opt.weights = last if ress and not opt.weights else opt.weights
opt.net = check_file(opt.net) # check file
opt.cfgs = check_file(opt.cfgs) # check file
print(opt)
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)
param = get_init()
if device.type == 'cpu':
mixed_precision = False
tb_writer = SummaryWriter(comment=name)
print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
train()
``` |
{
"source": "jiangbiaoah/nova",
"score": 2
} |
#### File: functional/libvirt/test_numa_live_migration.py
```python
import fixtures
from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import manager as compute_manager
from nova.compute import resource_tracker as rt
from nova import context
from nova import objects
from nova import test
from nova.tests.functional import integrated_helpers
from nova.tests.functional.libvirt import base
from nova.tests.unit.virt.libvirt import fake_os_brick_connector
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NUMALiveMigrationBase(base.ServersTestBase,
integrated_helpers.InstanceHelperMixin):
"""Base for all the test classes here. Gives us the NUMATopologyFilter and
small helper methods.
"""
api_major_version = 'v2.1'
microversion = 'latest'
ADDITIONAL_FILTERS = ['NUMATopologyFilter']
ADMIN_API = True
def setUp(self):
super(NUMALiveMigrationBase, self).setUp()
# NOTE(artom) There's a specific code path that we want to test.
# There's an instance.save() call in the compute manager's
# post_live_migration_at_destination(), and another instance.save()
# call in the libvirt driver's cleanup(), as called from
# _post_live_migration() in the compute manager. We want to make sure
# the latter does not clobber any NUMA topology information saved by
# the former. In order to trigger that code path, two things need to
# happen. First, the do_cleanup variable needs to be True, in order for
# driver.cleanup() to actually get called by _post_live_migration().
# Second, destroy_disks needs to be True as well, in order for
# cleanup() to enter the code block containing the instance.save()
# call. Both do_cleanup and destroy_disks are set by
# _live_migration_cleanup_flags(), so we just monkeypatch it to return
# what we want regardless of any shared storage configuration.
self.useFixture(fixtures.MonkeyPatch(
'nova.compute.manager.ComputeManager.'
'_live_migration_cleanup_flags',
lambda *args, **kwargs: (True, True)))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.connector',
fake_os_brick_connector))
def _migrate_stub(self, domain, destination, params, flags):
raise test.TestingException('_migrate_stub() must be implemented in '
                                    'tests that expect the live migration '
                                    'to start.')
def get_host(self, server_id):
server = self.api.get_server(server_id)
return server['OS-EXT-SRV-ATTR:host']
def _get_migration_context(self, instance_uuid):
ctxt = context.get_admin_context()
return objects.MigrationContext.get_by_instance_uuid(ctxt,
instance_uuid)
def _assert_instance_pinned_cpus(self, uuid, instance_cpus, host_cpus):
ctxt = context.get_admin_context()
topology = objects.InstanceNUMATopology.get_by_instance_uuid(
ctxt, uuid)
self.assertEqual(1, len(topology.cells))
# NOTE(artom) DictOfIntegersField has strings as keys, need to convert
self.assertCountEqual([str(cpu) for cpu in instance_cpus],
topology.cells[0].cpu_pinning_raw.keys())
self.assertCountEqual(host_cpus,
topology.cells[0].cpu_pinning_raw.values())
def _assert_host_consumed_cpus(self, host, cpus):
ctxt = context.get_admin_context()
topology = objects.NUMATopology.obj_from_db_obj(
objects.ComputeNode.get_by_nodename(ctxt, host).numa_topology)
self.assertCountEqual(cpus, topology.cells[0].pinned_cpus)
class NUMALiveMigrationPositiveBase(NUMALiveMigrationBase):
"""Base for all tests that expect the live migration to actually start.
Sets up an "environment" with two computes, each with 4 CPUs spead evenly
across 2 NUMA nodes.
"""
def setUp(self):
super(NUMALiveMigrationPositiveBase, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.tests.unit.virt.libvirt.fakelibvirt.Domain.migrateToURI3',
self._migrate_stub))
self.migrate_stub_ran = False
def start_computes_and_servers(self):
# Start 2 computes
self.start_compute(
hostname='host_a',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))
self.start_compute(
hostname='host_b',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))
# Create a 2-CPU flavor
extra_spec = {'hw:cpu_policy': 'dedicated'}
flavor = self._create_flavor(vcpu=2, extra_spec=extra_spec)
# Boot 2 servers with 2 CPUs each, one on host_a and one on host_b.
# Given the cpu_dedicated_set we set earlier, they should both be on
# CPUs 0,1.
for server_name, host in [('server_a', 'host_a'),
('server_b', 'host_b')]:
server = self._create_server(flavor_id=flavor, host=host,
networks='none')
setattr(self, server_name,
self._wait_for_state_change(server, 'ACTIVE'))
self.assertEqual(host, self.get_host(server['id']))
self._assert_instance_pinned_cpus(server['id'], [0, 1], [0, 1])
def _rpc_pin_host(self, hostname):
ctxt = context.get_admin_context()
dest_mgr = self.computes[hostname].manager
dest_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
'5.2')
self.assertFalse(
dest_mgr.compute_rpcapi.router.client(
ctxt).can_send_version('5.3'))
class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase):
"""Tests that expect the live migration to succeed. Stubs out fakelibvirt's
    migrateToURI3() with a stub that "succeeds" the migration.
"""
def _migrate_stub(self, domain, destination, params, flags):
"""This method is designed to stub out libvirt's migrateToURI3 in order
to test periodics running during the live migration. It also has the
nice side effect of giving us access to the destination XML so that we
can assert stuff about it. Because migrateToURI3 is spawned in a
background thread, this method does not block the upper Nova layers.
Because we don't want Nova to think the live migration has finished
until this method is done, the last thing we do is make fakelibvirt's
Domain.jobStats() return VIR_DOMAIN_JOB_COMPLETED.
"""
self.assertIsInstance(
self._get_migration_context(self.server_a['id']),
objects.MigrationContext)
# During the migration, server_a is consuming CPUs 0,1 on host_a, while
        # all 4 of host_b's CPUs are consumed by server_b and the incoming
# migration.
self._assert_host_consumed_cpus('host_a', [0, 1])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
host_a_rp = self._get_provider_uuid_by_name('host_a')
host_b_rp = self._get_provider_uuid_by_name('host_b')
usages_a = self._get_provider_usages(host_a_rp)
usages_b = self._get_provider_usages(host_b_rp)
self.assertEqual(2, usages_a['PCPU'])
self.assertEqual(4, usages_b['PCPU'])
# In a real live migration, libvirt and QEMU on the source and
# destination talk it out, resulting in the instance starting to exist
# on the destination. Fakelibvirt cannot do that, so we have to
# manually create the "incoming" instance on the destination
# fakelibvirt.
dest = self.computes['host_b']
dest.driver._host.get_connection().createXML(
params['destination_xml'],
'fake-createXML-doesnt-care-about-flags')
# The resource update periodic task should not change the consumed
# CPUs, as the migration is still happening. The test should still pass
# without running periodics, this just makes sure updating available
# resources does the right thing.
self._run_periodics()
self._assert_host_consumed_cpus('host_a', [0, 1])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
source = self.computes['host_a']
conn = source.driver._host.get_connection()
dom = conn.lookupByUUIDString(self.server_a['id'])
dom.complete_job()
self.migrate_stub_ran = True
def _test(self, pin_dest):
"""Live migrate the server on host_a to host_b.
"""
# Make sure instances initially land on "overlapping" CPUs on both
# hosts and boot 2 instances.
self.flags(cpu_dedicated_set='0,1', group='compute')
self.start_computes_and_servers()
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
self.computes['host_a'] = self.restart_compute_service(
self.computes['host_a'])
self.computes['host_b'] = self.restart_compute_service(
self.computes['host_b'])
# Live migrate, RPC-pinning the destination host if asked
if pin_dest:
self._rpc_pin_host('host_b')
self._live_migrate(self.server_a, 'completed')
self.assertEqual('host_b', self.get_host(self.server_a['id']))
self.assertIsNone(self._get_migration_context(self.server_a['id']))
# At this point host_a should have no CPUs consumed (server_a has moved
# to host_b), and host_b should have all of its CPUs consumed. In
# addition, server_a should be pinned to 2,3 because 0,1 are used up by
# server_b on host_b. Check this, then run periodics and check again.
# Running periodics is not necessary for the test to pass, but it's
# good to know it does the right thing.
self._assert_host_consumed_cpus('host_a', [])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
self._assert_instance_pinned_cpus(self.server_a['id'],
[0, 1], [2, 3])
self._run_periodics()
self._assert_host_consumed_cpus('host_a', [])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
self._assert_instance_pinned_cpus(self.server_a['id'],
[0, 1], [2, 3])
self.assertTrue(self.migrate_stub_ran)
# TODO(artom) It'd be a good idea to live migrate in the other
# direction here.
def test_numa_live_migration(self):
self._test(pin_dest=False)
def test_numa_live_migration_dest_pinned(self):
self._test(pin_dest=True)
def test_bug_1843639(self):
"""Live migrations in 'accepted' status were not considered in progress
before the fix for 1845146 merged, and were ignored by the update
available resources periodic task. From the task's POV, live-migrating
instances with migration status 'accepted' were considered to be on the
source, and any resource claims on the destination would get
erroneously removed. For that to happen, the task had to run at just
the "right" time, when the migration was in 'accepted' and had not yet
been moved to 'queued' by live_migration() in the compute manager.
This test triggers this race by wrapping around live_migration() and
running the update available resources periodic task while the
migration is still in 'accepted'.
"""
self.live_migration_ran = False
orig_live_migration = compute_manager.ComputeManager.live_migration
def live_migration(*args, **kwargs):
self._run_periodics()
# During the migration, server_a is consuming CPUs 0,1 on host_a,
            # while all 4 of host_b's CPUs are consumed by server_b and the
            # incoming migration.
self._assert_host_consumed_cpus('host_a', [0, 1])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
# The migration should also be in 'accepted' at this point in time.
ctxt = context.get_admin_context()
self.assertIsInstance(
objects.Migration.get_by_instance_and_status(
ctxt, self.server_a['id'], 'accepted'),
objects.Migration)
self.live_migration_ran = True
return orig_live_migration(*args, **kwargs)
self.useFixture(fixtures.MonkeyPatch(
'nova.compute.manager.ComputeManager.live_migration',
live_migration))
self._test(pin_dest=False)
self.assertTrue(self.live_migration_ran)
class NUMALiveMigrationRollbackTests(NUMALiveMigrationPositiveBase):
"""Tests that expect the live migration to fail, and exist to test the
rollback code. Stubs out fakelibvirt's migrateToURI3() with a stub that
"fails" the migration.
"""
def _migrate_stub(self, domain, destination, params, flags):
"""Designed to stub fakelibvirt's migrateToURI3 and "fail" the
live migration by monkeypatching jobStats() to return an error.
"""
self.assertIsInstance(
self._get_migration_context(self.server_a['id']),
objects.MigrationContext)
# During the migration, server_a is consuming CPUs 0,1 on host_a, while
        # all 4 of host_b's CPUs are consumed by server_b and the incoming
# migration.
self._assert_host_consumed_cpus('host_a', [0, 1])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
# The resource update periodic task should not change the consumed
# CPUs, as the migration is still happening. As usual, running
# periodics is not necessary to make the test pass, but it's good to
# make sure it does the right thing.
self._run_periodics()
self._assert_host_consumed_cpus('host_a', [0, 1])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
source = self.computes['host_a']
conn = source.driver._host.get_connection()
dom = conn.lookupByUUIDString(self.server_a['id'])
dom.fail_job()
self.migrate_stub_ran = True
def _test(self, pin_dest=False):
# Make sure instances initially land on "overlapping" CPUs on both
# hosts and boot 2 instances.
self.flags(cpu_dedicated_set='0,1', group='compute')
self.start_computes_and_servers()
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
self.computes['host_a'] = self.restart_compute_service(
self.computes['host_a'])
self.computes['host_b'] = self.restart_compute_service(
self.computes['host_b'])
# Live migrate, RPC-pinning the destination host if asked. This is a
# rollback test, so server_a is expected to remain on host_a.
if pin_dest:
self._rpc_pin_host('host_b')
self._live_migrate(self.server_a, 'failed')
self.assertEqual('host_a', self.get_host(self.server_a['id']))
self.assertIsNone(self._get_migration_context(self.server_a['id']))
def _test_rollback(self, pin_dest=False):
self._test(pin_dest)
# Check consumed and pinned CPUs. Things should be as they were before
# the live migration, with CPUs 0,1 consumed on both hosts by the 2
# servers.
self._assert_host_consumed_cpus('host_a', [0, 1])
self._assert_host_consumed_cpus('host_b', [0, 1])
self._assert_instance_pinned_cpus(self.server_a['id'],
[0, 1], [0, 1])
def test_rollback(self):
self._test_rollback()
def test_rollback_pinned_dest(self):
self._test_rollback(pin_dest=True)
def _test_bug_1894095(self, pre_drop_race=False, post_drop_race=False):
"""Reproducer for bug #1894095 under live migration.
Demonstrate the possibility of races caused by running the resource
tracker's periodic task between marking a migration as failed and
dropping the claim for that migration on the destination host.
"""
orig_drop_move_claim = rt.ResourceTracker.drop_move_claim
def drop_move_claim(*args, **kwargs):
"""Run periodics after marking the migration confirmed, simulating
            a race between doing this and actually dropping the claim.
"""
# check the usage, which should show usage on both hosts: server_a
# on host_a, and server_b plus server_a's migration on host_b
self._assert_host_consumed_cpus('host_a', [0, 1])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
if pre_drop_race:
self._run_periodics()
# FIXME(stephenfin): This is picking up server_a's "destination
# CPUs", intended for host_b, on host_a
self._assert_host_consumed_cpus('host_a', [2, 3])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
# self._assert_host_consumed_cpus('host_a', [0, 1])
# self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
result = orig_drop_move_claim(*args, **kwargs)
if pre_drop_race:
# FIXME(stephenfin): host_a's pinning information is still
# incorrect, which is expected since dropping the move claim
# makes no changes to the source host
self._assert_host_consumed_cpus('host_a', [2, 3])
else:
self._assert_host_consumed_cpus('host_a', [0, 1])
self._assert_host_consumed_cpus('host_b', [0, 1])
if post_drop_race:
self._run_periodics()
# FIXME(stephenfin): host_a is using the wrong pinned CPUs and
# host_b has regained its previously dropped allocation
self._assert_host_consumed_cpus('host_a', [2, 3])
self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
# self._assert_host_consumed_cpus('host_a', [0, 1])
# self._assert_host_consumed_cpus('host_b', [0, 1])
return result
self.useFixture(fixtures.MonkeyPatch(
'nova.compute.resource_tracker.ResourceTracker.drop_move_claim',
drop_move_claim))
self._test()
self._run_periodics()
self._assert_host_consumed_cpus('host_a', [0, 1])
self._assert_host_consumed_cpus('host_b', [0, 1])
def test_bug_1894095_pre_drop(self):
self._test_bug_1894095(pre_drop_race=True)
def test_bug_1894095_post_drop(self):
self._test_bug_1894095(post_drop_race=True)
class NUMALiveMigrationLegacyBase(NUMALiveMigrationPositiveBase):
"""Base for tests that ensure that correct legacy behaviour is observed
    when either the conductor or the source is pinned to an old RPC version.
Sets up two identical compute hosts and "fills" them with an instance each.
In such a situation, live migrating one of the instances should fail with
the new NUMA live migration code, but the old legacy behaviour is for the
live migration to go through (if forced through the API, thus bypassing the
scheduler).
"""
api_major_version = 'v2.1'
# NOTE(artom) After 2.67 we can no longer bypass the scheduler for live
# migration, which we need to do here to force the live migration to a host
# that's already full.
microversion = '2.67'
def setUp(self):
super(NUMALiveMigrationLegacyBase, self).setUp()
self.flags(compute='auto', group='upgrade_levels')
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.service.get_minimum_version_all_cells',
lambda *args, **kwargs: objects.service.SERVICE_VERSION))
def _test(self, pin_source, pin_cond, expect_success=True):
self.start_compute(
hostname='source',
host_info=fakelibvirt.HostInfo())
self.start_compute(
hostname='dest',
host_info=fakelibvirt.HostInfo())
ctxt = context.get_admin_context()
src_mgr = self.computes['source'].manager
cond_mgr = self.conductor.manager.compute_task_mgr
if pin_source:
src_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
'5.2')
else:
# Since we upgraded the RPC API to 6.0, we somehow need to pin the
# compute service here to 5.max to verify the legacy behaviours.
# TODO(sbauza): Remove this cruft
src_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
'5.13')
if pin_cond:
cond_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
'5.2')
else:
# Since we upgraded the RPC API to 6.0, we somehow need to pin the
# compute service here to 5.max to verify the legacy behaviours.
# TODO(sbauza): Remove this cruft
cond_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
'5.13')
self.assertEqual(
not pin_source,
src_mgr.compute_rpcapi.router.client(
ctxt).can_send_version('5.3'))
self.assertEqual(
not pin_cond,
cond_mgr.compute_rpcapi.router.client(
ctxt).can_send_version('5.3'))
extra_spec = {'hw:numa_nodes': 1,
'hw:cpu_policy': 'dedicated'}
flavor = self._create_flavor(vcpu=2, extra_spec=extra_spec)
server1 = self._create_server(flavor_id=flavor, networks='none')
server2 = self._create_server(flavor_id=flavor, networks='none')
if self.get_host(server1['id']) == 'source':
self.migrating_server = server1
else:
self.migrating_server = server2
self.api.post_server_action(
self.migrating_server['id'],
{'os-migrateLive': {'host': 'dest',
'block_migration': 'auto',
'force': True}})
self._wait_for_state_change(self.migrating_server, 'ACTIVE')
if expect_success:
final_host = 'dest'
self._wait_for_migration_status(self.migrating_server,
['completed'])
else:
final_host = 'source'
self._wait_for_migration_status(self.migrating_server, ['failed'])
self.assertEqual(final_host,
self.get_host(self.migrating_server['id']))
self.assertTrue(self.migrate_stub_ran)
class NUMALiveMigrationLegacyTests(NUMALiveMigrationLegacyBase):
"""Tests that legacy live migration behavior is observed when either the
    source or the conductor is pinned to an old RPC version. Stubs
fakelibvirt's migrateToURI3 method with a stub that "succeeds" the
migration.
"""
def _migrate_stub(self, domain, destination, params, flags):
# NOTE(artom) This is the crucial bit: by asserting that the migrating
# instance has no migration context, we're making sure that we're
# hitting the old, pre-claims code paths.
self.assertIsNone(
self._get_migration_context(self.migrating_server['id']))
dest = self.computes['dest']
dest.driver._host.get_connection().createXML(
params['destination_xml'],
'fake-createXML-doesnt-care-about-flags')
source = self.computes['source']
conn = source.driver._host.get_connection()
dom = conn.lookupByUUIDString(self.migrating_server['id'])
dom.complete_job()
self.migrate_stub_ran = True
def test_source_pinned_dest_unpinned(self):
self._test(pin_source=True, pin_cond=False)
def test_conductor_pinned(self):
self._test(pin_source=False, pin_cond=True)
class NUMALiveMigrationLegacyRollbackTests(NUMALiveMigrationLegacyBase):
"""Tests that rollback works correctly when either the source or conductor
    is pinned to an old RPC version. Stubs fakelibvirt's migrateToURI3 method
    with a stub that "fails" the migration in order to trigger rollback.
"""
def _migrate_stub(self, domain, destination, params, flags):
# NOTE(artom) This is the crucial bit: by asserting that the migrating
# instance has no migration context, we're making sure that we're
# hitting the old, pre-claims code paths.
self.assertIsNone(
self._get_migration_context(self.migrating_server['id']))
source = self.computes['source']
conn = source.driver._host.get_connection()
dom = conn.lookupByUUIDString(self.migrating_server['id'])
dom.fail_job()
self.migrate_stub_ran = True
def test_source_pinned_dest_unpinned(self):
self._test(pin_source=True, pin_cond=False, expect_success=False)
def test_conductor_pinned(self):
self._test(pin_source=False, pin_cond=True, expect_success=False)
class NUMALiveMigrationNegativeTests(NUMALiveMigrationBase):
"""Tests that live migrations are refused if the instance cannot fit on the
destination host (even if the scheduler was bypassed by forcing in the
API).
"""
api_major_version = 'v2.1'
# NOTE(artom) We're trying to test the new NUMA live migration claims, not
# the scheduler, so we use microversion 2.67, which is the last one where
# we can still bypass the scheduler and force a live migration to a host.
microversion = '2.67'
def test_insufficient_resources(self):
self.start_compute(
hostname='host_a',
host_info=fakelibvirt.HostInfo(
cpu_nodes=1, cpu_sockets=1, cpu_cores=3, cpu_threads=1))
self.start_compute(
hostname='host_b',
host_info=fakelibvirt.HostInfo(
cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=1))
extra_spec = {'hw:numa_nodes': 1,
'hw:cpu_policy': 'dedicated'}
flavor = self._create_flavor(vcpu=3, extra_spec=extra_spec)
server = self._build_server(
flavor_id=flavor,
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
server['networks'] = 'none'
post = {'server': server}
server = self.api.post_server(post)
self._wait_for_state_change(server, 'ACTIVE')
self.assertEqual('host_a', self.get_host(server['id']))
# NOTE(artom) Because we use the CastAsCall fixture, we expect the
# MigrationPreCheckError to be bubbled up to the API as an error 500.
# TODO(artom) Stop using CastAsCall to make it more realistic.
self.api.api_post(
'/servers/%s/action' % server['id'],
{'os-migrateLive': {'host': 'host_b',
'block_migration': 'auto',
'force': True}},
check_response_status=[500])
self._wait_for_state_change(server, 'ACTIVE')
self._wait_for_migration_status(server, ['error'])
self.assertIsNone(self._get_migration_context(server['id']))
self.assertEqual('host_a', self.get_host(server['id']))
log_out = self.stdlog.logger.output
self.assertIn('Migration pre-check error: '
'Insufficient compute resources: '
'Requested instance NUMA topology cannot fit', log_out)
def test_different_page_sizes(self):
self.start_compute(
hostname='host_a',
host_info=fakelibvirt.HostInfo(
kB_mem=1024000, mempages={
0: fakelibvirt.create_mempages([(4, 256000), (1024, 1000)])
}))
self.start_compute(
hostname='host_b',
host_info=fakelibvirt.HostInfo(
kB_mem=1024000, mempages={
0: fakelibvirt.create_mempages([(4, 256000), (2048, 500)]),
}))
extra_spec = {'hw:numa_nodes': 1,
'hw:cpu_policy': 'dedicated',
'hw:mem_page_size': 'large'}
flavor = self._create_flavor(vcpu=2, memory_mb=512,
extra_spec=extra_spec)
server = self._build_server(
flavor_id=flavor,
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
server['networks'] = 'none'
post = {'server': server}
server = self.api.post_server(post)
self._wait_for_state_change(server, 'ACTIVE')
initial_host = self.get_host(server['id'])
dest_host = 'host_a' if initial_host == 'host_b' else 'host_b'
# NOTE(artom) Because we use the CastAsCall fixture, we expect the
# MigrationPreCheckError to be bubbled up to the API as an error 500.
# TODO(artom) Stop using CastAsCall to make it more realistic.
self.api.api_post(
'/servers/%s/action' % server['id'],
{'os-migrateLive': {'host': dest_host,
'block_migration': 'auto',
'force': True}},
check_response_status=[500])
self._wait_for_state_change(server, 'ACTIVE')
self._wait_for_migration_status(server, ['error'])
self.assertEqual(initial_host, self.get_host(server['id']))
self.assertIsNone(self._get_migration_context(server['id']))
log_out = self.stdlog.logger.output
self.assertIn('Migration pre-check error: '
'Insufficient compute resources: '
'Requested page size is different from current page '
'size.', log_out)
```
#### File: functional/libvirt/test_vpmem.py
```python
import fixtures
from oslo_config import cfg
from oslo_log import log as logging
from nova import objects
from nova.tests.functional.libvirt import integrated_helpers
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
FAKE_LIBVIRT_VERSION = 5000000
FAKE_QEMU_VERSION = 3001000
def setUp(self):
super(VPMEMTestBase, self).setUp()
self.flags(pmem_namespaces="4GB:ns_0,SMALL:ns_1|ns_2",
group='libvirt')
self.fake_pmem_namespaces = '''
[{"dev":"namespace0.0",
"mode":"devdax",
"map":"mem",
"size":4292870144,
"uuid":"24ffd5e4-2b39-4f28-88b3-d6dc1ec44863",
"daxregion":{"id": 0, "size": 4292870144,"align": 2097152,
"devices":[{"chardev":"dax0.0",
"size":4292870144}]},
"name":"ns_0",
"numa_node":0},
{"dev":"namespace0.1",
"mode":"devdax",
"map":"mem",
"size":4292870144,
"uuid":"ac64fe52-de38-465b-b32b-947a6773ac66",
"daxregion":{"id": 0, "size": 4292870144,"align": 2097152,
"devices":[{"chardev":"dax0.1",
"size":4292870144}]},
"name":"ns_1",
"numa_node":0},
{"dev":"namespace0.2",
"mode":"devdax",
"map":"mem",
"size":4292870144,
"uuid":"2ff41eba-db9c-4bb9-a959-31d992568a3e",
"raw_uuid":"0b61823b-5668-4856-842d-c644dae83410",
"daxregion":{"id":0, "size":4292870144, "align":2097152,
"devices":[{"chardev":"dax0.2",
"size":4292870144}]},
"name":"ns_2",
"numa_node":0}]'''
self.useFixture(fixtures.MockPatch(
'nova.privsep.libvirt.cleanup_vpmem'))
self.useFixture(fixtures.MockPatch(
'nova.privsep.libvirt.get_pmem_namespaces',
return_value=self.fake_pmem_namespaces))
self.useFixture(fake_imagebackend.ImageBackendFixture())
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128,
'used': 44,
'free': 84}))
self.mock_conn = self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.host.Host._get_new_connection')).mock
def _get_connection(self, host_info, hostname=None):
fake_connection = fakelibvirt.Connection(
'qemu:///system',
version=self.FAKE_LIBVIRT_VERSION,
hv_version=self.FAKE_QEMU_VERSION,
host_info=host_info,
hostname=hostname)
return fake_connection
def _start_compute_service(self, hostname):
fake_connection = self._get_connection(
# Need a host to support creating more servers with vpmems
host_info=fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2),
hostname=hostname)
self.mock_conn.return_value = fake_connection
compute = self._start_compute(host=hostname)
        # Ensure the existing pmem namespaces are populated correctly.
vpmems = compute.driver._vpmems_by_name
expected_vpmems = {
'ns_0': objects.LibvirtVPMEMDevice(
label='4GB', name='ns_0', devpath='/dev/dax0.0',
size=4292870144, align=2097152),
'ns_1': objects.LibvirtVPMEMDevice(
label='SMALL', name='ns_1', devpath='/dev/dax0.1',
size=4292870144, align=2097152),
'ns_2': objects.LibvirtVPMEMDevice(
label='SMALL', name='ns_2', devpath='/dev/dax0.2',
size=4292870144, align=2097152)}
self.assertDictEqual(expected_vpmems, vpmems)
        # Ensure vpmem resources are reported correctly.
rp_uuid = self._get_provider_uuid_by_host(compute.host)
inventory = self._get_provider_inventory(rp_uuid)
self.assertEqual(1, inventory['CUSTOM_PMEM_NAMESPACE_4GB']['total'])
self.assertEqual(2, inventory['CUSTOM_PMEM_NAMESPACE_SMALL']['total'])
return compute
def _create_server(self, flavor_id, hostname, expected_state):
return super(VPMEMTestBase, self)._create_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=flavor_id,
networks='none',
az='nova:%s' % hostname,
expected_state=expected_state)
def _delete_server(self, server):
self.api.delete_server(server['id'])
def _check_vpmem_allocations(self, vpmem_allocs, server_id, cn_uuid):
cn_allocs = self._get_allocations_by_server_uuid(
server_id)[cn_uuid]['resources']
for rc, amount in vpmem_allocs.items():
self.assertEqual(amount, cn_allocs[rc])
class VPMEMTests(VPMEMTestBase):
def setUp(self):
super(VPMEMTests, self).setUp()
extra_spec = {"hw:pmem": "SMALL"}
self.flavor = self._create_flavor(extra_spec=extra_spec)
def test_create_servers_with_vpmem(self):
# Start one compute service
self.compute1 = self._start_compute_service('host1')
cn1_uuid = self._get_provider_uuid_by_host(self.compute1.host)
# Boot two servers with pmem
server1 = self._create_server(self.flavor, self.compute1.host,
expected_state='ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server1['id'], cn1_uuid)
server2 = self._create_server(self.flavor, self.compute1.host,
expected_state='ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server2['id'], cn1_uuid)
        # All 'SMALL' vpmem resources have been used up
server3 = self._create_server(self.flavor, self.compute1.host,
expected_state='ERROR')
# Delete server2, one 'SMALL' VPMEM will be released
self._delete_server(server2)
self._wait_until_deleted(server2)
server3 = self._create_server(self.flavor, self.compute1.host,
expected_state='ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server3['id'], cn1_uuid)
class VPMEMResizeTests(VPMEMTestBase):
def setUp(self):
super(VPMEMResizeTests, self).setUp()
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_instance_disk_info',
return_value=[]))
self.useFixture(fixtures.MockPatch('os.rename'))
extra_spec = {"hw:pmem": "SMALL"}
self.flavor1 = self._create_flavor(extra_spec=extra_spec)
extra_spec = {"hw:pmem": "4GB,SMALL"}
self.flavor2 = self._create_flavor(extra_spec=extra_spec)
def _resize_server(self, server, flavor):
resize_req = {
'resize': {
'flavorRef': flavor
}
}
self.api.api_post('/servers/%s/action' % server['id'],
resize_req)
def _confirm_resize(self, server):
confirm_resize_req = {'confirmResize': None}
self.api.api_post('/servers/%s/action' % server['id'],
confirm_resize_req)
def _revert_resize(self, server):
revert_resize_req = {'revertResize': None}
self.api.api_post('/servers/%s/action' % server['id'],
revert_resize_req)
def test_resize(self):
self.flags(allow_resize_to_same_host=False)
# Start two compute nodes
self.compute1 = self._start_compute_service('host1')
self.compute2 = self._start_compute_service('host2')
cn1_uuid = self._get_provider_uuid_by_host(self.compute1.host)
cn2_uuid = self._get_provider_uuid_by_host(self.compute2.host)
# Boot one server with pmem, then resize the server
server = self._create_server(self.flavor1, self.compute1.host,
expected_state='ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
# Revert resize
self._resize_server(server, self.flavor2)
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn2_uuid)
self._revert_resize(server)
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
# Confirm resize
self._resize_server(server, self.flavor2)
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn2_uuid)
self._confirm_resize(server)
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn2_uuid)
def test_resize_same_host(self):
self.flags(allow_resize_to_same_host=True)
        # Start one compute node
self.compute1 = self._start_compute_service('host1')
cn1_uuid = self._get_provider_uuid_by_host(self.compute1.host)
# Boot one server with pmem, then resize the server
server = self._create_server(self.flavor1, self.compute1.host,
expected_state='ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
# Revert resize
self._resize_server(server, self.flavor2)
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
self._revert_resize(server)
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
# Confirm resize
self._resize_server(server, self.flavor2)
self._wait_for_state_change(server, 'VERIFY_RESIZE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
self._confirm_resize(server)
self._wait_for_state_change(server, 'ACTIVE')
self._check_vpmem_allocations({'CUSTOM_PMEM_NAMESPACE_4GB': 1,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1},
server['id'], cn1_uuid)
``` |
{
"source": "JiangboHe/sonic-py-swsssdk",
"score": 2
} |
#### File: src/swsssdk/dbconnector.py
```python
from . import logger
from .interface import DBInterface
import os
import json
# FIXME: Convert to metaclasses when Py2 support is removed. Metaclasses have unique interfaces to Python2/Python3.
class SonicDBConfig(object):
SONIC_DB_CONFIG_FILE = "/var/run/redis/sonic-db/database_config.json"
_sonic_db_config_init = False
_sonic_db_config = {}
@staticmethod
def load_sonic_db_config(sonic_db_file_path=SONIC_DB_CONFIG_FILE):
"""
Get multiple database config from the database_config.json
"""
if SonicDBConfig._sonic_db_config_init == True:
return
try:
if os.path.isfile(sonic_db_file_path) == False:
msg = "'{}' is not found, it is not expected in production devices!!".format(sonic_db_file_path)
logger.warning(msg)
sonic_db_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config', 'database_config.json')
with open(sonic_db_file_path, "r") as read_file:
SonicDBConfig._sonic_db_config = json.load(read_file)
except (OSError, IOError):
msg = "Could not open sonic database config file '{}'".format(sonic_db_file_path)
logger.exception(msg)
raise RuntimeError(msg)
SonicDBConfig._sonic_db_config_init = True
@staticmethod
def db_name_validation(db_name):
if SonicDBConfig._sonic_db_config_init == False:
SonicDBConfig.load_sonic_db_config()
if db_name not in SonicDBConfig._sonic_db_config["DATABASES"]:
msg = "{} is not a valid database name in configuration file".format(db_name)
logger.exception(msg)
raise RuntimeError(msg)
@staticmethod
def inst_name_validation(inst_name):
if SonicDBConfig._sonic_db_config_init == False:
SonicDBConfig.load_sonic_db_config()
if inst_name not in SonicDBConfig._sonic_db_config["INSTANCES"]:
msg = "{} is not a valid instance name in configuration file".format(inst_name)
logger.exception(msg)
raise RuntimeError(msg)
@staticmethod
def get_dblist():
if SonicDBConfig._sonic_db_config_init == False:
SonicDBConfig.load_sonic_db_config()
return SonicDBConfig._sonic_db_config["DATABASES"].keys()
@staticmethod
def get_instance(db_name):
if SonicDBConfig._sonic_db_config_init == False:
SonicDBConfig.load_sonic_db_config()
SonicDBConfig.db_name_validation(db_name)
inst_name = SonicDBConfig._sonic_db_config["DATABASES"][db_name]["instance"]
SonicDBConfig.inst_name_validation(inst_name)
return SonicDBConfig._sonic_db_config["INSTANCES"][inst_name]
@staticmethod
def get_socket(db_name):
if SonicDBConfig._sonic_db_config_init == False:
SonicDBConfig.load_sonic_db_config()
SonicDBConfig.db_name_validation(db_name)
return SonicDBConfig.get_instance(db_name)["unix_socket_path"]
@staticmethod
def get_hostname(db_name):
if SonicDBConfig._sonic_db_config_init == False:
SonicDBConfig.load_sonic_db_config()
SonicDBConfig.db_name_validation(db_name)
return SonicDBConfig.get_instance(db_name)["hostname"]
@staticmethod
def get_port(db_name):
if SonicDBConfig._sonic_db_config_init == False:
SonicDBConfig.load_sonic_db_config()
SonicDBConfig.db_name_validation(db_name)
return SonicDBConfig.get_instance(db_name)["port"]
@staticmethod
def get_dbid(db_name):
if SonicDBConfig._sonic_db_config_init == False:
SonicDBConfig.load_sonic_db_config()
SonicDBConfig.db_name_validation(db_name)
return SonicDBConfig._sonic_db_config["DATABASES"][db_name]["id"]
@staticmethod
def get_separator(db_name):
if SonicDBConfig._sonic_db_config_init == False:
SonicDBConfig.load_sonic_db_config()
SonicDBConfig.db_name_validation(db_name)
return SonicDBConfig._sonic_db_config["DATABASES"][db_name]["separator"]
class SonicV2Connector(DBInterface):
def __init__(self, use_unix_socket_path=False, **kwargs):
super(SonicV2Connector, self).__init__(**kwargs)
self.use_unix_socket_path = use_unix_socket_path
for db_name in self.get_db_list():
# set a database name as a constant value attribute.
setattr(self, db_name, db_name)
def connect(self, db_name, retry_on=True):
if self.use_unix_socket_path:
self.redis_kwargs["unix_socket_path"] = self.get_db_socket(db_name)
self.redis_kwargs["host"] = None
self.redis_kwargs["port"] = None
else:
self.redis_kwargs["host"] = self.get_db_hostname(db_name)
self.redis_kwargs["port"] = self.get_db_port(db_name)
self.redis_kwargs["unix_socket_path"] = None
db_id = self.get_dbid(db_name)
super(SonicV2Connector, self).connect(db_id, retry_on)
def close(self, db_name):
db_id = self.get_dbid(db_name)
super(SonicV2Connector, self).close(db_id)
def get_db_list(self):
return SonicDBConfig.get_dblist()
def get_db_instance(self, db_name):
return SonicDBConfig.get_instance(db_name)
def get_db_socket(self, db_name):
return SonicDBConfig.get_socket(db_name)
def get_db_hostname(self, db_name):
return SonicDBConfig.get_hostname(db_name)
def get_db_port(self, db_name):
return SonicDBConfig.get_port(db_name)
def get_dbid(self, db_name):
return SonicDBConfig.get_dbid(db_name)
def get_db_separator(self, db_name):
return SonicDBConfig.get_separator(db_name)
def get_redis_client(self, db_name):
db_id = self.get_dbid(db_name)
return super(SonicV2Connector, self).get_redis_client(db_id)
def publish(self, db_name, channel, message):
db_id = self.get_dbid(db_name)
return super(SonicV2Connector, self).publish(db_id, channel, message)
def expire(self, db_name, key, timeout_sec):
db_id = self.get_dbid(db_name)
return super(SonicV2Connector, self).expire(db_id, key, timeout_sec)
def exists(self, db_name, key):
db_id = self.get_dbid(db_name)
return super(SonicV2Connector, self).exists(db_id, key)
def keys(self, db_name, pattern='*', *args, **kwargs):
db_id = self.get_dbid(db_name)
return super(SonicV2Connector, self).keys(db_id, pattern, *args, **kwargs)
def get(self, db_name, _hash, key, *args, **kwargs):
db_id = self.get_dbid(db_name)
return super(SonicV2Connector, self).get(db_id, _hash, key, *args, **kwargs)
def get_all(self, db_name, _hash, *args, **kwargs):
db_id = self.get_dbid(db_name)
return super(SonicV2Connector, self).get_all(db_id, _hash, *args, **kwargs)
def set(self, db_name, _hash, key, val, *args, **kwargs):
db_id = self.get_dbid(db_name)
return super(SonicV2Connector, self).set(db_id, _hash, key, val, *args, **kwargs)
def delete(self, db_name, key, *args, **kwargs):
db_id = self.get_dbid(db_name)
return super(SonicV2Connector, self).delete(db_id, key, *args, **kwargs)
def delete_all_by_pattern(self, db_name, pattern, *args, **kwargs):
db_id = self.get_dbid(db_name)
super(SonicV2Connector, self).delete_all_by_pattern(db_id, pattern, *args, **kwargs)
pass
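# --- Usage sketch (added for illustration; not part of the original module) ---
# Minimal example of reading one hash from a named database. The database name
# 'CONFIG_DB' and the key 'DEVICE_METADATA|localhost' are assumptions based on
# common SONiC conventions; any name defined in database_config.json works.
def _example_read_config_db():
    db = SonicV2Connector(use_unix_socket_path=True)
    db.connect('CONFIG_DB')
    entry = db.get_all('CONFIG_DB', 'DEVICE_METADATA|localhost')
    db.close('CONFIG_DB')
    return entry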
``` |
{
"source": "JiangboHe/sonic-swss",
"score": 2
} |
#### File: sonic-swss/tests/test_acl_ctrl.py
```python
import time
import pytest
from swsscommon import swsscommon
class TestPortChannelAcl(object):
def setup_db(self, dvs):
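        # DB ids follow the usual SONiC convention (0 = APPL_DB, 1 = ASIC_DB,
        # 4 = CONFIG_DB), inferred from the variable names and how they are used below.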
self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0)
self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0)
self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0)
def create_acl_table(self, dvs):
tbl = swsscommon.Table(self.cdb, "ACL_TABLE")
fvs = swsscommon.FieldValuePairs([("POLICY_DESC", "CTRL_ACL_TEST"),
("TYPE", "CTRLPLANE"),
("SERVICES@", "SNMP")])
tbl.set("CTRL_ACL_TABLE", fvs)
time.sleep(1)
def remove_acl_table(self, dvs):
tbl = swsscommon.Table(self.cdb, "ACL_TABLE")
tbl._del("CTRL_ACL_TABLE")
time.sleep(1)
def create_acl_rule(self, dvs):
tbl = swsscommon.Table(self.cdb, "ACL_RULE")
fvs = swsscommon.FieldValuePairs([("PRIORITY", "88"),
("PACKET_ACTION", "FORWARD"),
("L4_SRC_PORT", "8888")])
tbl.set("CTRL_ACL_TABLE|CTRL_ACL_RULE", fvs)
time.sleep(1)
def remove_acl_rule(self, dvs):
tbl = swsscommon.Table(self.cdb, "ACL_RULE")
tbl._del("CTRL_ACL_TABLE|CTRL_ACL_RULE")
time.sleep(1)
def check_asic_table_absent(self, dvs):
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE")
acl_tables = tbl.getKeys()
for key in dvs.asicdb.default_acl_tables:
assert key in acl_tables
acl_tables = [k for k in acl_tables if k not in dvs.asicdb.default_acl_tables]
assert len(acl_tables) == 0
def check_asic_rule_absent(self, dvs):
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY")
acl_entries = tbl.getKeys()
for key in dvs.asicdb.default_acl_entries:
assert key in acl_entries
acl_entries = [k for k in acl_entries if k not in dvs.asicdb.default_acl_entries]
assert len(acl_entries) == 0
def test_AclCtrl(self, dvs):
self.setup_db(dvs)
# create ACL table and ACL rule
self.create_acl_table(dvs)
self.create_acl_rule(dvs)
# check ASIC table
self.check_asic_table_absent(dvs)
self.check_asic_rule_absent(dvs)
# remove ACL table
self.remove_acl_table(dvs)
self.remove_acl_rule(dvs)
# Add a dummy always-pass test at the end as a workaround for an issue where,
# if Flaky fails on the final test, it invokes module tear-down before retrying.
def test_nonflaky_dummy():
pass
```
#### File: sonic-swss/tests/test_port_dpb_acl.py
```python
import pytest
from port_dpb import DPB
maxPorts = 32
maxBreakout = 4
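# With 32 front-panel ports and a 4-way breakout, there are 32 // 4 = 8 root ports.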
maxRootPorts = maxPorts // maxBreakout
maxAclTables = 16
@pytest.mark.usefixtures('dpb_setup_fixture')
@pytest.mark.xfail(reason="sonic cfggen bug: buildimage#5263")
class TestPortDPBAcl(object):
def test_acl_table_empty_port_list(self, dvs_acl):
# Create ACL table "test" and bind it to Ethernet0
bind_ports = []
dvs_acl.create_acl_table("test", "L3", bind_ports)
dvs_acl.verify_acl_table_count(1)
dvs_acl.verify_acl_table_groups(0)
bind_ports = ["Ethernet0"]
dvs_acl.update_acl_table_port_list("test", bind_ports)
# Verify table, group, and member have been created
dvs_acl.verify_acl_table_count(1)
dvs_acl.verify_acl_table_groups(1)
acl_table_ids = dvs_acl.get_acl_table_ids(1)
dvs_acl.verify_acl_table_port_binding(acl_table_ids[0], bind_ports, 1)
bind_ports = []
dvs_acl.update_acl_table_port_list("test", bind_ports)
dvs_acl.verify_acl_table_count(1)
dvs_acl.verify_acl_table_groups(0)
def test_one_port_two_acl_tables(self, dvs_acl):
# Create ACL table "test" and bind it to Ethernet0
bind_ports = ["Ethernet0"]
dvs_acl.create_acl_table("test", "L3", bind_ports)
dvs_acl.verify_acl_table_count(1)
dvs_acl.verify_acl_table_groups(1)
acl_table_ids = dvs_acl.get_acl_table_ids(1)
dvs_acl.verify_acl_table_port_binding(acl_table_ids[0], bind_ports, 1)
# Create ACL table "test1" and bind it to Ethernet0
bind_ports = ["Ethernet0"]
dvs_acl.create_acl_table("test1", "L3", bind_ports)
dvs_acl.verify_acl_table_count(2)
dvs_acl.verify_acl_table_groups(1)
acl_table_ids = dvs_acl.get_acl_table_ids(2)
dvs_acl.verify_acl_table_port_binding(acl_table_ids[0], bind_ports, 2)
dvs_acl.verify_acl_table_port_binding(acl_table_ids[1], bind_ports, 2)
# Delete ACL tables
dvs_acl.remove_acl_table("test")
dvs_acl.verify_acl_table_count(1)
dvs_acl.verify_acl_table_groups(1)
dvs_acl.remove_acl_table("test1")
dvs_acl.verify_acl_table_count(0)
dvs_acl.verify_acl_table_groups(0)
def test_one_acl_table_many_ports(self, dvs, dvs_acl):
# Create ACL table and bind it to Ethernet0 and Ethernet4
bind_ports = ["Ethernet0", "Ethernet4"]
dvs_acl.create_acl_table("test", "L3", bind_ports)
dvs_acl.verify_acl_table_count(1)
dvs_acl.verify_acl_table_groups(2)
acl_table_ids = dvs_acl.get_acl_table_ids(1)
dvs_acl.verify_acl_table_port_binding(acl_table_ids[0], bind_ports, 1)
# Update bind list and verify
bind_ports = ["Ethernet4"]
dvs_acl.update_acl_table_port_list("test", bind_ports)
dvs_acl.verify_acl_table_groups(1)
acl_table_ids = dvs_acl.get_acl_table_ids(1)
dvs_acl.verify_acl_table_port_binding(acl_table_ids[0], bind_ports, 1)
# Breakout Ethernet0
dpb = DPB()
dpb.breakout(dvs, "Ethernet0", maxBreakout)
# Update bind list and verify
bind_ports = ["Ethernet0", "Ethernet1", "Ethernet2", "Ethernet3", "Ethernet4"]
dvs_acl.update_acl_table_port_list("test", bind_ports)
dvs_acl.verify_acl_table_groups(5)
dvs_acl.verify_acl_table_port_binding(acl_table_ids[0], bind_ports, 1)
# Update bind list and verify
bind_ports = ["Ethernet4"]
dvs_acl.update_acl_table_port_list("test", bind_ports)
dvs_acl.verify_acl_table_groups(1)
dvs_acl.verify_acl_table_port_binding(acl_table_ids[0], bind_ports, 1)
# Breakin Ethernet0, 1, 2, 3
dpb.breakin(dvs, ["Ethernet0", "Ethernet1", "Ethernet2", "Ethernet3"])
# Update bind list and verify
bind_ports = ["Ethernet0", "Ethernet4"]
dvs_acl.update_acl_table_port_list("test", bind_ports)
dvs_acl.verify_acl_table_groups(2)
dvs_acl.verify_acl_table_port_binding(acl_table_ids[0], bind_ports, 1)
# Delete ACL table
dvs_acl.remove_acl_table("test")
dvs_acl.verify_acl_table_groups(0)
def test_one_port_many_acl_tables(self, dvs, dvs_acl):
# Create 4 ACL tables and bind them to Ethernet0
bind_ports = ["Ethernet0"]
acl_tables = ["test1", "test2", "test3", "test4"]
for acl_tbl in acl_tables:
dvs_acl.create_acl_table(acl_tbl, "L3", bind_ports)
dvs_acl.verify_acl_table_count(len(acl_tables))
dvs_acl.verify_acl_table_groups(len(bind_ports))
acl_table_ids = dvs_acl.get_acl_table_ids(len(acl_tables))
for acl_tbl_id in acl_table_ids:
dvs_acl.verify_acl_table_port_binding(acl_tbl_id, bind_ports, len(acl_tables))
# Update bind list and verify
bind_ports = []
for acl_tbl in acl_tables:
dvs_acl.update_acl_table_port_list(acl_tbl, bind_ports)
dvs_acl.verify_acl_table_groups(0)
# Breakout Ethernet0
dpb = DPB()
dpb.breakout(dvs, "Ethernet0", maxBreakout)
# Breakin Ethernet0, 1, 2, 3
dpb.breakin(dvs, ["Ethernet0", "Ethernet1", "Ethernet2", "Ethernet3"])
for acl_tbl in acl_tables:
dvs_acl.remove_acl_table(acl_tbl)
def test_many_ports_many_acl_tables(self, dvs, dvs_acl):
# Prepare ACL table names
aclTableNames = []
for i in range(maxAclTables):
aclTableNames.append("aclTable" + str(i+1))
# Prepare all port names
portNames = []
for i in range(maxPorts):
portNames.append("Ethernet" + str(i))
# Prepare root port names
rootPortNames = []
for i in range(0, maxPorts, maxBreakout):
rootPortNames.append("Ethernet" + str(i))
# Create ACL tables and bind root ports
for aclTable in aclTableNames:
dvs_acl.create_acl_table(aclTable, "L3", rootPortNames)
dvs_acl.verify_acl_table_groups(maxRootPorts)
# Remove the dependency on all root ports by
# unbinding them from all ACL tables.
bind_ports = []
for aclTable in aclTableNames:
dvs_acl.update_acl_table_port_list(aclTable, bind_ports)
dvs_acl.verify_acl_table_groups(0)
# Breakout all root ports
dpb = DPB()
for pName in rootPortNames:
dpb.breakout(dvs, pName, maxBreakout)
# Add all ports to aclTable1
dvs_acl.update_acl_table_port_list(aclTableNames[0], portNames)
dvs_acl.verify_acl_table_groups(maxPorts)
# Remove all ports from aclTable1
bind_ports = []
dvs_acl.update_acl_table_port_list(aclTableNames[0], bind_ports)
dvs_acl.verify_acl_table_groups(0)
# Breakin all ports
for i in range(0, maxPorts, maxBreakout):
dpb.breakin(dvs, portNames[i:i+maxBreakout])
for aclTable in aclTableNames:
dvs_acl.remove_acl_table(aclTable)
dvs_acl.verify_acl_table_count(0)
# Add a dummy always-pass test at the end as a workaround for an issue where,
# if Flaky fails on the final test, it invokes module tear-down before retrying.
def test_nonflaky_dummy():
pass
``` |
{
"source": "JiangBowen0008/bop_toolkit",
"score": 3
} |
#### File: bop_toolkit/bop_toolkit_lib/renderer.py
```python
class Renderer(object):
"""Abstract class of a renderer."""
def __init__(self, width, height):
"""Constructor.
:param width: Width of the rendered image.
:param height: Height of the rendered image.
"""
self.width = width
self.height = height
# 3D location of a point light (in the camera coordinates).
self.light_cam_pos = (0, 0, 0)
# Set light color and weights.
self.light_color = (1.0, 1.0, 1.0) # Used only in C++ renderer.
self.light_ambient_weight = 0.5
self.light_diffuse_weight = 1.0 # Used only in C++ renderer.
self.light_specular_weight = 0.0 # Used only in C++ renderer.
self.light_specular_shininess = 0.0 # Used only in C++ renderer.
def set_light_cam_pos(self, light_cam_pos):
"""Sets the 3D location of a point light.
:param light_cam_pos: [X, Y, Z].
"""
self.light_cam_pos = light_cam_pos
def set_light_ambient_weight(self, light_ambient_weight):
"""Sets weight of the ambient light.
:param light_ambient_weight: Scalar from 0 to 1.
"""
self.light_ambient_weight = light_ambient_weight
def add_object(self, obj_id, model_path, **kwargs):
"""Loads an object model.
:param obj_id: Object identifier.
:param model_path: Path to the object model file.
"""
raise NotImplementedError
def remove_object(self, obj_id):
"""Removes an object model.
:param obj_id: Identifier of the object to remove.
"""
raise NotImplementedError
def render_object(self, obj_id, R, t, fx, fy, cx, cy):
"""Renders an object model in the specified pose.
:param obj_id: Object identifier.
:param R: 3x3 ndarray with a rotation matrix.
:param t: 3x1 ndarray with a translation vector.
:param fx: Focal length (X axis).
:param fy: Focal length (Y axis).
:param cx: The X coordinate of the principal point.
:param cy: The Y coordinate of the principal point.
:return: Returns a dictionary with rendered images.
"""
raise NotImplementedError
def create_renderer(width, height, renderer_type='cpp', mode='rgb+depth',
shading='phong', bg_color=(0.0, 0.0, 0.0, 0.0)):
"""A factory to create a renderer.
Note: Parameters mode, shading and bg_color are currently supported only by
the Python renderer (renderer_type='python').
:param width: Width of the rendered image.
:param height: Height of the rendered image.
:param renderer_type: Type of renderer (options: 'cpp', 'python').
:param mode: Rendering mode ('rgb+depth', 'rgb', 'depth').
:param shading: Type of shading ('flat', 'phong').
:param bg_color: Color of the background (R, G, B, A).
:return: Instance of a renderer of the specified type.
"""
if renderer_type == 'python':
from . import renderer_py
return renderer_py.RendererPython(width, height, mode, shading, bg_color)
elif renderer_type == 'cpp':
from . import renderer_cpp
return renderer_cpp.RendererCpp(width, height)
else:
raise ValueError('Unknown renderer type.')
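# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows the intended call sequence for the factory and the Renderer interface
# defined above. The object id, model path, pose and camera intrinsics are
# placeholder values, not data shipped with this file.
def _example_render():
    import numpy as np
    ren = create_renderer(640, 480, renderer_type='python', mode='rgb+depth')
    ren.add_object(1, '/path/to/obj_000001.ply')  # hypothetical model path
    R = np.eye(3)                                  # 3x3 rotation matrix
    t = np.array([[0.0], [0.0], [300.0]])          # 3x1 translation vector
    return ren.render_object(1, R, t, fx=572.4, fy=573.6, cx=325.3, cy=242.0)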
``` |
{
"source": "JiangBowen0008/CVPR2021_PDNet",
"score": 2
} |
#### File: JiangBowen0008/CVPR2021_PDNet/infer.py
```python
import time
import datetime
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from collections import OrderedDict
import numpy as np
from numpy import mean
from skimage import io
from config import *
from misc import *
from pdnet import PDNet
import pdb
torch.manual_seed(2021)
device_ids = [0]
torch.cuda.set_device(device_ids[0])
results_path = './results'
check_mkdir(results_path)
ckpt_path = './ckpt'
exp_name = 'PDNet'
args = {
'scale': 832,
'save_results': False,
}
print(torch.__version__)
img_transform = transforms.Compose([
transforms.Resize((args['scale'], args['scale'])),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
depth_transform = transforms.Compose([
transforms.Resize((args['scale'], args['scale'])),
transforms.ToTensor(),
])
to_pil = transforms.ToPILImage()
to_test = OrderedDict([
('RGBD-Mirror', testing_root),
])
results = OrderedDict()
def predict(net, img, depth):
depth = (depth.astype(np.float32) / np.max(depth))
depth = np.expand_dims(depth, 2)
depth = transforms.ToPILImage()(depth)
img = transforms.ToPILImage()(img)
w, h = img.size
img_var = Variable(img_transform(img).unsqueeze(0))#.cuda(device_ids[0])
depth_var = Variable(depth_transform(depth).unsqueeze(0))#.cuda(device_ids[0])
start_each = time.time()
prediction = net(img_var, depth_var)
time_each = time.time() - start_each
#time_list.append(time_each)
prediction = np.array(transforms.Resize((h, w))(to_pil(prediction.data.squeeze(0).cpu())))
return prediction
def main():
net = PDNet(backbone_path)#.cuda(device_ids[0])
print('Load {}.pth for testing'.format(exp_name))
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name + '.pth')))
print('Load {}.pth succeed!'.format(exp_name))
net.eval()
with torch.no_grad():
start = time.time()
for name, root in to_test.items():
            time_list = []
image_path = os.path.join(root, 'image')
depth_path = os.path.join(root, 'depth_normalized')
if args['save_results']:
check_mkdir(os.path.join(results_path, exp_name))
img_list = [os.path.splitext(f)[0] for f in os.listdir(image_path) if f.endswith('jpg')]
for idx, img_name in enumerate(img_list):
img = io.imread(os.path.join(image_path, img_name + '.jpg'))
depth = io.imread(os.path.join(depth_path, img_name + '.png'))
                start_each = time.time()
                prediction = predict(net, img, depth)
                time_list.append(time.time() - start_each)
if args['save_results']:
Image.fromarray(prediction).convert('L').save(os.path.join(results_path, exp_name, img_name + '.png'))
print(('{}'.format(exp_name)))
print("{}'s average Time Is : {:.1f} ms".format(name, mean(time_list) * 1000))
print("{}'s average Time Is : {:.1f} fps".format(name, 1 / mean(time_list)))
end = time.time()
print("Total Testing Time: {}".format(str(datetime.timedelta(seconds=int(end - start)))))
if __name__ == '__main__':
main()
```
#### File: JiangBowen0008/CVPR2021_PDNet/pdnet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import backbone.resnet.resnet as resnet
class PM(nn.Module):
""" positioning module """
def __init__(self, in_dim_x, in_dim_y):
super(PM, self).__init__()
self.in_dim_x = in_dim_x
self.in_dim_y = in_dim_y
self.in_dim_xy = in_dim_x + in_dim_y
self.in_dim_2xy = (in_dim_x + in_dim_y) * 2
# discontinuity
self.fusion1 = nn.Sequential(nn.Conv2d(self.in_dim_xy, self.in_dim_xy, 3, 1, 1),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
self.local_main = nn.Sequential(nn.Conv2d(self.in_dim_xy, self.in_dim_xy, 3, 1, 1, 1),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
self.context_main = nn.Sequential(nn.Conv2d(self.in_dim_xy, self.in_dim_xy, 5, 1, 4, 2),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
self.global_main = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(self.in_dim_xy, self.in_dim_xy, 1, 1, 0),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
self.bn_main1 = nn.BatchNorm2d(self.in_dim_xy)
self.relu_main1 = nn.ReLU()
self.bn_main2 = nn.BatchNorm2d(self.in_dim_xy)
self.relu_main2 = nn.ReLU()
self.local_rgb = nn.Sequential(nn.Conv2d(self.in_dim_x, self.in_dim_x, 3, 1, 1, 1),
nn.BatchNorm2d(self.in_dim_x), nn.ReLU())
self.context_rgb = nn.Sequential(nn.Conv2d(self.in_dim_x, self.in_dim_x, 5, 1, 4, 2),
nn.BatchNorm2d(self.in_dim_x), nn.ReLU())
self.global_rgb = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(self.in_dim_x, self.in_dim_x, 1, 1, 0),
nn.BatchNorm2d(self.in_dim_x), nn.ReLU())
self.bn_rgb1 = nn.BatchNorm2d(self.in_dim_x)
self.relu_rgb1 = nn.ReLU()
self.bn_rgb2 = nn.BatchNorm2d(self.in_dim_x)
self.relu_rgb2 = nn.ReLU()
self.local_depth = nn.Sequential(nn.Conv2d(self.in_dim_y, self.in_dim_y, 3, 1, 1, 1),
nn.BatchNorm2d(self.in_dim_y), nn.ReLU())
self.context_depth = nn.Sequential(nn.Conv2d(self.in_dim_y, self.in_dim_y, 5, 1, 4, 2),
nn.BatchNorm2d(self.in_dim_y), nn.ReLU())
self.global_depth = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(self.in_dim_y, self.in_dim_y, 1, 1, 0),
nn.BatchNorm2d(self.in_dim_y), nn.ReLU())
self.bn_depth1 = nn.BatchNorm2d(self.in_dim_y)
self.relu_depth1 = nn.ReLU()
self.bn_depth2 = nn.BatchNorm2d(self.in_dim_y)
self.relu_depth2 = nn.ReLU()
self.fusion2 = nn.Sequential(nn.Conv2d(self.in_dim_2xy, self.in_dim_xy, 3, 1, 1),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
# similarity
self.fusion3 = nn.Sequential(nn.Conv2d(self.in_dim_xy, self.in_dim_xy, 3, 1, 1),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
self.value = nn.Conv2d(self.in_dim_xy, self.in_dim_xy, 1, 1, 0)
self.gap_rgb = nn.AdaptiveAvgPool2d((1, 1))
self.mlp_rgb = nn.Sequential(nn.Conv2d(self.in_dim_x, self.in_dim_x // 8, 1, 1, 0),
nn.BatchNorm2d(self.in_dim_x // 8), nn.ReLU(),
nn.Conv2d(self.in_dim_x // 8, 1, 1, 1, 0))
self.gap_depth = nn.AdaptiveAvgPool2d((1, 1))
self.mlp_depth = nn.Sequential(nn.Conv2d(self.in_dim_y, self.in_dim_y // 8, 1, 1, 0),
nn.BatchNorm2d(self.in_dim_y // 8), nn.ReLU(),
nn.Conv2d(self.in_dim_y // 8, 1, 1, 1, 0))
self.softmax_weight = nn.Softmax(dim=1)
self.query_rgb = nn.Conv2d(self.in_dim_x, self.in_dim_x // 8, 1, 1, 0)
self.key_rgb = nn.Conv2d(self.in_dim_x, self.in_dim_x // 8, 1, 1, 0)
self.query_depth = nn.Conv2d(self.in_dim_y, self.in_dim_y // 8, 1, 1, 0)
self.key_depth = nn.Conv2d(self.in_dim_y, self.in_dim_y // 8, 1, 1, 0)
self.softmax_dependency = nn.Softmax(dim=-1)
self.gamma = nn.Parameter(torch.ones(1))
def forward(self, x, y):
"""
inputs :
x : input rgb feature maps (B X C1 X H X W)
y : input depth feature maps (B X C2 X H X W)
returns :
out : enhanced feature maps (B X (C1+C2) X H X W)
"""
# discontinuity
fusion1 = self.fusion1(torch.cat((x, y), 1))
local_main = self.local_main(fusion1)
context_main = self.context_main(fusion1)
global_main = self.global_main(fusion1).expand_as(fusion1)
contrast_main1 = self.relu_main1(self.bn_main1(local_main - context_main))
contrast_main2 = self.relu_main2(self.bn_main2(local_main - global_main))
contrast_main = contrast_main1 + contrast_main2
local_rgb = self.local_rgb(x)
context_rgb = self.context_rgb(x)
global_rgb = self.global_rgb(x).expand_as(x)
contrast_rgb1 = self.relu_rgb1(self.bn_rgb1(local_rgb - context_rgb))
contrast_rgb2 = self.relu_rgb2(self.bn_rgb2(local_rgb - global_rgb))
contrast_rgb = contrast_rgb1 + contrast_rgb2
local_depth = self.local_depth(y)
context_depth = self.context_depth(y)
global_depth = self.global_depth(y).expand_as(y)
contrast_depth1 = self.relu_depth1(self.bn_depth1(local_depth - context_depth))
contrast_depth2 = self.relu_depth2(self.bn_depth2(local_depth - global_depth))
contrast_depth = contrast_depth1 + contrast_depth2
concatenation = torch.cat((contrast_main, contrast_rgb, contrast_depth), 1)
fusion2 = self.fusion2(concatenation)
# similarity
fusion3 = self.fusion3(torch.cat((x, y), 1))
B, C, H, W = fusion3.size()
value = self.value(fusion3).view(B, -1, H * W)
weight_rgb = self.mlp_rgb(self.gap_rgb(x))
weight_depth = self.mlp_depth(self.gap_depth(y))
softmax_weight = self.softmax_weight(torch.cat((weight_rgb, weight_depth), 1))
weight_rgb_normalized, weight_depth_normalized = softmax_weight.split(1, dim=1)
query_rgb = self.query_rgb(x).view(B, -1, H * W).permute(0, 2, 1)
key_rgb = self.key_rgb(x).view(B, -1, H * W)
energy_rgb = torch.bmm(query_rgb, key_rgb)
energy_rgb = energy_rgb * weight_rgb_normalized.squeeze(1).expand_as(energy_rgb)
query_depth = self.query_depth(y).view(B, -1, H * W).permute(0, 2, 1)
key_depth = self.key_depth(y).view(B, -1, H * W)
energy_depth = torch.bmm(query_depth, key_depth)
energy_depth = energy_depth * weight_depth_normalized.squeeze(1).expand_as(energy_depth)
energy = energy_rgb + energy_depth
attention_element = self.softmax_dependency(energy)
fusion4 = torch.bmm(value, attention_element.permute(0, 2, 1)).view(B, C, H, W)
fusion4 = self.gamma * fusion4 + fusion3
# final output features
fusion = fusion2 + fusion4
return fusion
class DM(nn.Module):
""" delineating module """
def __init__(self, in_dim_x, in_dim_y, in_dim_z):
super(DM, self).__init__()
self.in_dim_x = in_dim_x
self.in_dim_y = in_dim_y
self.in_dim_xy = in_dim_x + in_dim_y
self.in_dim_2xy = (in_dim_x + in_dim_y) * 2
self.in_dim_z = in_dim_z
self.up_main = nn.Sequential(nn.Conv2d(self.in_dim_z, self.in_dim_xy, 3, 1, 1),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU(), nn.UpsamplingBilinear2d(scale_factor=2))
self.up_rgb = nn.Sequential(nn.Conv2d(self.in_dim_z, self.in_dim_x, 3, 1, 1),
nn.BatchNorm2d(self.in_dim_x), nn.ReLU(), nn.UpsamplingBilinear2d(scale_factor=2))
self.up_depth = nn.Sequential(nn.Conv2d(self.in_dim_z, self.in_dim_y, 3, 1, 1),
nn.BatchNorm2d(self.in_dim_y), nn.ReLU(), nn.UpsamplingBilinear2d(scale_factor=2))
self.fusion1 = nn.Sequential(nn.Conv2d(self.in_dim_xy, self.in_dim_xy, 3, 1, 1),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
self.local_main = nn.Sequential(nn.Conv2d(self.in_dim_xy, self.in_dim_xy, 3, 1, 1, 1),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
self.context_main = nn.Sequential(nn.Conv2d(self.in_dim_xy, self.in_dim_xy, 5, 1, 4, 2),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
self.bn_main = nn.BatchNorm2d(self.in_dim_xy)
self.relu_main = nn.ReLU()
self.local_rgb = nn.Sequential(nn.Conv2d(self.in_dim_x, self.in_dim_x, 3, 1, 1, 1),
nn.BatchNorm2d(self.in_dim_x), nn.ReLU())
self.context_rgb = nn.Sequential(nn.Conv2d(self.in_dim_x, self.in_dim_x, 5, 1, 4, 2),
nn.BatchNorm2d(self.in_dim_x), nn.ReLU())
self.bn_rgb = nn.BatchNorm2d(self.in_dim_x)
self.relu_rgb = nn.ReLU()
self.local_depth = nn.Sequential(nn.Conv2d(self.in_dim_y, self.in_dim_y, 3, 1, 1, 1),
nn.BatchNorm2d(self.in_dim_y), nn.ReLU())
self.context_depth = nn.Sequential(nn.Conv2d(self.in_dim_y, self.in_dim_y, 5, 1, 4, 2),
nn.BatchNorm2d(self.in_dim_y), nn.ReLU())
self.bn_depth = nn.BatchNorm2d(self.in_dim_y)
self.relu_depth = nn.ReLU()
self.fusion2 = nn.Sequential(nn.Conv2d(self.in_dim_2xy, self.in_dim_xy, 3, 1, 1),
nn.BatchNorm2d(self.in_dim_xy), nn.ReLU())
def forward(self, x, y, z):
"""
inputs :
x : input rgb feature maps (B X C1 X H1 X W1)
y : input depth feature maps (B X C2 X H1 X W1)
z : input higher-level feature maps (B X C3 X H2 X W2)
returns :
out : enhanced feature maps (B X (C1+C2) X H1 X W1)
"""
up_main = self.up_main(z)
fusion1 = self.fusion1(torch.cat((x, y), 1))
feature_main = fusion1 + up_main
local_main = self.local_main(feature_main)
context_main = self.context_main(feature_main)
contrast_main = self.relu_main(self.bn_main(local_main - context_main))
up_rgb = self.up_rgb(z)
feature_rgb = x + up_rgb
local_rgb = self.local_rgb(feature_rgb)
context_rgb = self.context_rgb(feature_rgb)
contrast_rgb = self.relu_rgb(self.bn_rgb(local_rgb - context_rgb))
up_depth = self.up_depth(z)
feature_depth = y + up_depth
local_depth = self.local_depth(feature_depth)
context_depth = self.context_depth(feature_depth)
contrast_depth = self.relu_depth(self.bn_depth(local_depth - context_depth))
concatenation = torch.cat((contrast_main, contrast_rgb, contrast_depth), 1)
fusion2 = self.fusion2(concatenation)
return fusion2
###################################################################
# ########################## NETWORK ##############################
###################################################################
class PDNet(nn.Module):
def __init__(self, backbone_path=None):
super(PDNet, self).__init__()
# params
# backbone
resnet50 = resnet.resnet50(backbone_path)
self.layer0 = nn.Sequential(resnet50.conv1, resnet50.bn1, resnet50.relu)
self.layer1 = nn.Sequential(resnet50.maxpool, resnet50.layer1)
self.layer2 = resnet50.layer2
self.layer3 = resnet50.layer3
self.layer4 = resnet50.layer4
# depth feature extraction
self.depth_conv0 = nn.Sequential(nn.Conv2d(1, 8, 3, 1, 1), nn.BatchNorm2d(8), nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
self.depth_conv1 = nn.Sequential(nn.Conv2d(8, 16, 3, 1, 1), nn.BatchNorm2d(16), nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
self.depth_conv2 = nn.Sequential(nn.Conv2d(16, 32, 3, 1, 1), nn.BatchNorm2d(32), nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
self.depth_conv3 = nn.Sequential(nn.Conv2d(32, 64, 3, 1, 1), nn.BatchNorm2d(64), nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
self.depth_conv4 = nn.Sequential(nn.Conv2d(64, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
# channel reduction
self.cr4 = nn.Sequential(nn.Conv2d(2048, 512, 1, 1, 0), nn.BatchNorm2d(512), nn.ReLU())
self.cr3 = nn.Sequential(nn.Conv2d(1024, 256, 1, 1, 0), nn.BatchNorm2d(256), nn.ReLU())
self.cr2 = nn.Sequential(nn.Conv2d(512, 128, 1, 1, 0), nn.BatchNorm2d(128), nn.ReLU())
self.cr1 = nn.Sequential(nn.Conv2d(256, 64, 1, 1, 0), nn.BatchNorm2d(64), nn.ReLU())
# positioning
self.pm = PM(512, 128)
# delineating
self.dm3 = DM(256, 64, 640)
self.dm2 = DM(128, 32, 320)
self.dm1 = DM(64, 16, 160)
# predict
self.predict4 = nn.Conv2d(640, 1, 3, 1, 1)
self.predict3 = nn.Conv2d(320, 1, 3, 1, 1)
self.predict2 = nn.Conv2d(160, 1, 3, 1, 1)
self.predict1 = nn.Conv2d(80, 1, 3, 1, 1)
for m in self.modules():
if isinstance(m, nn.ReLU):
m.inplace = True
def forward(self, x, y):
# x: [batch_size, channel=3, h, w]
# y: [batch_size, channel=1, h, w]
layer0 = self.layer0(x) # [-1, 64, h/2, w/2]
layer1 = self.layer1(layer0) # [-1, 256, h/4, w/4]
layer2 = self.layer2(layer1) # [-1, 512, h/8, w/8]
layer3 = self.layer3(layer2) # [-1, 1024, h/16, w/16]
layer4 = self.layer4(layer3) # [-1, 2048, h/32, w/32]
depth_conv0 = self.depth_conv0(y)
depth_conv1 = self.depth_conv1(depth_conv0)
depth_conv2 = self.depth_conv2(depth_conv1)
depth_conv3 = self.depth_conv3(depth_conv2)
depth_conv4 = self.depth_conv4(depth_conv3)
# channel reduction
cr4 = self.cr4(layer4)
cr3 = self.cr3(layer3)
cr2 = self.cr2(layer2)
cr1 = self.cr1(layer1)
# positioning
pm = self.pm(cr4, depth_conv4)
# delineating
dm3 = self.dm3(cr3, depth_conv3, pm)
dm2 = self.dm2(cr2, depth_conv2, dm3)
dm1 = self.dm1(cr1, depth_conv1, dm2)
# predict
predict4 = self.predict4(pm)
predict3 = self.predict3(dm3)
predict2 = self.predict2(dm2)
predict1 = self.predict1(dm1)
# rescale
predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
if self.training:
return predict4, predict3, predict2, predict1
return torch.sigmoid(predict1)
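# --- Shape sanity check (added for illustration; not part of the original model) ---
# Runs a dummy forward pass to confirm the network maps an RGB image and a
# single-channel depth map of the same resolution to a full-resolution mask.
# Assumes backbone_path points to a ResNet-50 checkpoint accepted by
# resnet.resnet50, as in the training/inference configs.
def _example_shapes(backbone_path):
    net = PDNet(backbone_path).eval()
    x = torch.randn(1, 3, 416, 416)  # RGB input
    y = torch.randn(1, 1, 416, 416)  # depth input
    with torch.no_grad():
        pred = net(x, y)             # sigmoid mask, shape (1, 1, 416, 416)
    return pred.shape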
``` |
{
"source": "JiangBowen-master/DeepCTR",
"score": 2
} |
#### File: tests/models/DCN_test.py
```python
import pytest
import tensorflow as tf
from deepctr.estimator import DCNEstimator
from deepctr.models import DCN
from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \
Estimator_TEST_TF1
@pytest.mark.parametrize(
'cross_num,hidden_size,sparse_feature_num,cross_parameterization',
[(0, (8,), 2, 'vector'), (1, (), 1, 'vector'), (1, (8,), 3, 'vector'),
(0, (8,), 2, 'matrix'), (1, (), 1, 'matrix'), (1, (8,), 3, 'matrix'),
]
)
def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization):
model_name = "DCN"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization,
dnn_hidden_units=hidden_size, dnn_dropout=0.5)
check_model(model, model_name, x, y)
@pytest.mark.parametrize(
'cross_num,hidden_size,sparse_feature_num',
[(1, (8,), 3)
]
)
def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num):
if not Estimator_TEST_TF1 and tf.__version__ < "2.2.0":
return
model_name = "DCN"
sample_size = SAMPLE_SIZE
linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size,
sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size,
dnn_dropout=0.5)
check_estimator(model, input_fn)
# def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()):
# feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)],
# 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]}
# with pytest.raises(ValueError):
# _ = DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5)
if __name__ == "__main__":
pass
``` |
{
"source": "JiangboYu13/CarND-Behavioral-Cloning-P3",
"score": 3
} |
#### File: JiangboYu13/CarND-Behavioral-Cloning-P3/model.py
```python
from keras.layers import Input, Lambda, Flatten, \
Dense, GlobalAveragePooling2D, Conv2D,Cropping2D
from keras.models import Model, load_model
from keras.utils import plot_model
import argparse
import os
import cv2
import numpy as np
import sklearn
import csv
from sklearn.model_selection import train_test_split
import h5py
from keras import __version__ as keras_version
import tensorflow as tf
from PIL import Image
import matplotlib.pyplot as plt
#Model Architure
def NvidiaModel(input):
normalized = Lambda(lambda image: tf.image.rgb_to_grayscale(image)/255-0.5)(input)
cropped = Cropping2D(cropping=((50,20), (0,0)))(normalized)
conv1 = Conv2D(24, (5,5), strides=(2,2), activation='relu')(cropped)
conv2 = Conv2D(36, (5,5), strides=(2,2), activation='relu')(conv1)
conv3 = Conv2D(48, (5,5), strides=(2,2), activation='relu')(conv2)
conv4 = Conv2D(64, (3,3), activation='relu')(conv3)
conv5 = Conv2D(64, (3,3), activation='relu')(conv4)
flatten = Flatten()(conv5)
fc1 = Dense(100, activation='relu')(flatten)
fc2 = Dense(50, activation='relu')(fc1)
fc3 = Dense(10, activation='relu')(fc2)
output = Dense(1)(fc3)
return output
def generator(samples, batch_size=32):
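    # Each CSV row yields six training images (centre/left/right camera views,
    # each also flipped horizontally), so rows are consumed batch_size//6 at a
    # time to keep the emitted batch size close to batch_size.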
img_dir = '/opt/carnd_p3/data/'
num_samples = len(samples)
while True:
for offset in range(0, num_samples, batch_size//6):
batch_samples = samples[offset:offset+batch_size//6]
images = []
angles = []
for batch_sample in batch_samples:
#Train model using left/centre/right view image and flip the images to augment the training data
for idx in range(3):
if os.path.isabs(batch_sample[idx]):
img_name = batch_sample[idx]
else:
img_name = os.path.join(img_dir,'IMG',batch_sample[idx].split('/')[-1])
img = np.asarray(Image.open(img_name))
angle = float(batch_sample[3])
if idx == 1:
angle=min(angle + 0.2, 1)
if idx == 2:
angle=max(angle - 0.2, -1)
flip_img = np.fliplr(img)
flip_angle = -angle
images.append(img)
angles.append(angle)
images.append(flip_img)
angles.append(flip_angle)
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Model')
parser.add_argument(
'-d',
'--dir',
type=str,
nargs='*',
default=['/opt/carnd_p3/data/'],
help='Image Directory ')
parser.add_argument(
'-m',
'--model',
type=str,
nargs='*',
help='Original Model')
args = parser.parse_args()
csv_dirs = args.dir
samples = []
for csv_dir in csv_dirs:
with open(os.path.join(csv_dir, 'driving_log.csv')) as csvfile:
reader = csv.reader(csvfile)
for line in reader:
if line[0] == 'center':#discard title line in csv file
continue
samples.append(line)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
batch_size=128
train_generator = generator(train_samples, batch_size=batch_size, )
validation_generator = generator(validation_samples, batch_size=batch_size)
#Train from pre-trained model (transfer learning)
if args.model is not None:
print("Using Pre-trained Model")
model = load_model(args.model[0])
for layer in model.layers:
if 'conv2d' in layer.name:
layer.trainable=False
    else:  # Train a new model from scratch
input = Input((160, 320, 3))
output = NvidiaModel(input)
model = Model(inputs=input, outputs=output)
model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator, \
steps_per_epoch=np.ceil(len(train_samples)/batch_size*2), \
validation_data=validation_generator, \
validation_steps=np.ceil(len(validation_samples)/batch_size*2), \
epochs=5, verbose=1)
model.save('TEST.h5')
``` |
{
"source": "Jiangce2017/BallConv",
"score": 2
} |
#### File: Jiangce2017/BallConv/train.py
```python
import torch
import numpy as np
from numpy import linalg as LA
import os.path as osp
from graph_dataset import Graph_Dataset
from torch_geometric.data import DataLoader
import torch.nn.functional as F
from torch.nn import Sequential as Seq, Linear as Lin, ReLU, BatchNorm1d as BN, Softmax
from torch_geometric.nn import radius, TAGConv, global_max_pool as gmp, knn
from ballconvnet import BallConv
#from point_cloud_models import DynamicEdge
def MLP(channels, batch_norm=True):
return Seq(*[
Seq(Lin(channels[i - 1], channels[i]), ReLU(), BN(channels[i]))
for i in range(1, len(channels))
])
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
#self.conv1 = TAGConv(4, 128, 3)
#self.conv2 = TAGConv(128, 128, 3)
self.conv1 = BallConv(MLP([2*3, 128, 128, 128]), 'max')
self.conv2 = BallConv(MLP([2*128, 256]), 'max')
self.lin1 = torch.nn.Sequential(
torch.nn.Linear(256,128),
torch.nn.ReLU(),
#BN(512),
)
self.lin2 = torch.nn.Sequential(
BN(128),
torch.nn.Linear(128,128),
torch.nn.ReLU(),
BN(128),
torch.nn.Dropout(0.5)
)
self.lin3 = torch.nn.Sequential(
torch.nn.Linear(128,128),
torch.nn.ReLU(),
BN(128),
torch.nn.Dropout(0.5),
)
self.output = torch.nn.Sequential(
torch.nn.Linear(128, 10)
)
#self.condense = torch.nn.Sequential(
#torch.nn.Linear(512,512),
#torch.nn.ReLU(),
#BN(64),
#torch.nn.Dropout(0.5),
#torch.nn.Linear(64,1),
#torch.nn.ReLU(),
#)
    def forward(self, data, idx=None):  # idx is only used by the commented-out radius-based variant
r, pos, batch = data.r, data.pos, data.batch
#x, edge_index, batch, edge_attr = data.x.float(), data.edge_index, data.batch, data.edge_attr.float()
#r_limit = r[0]*0.5
#row, col = radius(pos, pos[idx], r_limit, batch, batch[idx], max_num_neighbors=64)
row, col = knn(pos,pos, 32,batch, batch)
#row, col = radius(pos, pos, r_limit, batch, batch, max_num_neighbors=32)
edge_index = torch.stack([col, row], dim=0).to(device)# (col, row), or (col row)
#edge_attr = torch.ones((edge_index.shape[1],1)).to(device)
x1 = F.relu(self.conv1(pos, edge_index))
x2 = F.relu(self.conv2(x1, edge_index))
x = self.lin1(x2)
# x = x.view(-1,512,128)
# x = torch.transpose(x,1,2)
# #x = x[:,:,:64]
# x = x.reshape(-1,512)
# x = self.condense(x)
# x = x.view(-1,128,512)
# x = torch.transpose(x,1,2)
# x = x.reshape(-1,128)
x = gmp(x, batch)
x = self.lin2(x)
x = self.lin3(x)
x = self.output(x)
return F.log_softmax(x, dim=-1)
def train():
model.train()
train_metrics = {"loss": [], "acc": []}
for batch_i, data in enumerate(train_loader):
optimizer.zero_grad()
data = data.to(device)
predictions = model(data)
loss = F.nll_loss(predictions, data.y)
loss.backward()
optimizer.step()
acc = 100 * (predictions.detach().argmax(1) == data.y).cpu().numpy().mean()
train_metrics["loss"].append(loss.item())
train_metrics["acc"].append(acc)
return np.mean(train_metrics["acc"]), np.mean(train_metrics["loss"])
def test():
model.eval()
test_metrics = {"acc": []}
correct = 0
for batch_i, data in enumerate(test_loader):
data = data.to(device)
with torch.no_grad():
predictions = model(data)
acc = 100 * (predictions.detach().argmax(1) == data.y).cpu().numpy().mean()
test_metrics["acc"].append(acc)
return np.mean(test_metrics["acc"])
if __name__ == '__main__':
sphere_num = 1024
dataset_name = 'ModelNet10_256'
path = osp.join('dataset', dataset_name)
train_dataset = Graph_Dataset(path, '10', True)
test_dataset = Graph_Dataset(path, '10', False)
print(len(train_dataset))
print(len(test_dataset))
print('Dataset loaded.')
bz = 8
train_loader = DataLoader(train_dataset, batch_size=bz, shuffle=True, drop_last=True,
num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=bz, shuffle=True, drop_last=True,
num_workers=2)
model = Net()
model_name = 'Net'
device = torch.device('cuda:0')
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
#model.load_state_dict(checkpoint['state_dict'])
#optimizer.load_state_dict(checkpoint['optimizer'])
criterion = torch.nn.CrossEntropyLoss().to(device)
exp_name = dataset_name+model_name
num_epochs = 400
print(exp_name)
result_path = osp.join('.', 'results')
best_acc = 0
for epoch in range(num_epochs):
train_acc, train_loss = train()
test_acc= test()
is_best = test_acc > best_acc
best_acc = max(best_acc, test_acc)
state = {
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'best_acc': best_acc
}
if is_best:
torch.save(state, '%s/%s_checkpoint.pth' % (result_path, exp_name))
print(exp_name)
log = 'Epoch: {:03d}, Train_Loss: {:.4f}, Train_Acc: {:.4f}, Test_Acc: {:.4f}'
print(log.format(epoch, train_loss, train_acc, test_acc))
``` |
{
"source": "Jiangchao3/pyflwdir",
"score": 3
} |
#### File: pyflwdir/notebooks/utils.py
```python
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm, colors
import cartopy.crs as ccrs
import descartes
import numpy as np
import os
import rasterio
from rasterio import features
import geopandas as gpd
np.random.seed(seed=101)
matplotlib.rcParams["savefig.bbox"] = "tight"
matplotlib.rcParams["savefig.dpi"] = 256
plt.style.use("seaborn-whitegrid")
# read example elevation data and derive background hillslope
fn = os.path.join(os.path.dirname(__file__), "rhine_elv0.tif")
with rasterio.open(fn, "r") as src:
elevtn = src.read(1)
extent = np.array(src.bounds)[[0, 2, 1, 3]]
crs = src.crs
ls = matplotlib.colors.LightSource(azdeg=115, altdeg=45)
hs = ls.hillshade(np.ma.masked_equal(elevtn, -9999), vert_exag=1e3)
# convenience method for plotting
def quickplot(
gdfs=[], raster=None, hillshade=True, extent=extent, hs=hs, title="", filename=""
):
fig = plt.figure(figsize=(8, 15))
ax = fig.add_subplot(projection=ccrs.PlateCarree())
# plot hillshade background
if hillshade:
ax.imshow(
hs,
origin="upper",
extent=extent,
cmap="Greys",
alpha=0.3,
zorder=0,
)
# plot geopandas GeoDataFrame
for gdf, kwargs in gdfs:
gdf.plot(ax=ax, **kwargs)
if raster is not None:
data, nodata, kwargs = raster
ax.imshow(
np.ma.masked_equal(data, nodata),
origin="upper",
extent=extent,
**kwargs,
)
ax.set_aspect("equal")
ax.set_title(title, fontsize="large")
ax.text(
0.01, 0.01, "created with pyflwdir", transform=ax.transAxes, fontsize="large"
)
if filename:
plt.savefig(f"{filename}.png")
return ax
# convenience method for vectorizing a raster
def vectorize(data, nodata, transform, crs=crs, name="value"):
feats_gen = features.shapes(
data,
mask=data != nodata,
transform=transform,
connectivity=8,
)
feats = [
{"geometry": geom, "properties": {name: val}} for geom, val in list(feats_gen)
]
# parse to geopandas for plotting / writing to file
gdf = gpd.GeoDataFrame.from_features(feats, crs=crs)
gdf[name] = gdf[name].astype(data.dtype)
return gdf
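# A minimal usage sketch (not part of the original notebook utilities; the colormap and
# output name are assumptions): plot the elevation raster loaded above with quickplot.
if __name__ == "__main__":
    quickplot(
        raster=(elevtn, -9999, dict(cmap="terrain", zorder=1)),
        title="Rhine basin elevation",
        filename="rhine_elevation",
    )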
```
#### File: pyflwdir/tests/test_subgrid.py
```python
import pytest
import numpy as np
from pyflwdir import subgrid, core, streams
# test data
from test_core import test_data
parsed, flwdir = test_data[0]
idxs_ds, idxs_pit, seq, rank, mv = [p.copy() for p in parsed]
ncol, shape = flwdir.shape[1], flwdir.shape
upa = streams.upstream_area(idxs_ds, seq, ncol, dtype=np.int32)
idxs_us_main = core.main_upstream(idxs_ds, upa, mv=mv)
elv = rank
test = [("eam_plus", 5), ("", 1), ("dmm", 4)]
@pytest.mark.parametrize("method, cellsize", test)
def test_subgridch(method, cellsize):
if cellsize == 1:
idxs_out = np.arange(idxs_ds.size)
idxs_out[idxs_ds == mv] = mv
else:
idxs_out, _ = subgrid.outlets(
idxs_ds, upa, cellsize, shape, method=method, mv=mv
)
umap, uare = subgrid.segment_area(
idxs_out, idxs_ds, seq, area=np.ones(idxs_ds.size, dtype=np.int32), mv=mv
)
# upstream
rivlen = subgrid.segment_length(idxs_out, idxs_us_main, distnc=rank.ravel(), mv=mv)
rivslp = subgrid.fixed_length_slope(
idxs_out, idxs_ds, idxs_us_main, elv, rank.ravel(), mv=mv
)
rivwth = subgrid.segment_average(
idxs_out, idxs_us_main, np.ones(elv.size), np.ones(elv.size), mv=mv
)
if cellsize == 1:
assert np.all(uare[umap != 0] == cellsize)
assert np.all(rivlen[upa == 1] == 0) # headwater cells
assert np.all(rivlen[upa > 1] >= 1) # downstream cells
assert np.all(np.isclose(rivslp[rivlen > 0], 1 / rivlen[rivlen > 0]))
assert np.all(rivwth[idxs_out != mv] >= 0) # downstream cells
assert np.all(rivslp[idxs_out != mv] >= 0)
assert np.all(rivlen[idxs_out != mv] >= 0)
assert umap.max() - 1 == np.where(idxs_out != mv)[0][-1]
assert np.all(uare[idxs_out != mv] >= 1)
# downstream
rivlen1 = subgrid.segment_length(idxs_out, idxs_ds, distnc=rank.ravel(), mv=mv)
pits = idxs_ds[idxs_out[idxs_out != mv]] == idxs_out[idxs_out != mv]
assert np.all(rivlen1[idxs_out != mv][pits] == 0)
assert np.all(rivlen1[idxs_out != mv] >= 0)
# mask
rivlen2 = subgrid.segment_length(idxs_out, idxs_us_main, distnc=rank.ravel(), mv=mv)
rivlen3 = subgrid.segment_length(
idxs_out, idxs_us_main, distnc=rank.ravel(), mask=upa >= 5, mv=mv
)
assert np.all(rivlen2 >= rivlen3)
``` |
{
"source": "Jiangchenglin521/EmotionDisEDAPPA",
"score": 3
} |
#### File: EmotionDisEDAPPA/emotional distribution strategy/CICS.py
```python
import jieba
import numpy as np
import gzip
import jieba
import re
import json
import tarfile
import configparser
import pickle
import os
import csv
import time
import datetime
import random
import json
import math
import warnings
from collections import Counter
from math import sqrt
from tensorflow.python.platform import gfile
import gensim
import pandas as pd
import numpy as np
import tensorflow as tf
# (1) icount has not been updated yet; it needs to be restricted to a single utterance
# (2) Inputs split into several period-delimited sentences may not be handled correctly, and more specific sentence-splitting characters have not been looked into yet.
# *********************** Load EL (emotion lexicon) ****************************
int2emotion = ['others', 'like', 'sad', 'disgust', 'angry', 'happy']
nn = ['sad','angry', 'disgust']
pp = ['like', 'happy']
la = []
lines = open('/Users/apple/Desktop/DLUT-Emotionontology-master/分离文件/分类后/other.txt', encoding='utf-8').read().splitlines()
a = [x for x in lines]
la.append(a)
lines1 = open('/Users/apple/Desktop/DLUT-Emotionontology-master/分离文件/分类后/c1.txt', encoding='utf-8').read().splitlines()
b = [x for x in lines1]
la.append(b)
lines2 = open('/Users/apple/Desktop/DLUT-Emotionontology-master/分离文件/分类后/c2.txt', encoding='utf-8').read().splitlines()
c1 = [x for x in lines2]
la.append(c1)
lines3 = open('/Users/apple/Desktop/DLUT-Emotionontology-master/分离文件/分类后/c3.txt', encoding='utf-8').read().splitlines()
d = [x for x in lines3]
la.append(d)
lines4 = open('/Users/apple/Desktop/DLUT-Emotionontology-master/分离文件/分类后/c4.txt', encoding='utf-8').read().splitlines()
e = [x for x in lines4]
la.append(e)
lines5 = open('/Users/apple/Desktop/DLUT-Emotionontology-master/分离文件/分类后/c5.txt', encoding='utf-8').read().splitlines()
f = [x for x in lines5]
la.append(f)
# Open a dictionary file and return its entries as a list
def open_dict(Dict = 'hahah', path=r'/Users/apple/Desktop/Textming/Sent_Dict/Hownet/'):
path = path + '%s.txt' % Dict
dictionary = open(path, 'r', encoding='utf-8')
dict = []
for word in dictionary:
word = word.strip('\n')
dict.append(word)
return dict
def judgeodd(num):
if (num % 2) == 0:
return 'even'
else:
return 'odd'
# Note: you need to modify the path here.
deny_word = open_dict(Dict = '否定词', path= r'/Users/apple/Desktop/Textming/')
posdict = open_dict(Dict = 'positive', path= r'/Users/apple/Desktop/Textming/')
negdict = open_dict(Dict = 'negative', path= r'/Users/apple/Desktop/Textming/')
degree_word = open_dict(Dict = '程度级别词语', path= r'/Users/apple/Desktop/Textming/')
mostdict = degree_word[degree_word.index('extreme')+1 : degree_word.index('very')]  # weight 4, i.e. multiply the following sentiment word by 4
verydict = degree_word[degree_word.index('very')+1 : degree_word.index('more')]  # weight 3
moredict = degree_word[degree_word.index('more')+1 : degree_word.index('ish')]  # weight 2
ishdict = degree_word[degree_word.index('ish')+1 : degree_word.index('last')]  # weight 0.5
def distribution(sentence, label, r):
e_word = []
init = [0, 0, 0, 0, 0, 0]
init[label] = 1
count = [0, 0, 0, 0, 0, 0]
distr = [0, 0, 0, 0, 0, 0]
segs = jieba.lcut(sentence, cut_all=False)
for word in segs:
if word in a:
init[0] = 1
count[0] += 1
e_word.append(word)
e_word.append(word)
elif word in b:
init[1] = 1
count[1] += 1
e_word.append(word)
elif word in c1:
init[2] = 1
count[2] += 1
e_word.append(word)
elif word in d:
init[3] = 1
count[3] += 1
e_word.append(word)
elif word in e:
init[4] = 1
count[4] += 1
e_word.append(word)
elif word in f:
init[5] = 1
count[5] += 1
e_word.append(word)
    if np.sum(init, 0) == 0 or np.sum(init, 0) == 1:
init[label] = 1
return init
else:
ban = init
ban[label] = r
        all = 0
for c in range(6):
if init[c] == 1 and c!=label:
all += count[c]
for k in range(6):
if ban[k] == 1:
ban[k] = (1 - r) * (count[k] / all)
return ban
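# Worked example (illustrative only, assuming the lexicon lookups above): for a sentence whose
# gold label is 1 ("like", r = 0.6) that also contains one "sad" word and one "happy" word,
# init becomes [0, 1, 1, 0, 0, 1]; the label keeps r = 0.6 and the remaining 0.4 is split among
# the other detected emotions in proportion to their word counts (0.2 each here), giving the
# distribution [0, 0.6, 0.2, 0, 0, 0.2].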
lis = json.load(open('/Users/apple/Desktop/Textming/cate1s', encoding='utf-8'))
emo = json.load(open('/Users/apple/Desktop/Textming/cate1e', encoding='utf-8'))
l1, l2, l3, dataar = [],[],[], []
rr = 0.6
for i,j in enumerate(lis):
distributionss = distribution(j, emo[i], rr)
l1.append(j)
l2.append(emo[i])
nvm = str(distributionss)
l3.append(nvm)
dataar.append(l1)
dataar.append(l2)
dataar.append(l3)
np_data = np.array(dataar)
np_data = np_data.T
np.array(np_data)
save = pd.DataFrame(np_data, columns=['sentence', 'emotion label', 'distribution'])
save.to_csv('/Users/apple/Desktop/Textming/CICS.csv')
print('over')
``` |
{
"source": "Jiangchenglin521/fc",
"score": 2
} |
#### File: Jiangchenglin521/fc/chatbot.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import nltk
import os
import random
import sys
import time
import json
from nltk.translate.bleu_score import sentence_bleu
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import pickle
import data_utils
import seq2seq_model
import configparser
# Configuration file interface: unified configuration, modify parameters here
# Training and testing parameter definitions
config = configparser.RawConfigParser()
config.read('config')
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
tf.app.flags.DEFINE_float("learning_rate", 0.5, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.98,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("epoch", 80, "num of whole training turn")
tf.app.flags.DEFINE_integer("batch_size", 256,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 256, "Size of each model layer.")
tf.app.flags.DEFINE_integer("load_model", 0, "which model to load.")
tf.app.flags.DEFINE_integer("beam_size", 20, "Size of beam.")
tf.app.flags.DEFINE_integer("embedding_size", 200, "Size of word embedding.")
tf.app.flags.DEFINE_integer("emotion_size", 200, "Size of emotion embedding.")
tf.app.flags.DEFINE_integer("imemory_size", 256, "Size of imemory.")
tf.app.flags.DEFINE_integer("category", 6, "category of emotions.")
tf.app.flags.DEFINE_integer("num_layers", 2, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("totaldata", 1007286, "the whole dataset size 1 epoch.")
tf.app.flags.DEFINE_integer("post_vocab_size", 40000, "post vocabulary size.")
tf.app.flags.DEFINE_integer("response_vocab_size", 40000, "response vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "train", "Training directory.")
tf.app.flags.DEFINE_string("test_dir", "train", "Training directory.")
tf.app.flags.DEFINE_string("pretrain_dir", "pretrain", "Pretraining directory.")
tf.app.flags.DEFINE_integer("pretrain", -1, "pretrain model number")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 1000,
"How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("use_emb", False,
"use embedding model")
tf.app.flags.DEFINE_boolean("use_imemory", False,
"use imemory model")
tf.app.flags.DEFINE_boolean("use_ememory", False,
"use ememory model")
tf.app.flags.DEFINE_boolean("decode", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("human_evaluation", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("metrics", False,
"Set to True for make evaluations.")
tf.app.flags.DEFINE_boolean("beam_search", False, "beam search")
tf.app.flags.DEFINE_boolean("use_fp16", False,
"Train using fp16 instead of fp32.")
tf.app.flags.DEFINE_boolean("use_ppx_acc", False,
"use metric1")
tf.app.flags.DEFINE_boolean("use_bleu", False,
"use BLEU")
tf.app.flags.DEFINE_boolean("use_fg", False,
"use fg acc")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
_buckets = [(12, 12), (16, 16), (20, 20), (30, 30)]
# Read the data and assign it to buckets, matching the structure fed to the model during training
# Returns: the bucketed training data
def read_data(path, max_size=None):
data_set = [[] for _ in _buckets]
data = json.load(open(path,'r'))
# print(data)
counter = 0
size_max = 0
for pair in data:
post = pair[0]
responses = pair[1]
source_ids = [int(x) for x in post[0]]
target_ids = [int(x) for x in responses[0]]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids) < source_size and len(target_ids) < target_size:
data_set[bucket_id].append([source_ids, target_ids, int(post[1]), int(responses[1])])
break
return data_set
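# Each entry appended above has the form [post_token_ids, response_token_ids,
# post_emotion_label, response_emotion_label], placed in the smallest bucket of
# _buckets whose (source, target) sizes fit both sequences.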
# Helper function that can be used to refine the data
def refine_data(data):
new_data = []
for d in data:
b = []
for e in range(6):
b.append([x for x in d if x[-1] == e])
new_data.append(b)
return new_data
# Build the model framework
# Responsible for initializing the model for training, saving it, and reloading an existing model at test time.
def create_model(session, forward_only, beam_search):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    # Load the pretrained word-embedding files
vec_post, vec_response = data_utils.get_data(FLAGS.data_dir, FLAGS.post_vocab_size, FLAGS.response_vocab_size)
print('============-===============', vec_post)
print(len(vec_post[1]))
model = seq2seq_model.Seq2SeqModel(
FLAGS.post_vocab_size,
FLAGS.response_vocab_size,
_buckets,
FLAGS.size,
FLAGS.num_layers,
FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor,
wordEmbedding=vec_post,
embedding_size=FLAGS.embedding_size,
forward_only=forward_only,
beam_search=beam_search,
beam_size=FLAGS.beam_size,
category=FLAGS.category,
use_emb=FLAGS.use_emb,
use_imemory=FLAGS.use_imemory,
use_ememory=FLAGS.use_ememory,
emotion_size=FLAGS.emotion_size,
imemory_size=FLAGS.imemory_size,
dtype=dtype)
see_variable = True
if see_variable == True:
for i in tf.all_variables():
print(i.name, i.get_shape())
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
pre_ckpt = tf.train.get_checkpoint_state(FLAGS.pretrain_dir)
    # Check whether a model checkpoint file already exists
if ckpt: #and tf.gfile.Exists(ckpt.model_checkpoint_path+".index"):
if FLAGS.load_model == 0:
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
path = ckpt.model_checkpoint_path[:ckpt.model_checkpoint_path.find('-')+1]+str(FLAGS.load_model)
print("Reading model parameters from %s" % path)
model.saver.restore(session, path)
else:
        # Initialize and train from scratch
if pre_ckpt:
session.run(tf.initialize_variables(model.initial_var))
if FLAGS.pretrain > -1:
path = pre_ckpt.model_checkpoint_path[:pre_ckpt.model_checkpoint_path.find('-')+1]+str(FLAGS.pretrain)
print("Reading pretrain model parameters from %s" % path)
model.pretrain_saver.restore(session, path)
else:
print("Reading pretrain model parameters from %s" % pre_ckpt.model_checkpoint_path)
model.pretrain_saver.restore(session, pre_ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.global_variables_initializer())
# vec_post, vec_response = data_utils.get_data(FLAGS.data_dir, FLAGS.post_vocab_size, FLAGS.response_vocab_size)
# print('vec_post:', vec_post.shape)
# print('vec_res:', vec_response)
# initvec_post = tf.constant(vec_post, dtype=dtype, name='init_wordvector_post')
            # Locate the decoder word-embedding variable and replace its initialization with the pretrained word vectors
initvec_response = tf.constant(vec_response, dtype=dtype, name='init_wordvector_response')
# embedding_post = [x for x in tf.trainable_variables() if x.name == 'embedding_attention_seq2seq/rnn/embedding_wrapper/embedding:0'][0]
embedding_response = [x for x in tf.trainable_variables() if x.name == 'embedding_attention_seq2seq/embedding_attention_decoder/embedding:0'][0]
print(type(embedding_response))
print(embedding_response)
# session.run(tf.assign(embedding_post, initvec_post))
# session.run(tf.assign(embedding_response, initvec_response))
# session.run(embedding_post.assign(initvec_post))
session.run(embedding_response.assign(initvec_response))
#
return model
# Start training
def train():
# print(FLAGS.__flags)
# Prepare data.
print("Preparing data in %s" % FLAGS.data_dir)
train_path, dev_path, test_path, _, _ = data_utils.prepare_data(
FLAGS.data_dir, FLAGS.post_vocab_size, FLAGS.response_vocab_size)
with tf.Session(config=sess_config) as sess:
# 构建模型/create model
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
model = create_model(sess, False, False)
# Read data into buckets and compute their sizes.
print ("Reading development and training data (limit: %d)."
% FLAGS.max_train_data_size)
_, id2word = data_utils.initialize_vocabulary('/mnt/data/jiangchenglin/fc-master/data/vocab40000.response')
dev_set = read_data(dev_path)
dev_set = refine_data(dev_set)
train_set = read_data(train_path, FLAGS.max_train_data_size)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
print([len(x) for x in dev_set])
# for x in dev_set:
# print(x)
print([len(x) for x in train_set])
# for x in train_set:
# print(x)
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
# the size if i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
print(train_buckets_scale)
# This is the training loop.
step_time, loss = 0.0, 0.0
current_step = 0
in_epoch_steps = FLAGS.totaldata / FLAGS.batch_size
previous_losses = []
word2count = pickle.load(open('/mnt/data/jiangchenglin/fc-master/word2idf', 'rb'))
word2count["_PAD"] = 10000000
word2count["_GO"] = 10000000
word2count["_EOS"] = 10000000
word2count["_UNK"] =100
        try:  # Catch the user-interrupt exception so that the current model file can be saved
for e in range(FLAGS.epoch):
print("enter the traing, epoch:",(e+1))
for i in range(int(in_epoch_steps)):
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
                    # Get a batch and make a step; iterate batch by batch to complete one epoch
print("Get a batch and make a step")
start_time = time.time()
encoder_inputs, decoder_inputs, target_weights, target_weights1, decoder_emotions = model.get_batch(
train_set, bucket_id, id2word, word2count)
_, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, target_weights1, decoder_emotions, bucket_id, False, False)
step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
loss += step_loss / FLAGS.steps_per_checkpoint
current_step += 1
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % FLAGS.steps_per_checkpoint == 0:
# Print statistics for the previous epoch.
perplexity = math.exp(float(loss)) if loss < 300 else float("inf")
print("global step %d (%.2f epoch) learning rate %.4f step-time %.2f perplexity "
"%.2f" % (model.global_step.eval(), model.global_step.eval() / float(in_epoch_steps),
model.learning_rate.eval(), step_time, perplexity))
# Decrease learning rate if no improvement was seen over last 3 times.
if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(loss)
# Save checkpoint and zero timer and loss.
if current_step % (FLAGS.steps_per_checkpoint * 10) == 0 or current_step % 34000 == 0:
checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
step_time, loss = 0.0, 0
# dev set evaluation
total_loss = .0
total_len = .0
for bucket_id in xrange(len(_buckets)):
if len(dev_set[bucket_id]) == 0:
print(" eval: empty bucket %d" % (bucket_id))
continue
bucket_loss = .0
bucket_len = .0
for e in range(6):
len_data = len(dev_set[bucket_id][e])
for batch in xrange(0, len_data, FLAGS.batch_size):
step = min(FLAGS.batch_size, len_data - batch)
model.batch_size = step
encoder_inputs, decoder_inputs, target_weights, target_weights1, decoder_emotions = model.get_batch_data(
dev_set[bucket_id][e][batch:batch + step], bucket_id, id2word, word2count)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, decoder_emotions, bucket_id, True,
False)
bucket_loss += eval_loss * step
bucket_len += len_data
total_loss += bucket_loss
total_len += bucket_len
bucket_loss = float(bucket_loss / bucket_len)
bucket_ppx = math.exp(bucket_loss) if bucket_loss < 300 else float(
"inf")
print(" dev_set eval: bucket %d perplexity %.2f" % (bucket_id, bucket_ppx))
total_loss = float(total_loss / total_len)
total_ppx = math.exp(total_loss) if total_loss < 300 else float(
"inf")
print(" dev_set eval: bucket avg perplexity %.2f" % (total_ppx))
sys.stdout.flush()
model.batch_size = FLAGS.batch_size
except (KeyboardInterrupt, SystemExit): # If the user press Ctrl+C while testing progress
print('Interruption detected, exiting the program...')
# Interactive decoding, for trying out the dialogue quality in practice.
def decode():
try:
from wordseg_python import Global
except:
Global = None
def split(sent):
sent = sent.decode('utf-8', 'ignore').encode('gbk', 'ignore')
if Global == None:
return sent.decode("gbk").split(' ')
tuples = [(word.decode("gbk"), pos) for word, pos in Global.GetTokenPos(sent)]
return [each[0] for each in tuples]
with tf.Session(config=sess_config) as sess:
with tf.device("/cpu:0"):
# Create model and load parameters.
model = create_model(sess, True, FLAGS.beam_search)
model.batch_size = 1 # We decode one sentence at a time.
beam_search = FLAGS.beam_search
beam_size = FLAGS.beam_size
num_output = 5
            # Load the vocabularies.
post_vocab_path = os.path.join(FLAGS.data_dir,
config.get('data', 'post_vocab_file') % (FLAGS.post_vocab_size))
response_vocab_path = os.path.join(FLAGS.data_dir,
config.get('data', 'response_vocab_file') % (FLAGS.response_vocab_size))
post_vocab, _ = data_utils.initialize_vocabulary(post_vocab_path)
_, rev_response_vocab = data_utils.initialize_vocabulary(response_vocab_path)
# Decode from standard input.
sys.stdout.write("用户: ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
print(sentence)
sentence = " ".join(sentence)
# Get token-ids for the input sentence.
token_ids = data_utils.sentence_to_token_ids(sentence, post_vocab)
print(token_ids)
int2emotion = ['null', 'like', 'sad', 'disgust', 'angry', 'happy']
bucket_id = min([b for b in xrange(len(_buckets))
if _buckets[b][0] > len(token_ids)])
# Get a 1-element batch to feed the sentence to the model.
decoder_emotion = 0
encoder_inputs, decoder_inputs, target_weights,target_weights1, decoder_emotions = model.get_batch_data(
[[token_ids, [], 0, decoder_emotion]], bucket_id, id2word, word2count)
# Get output logits for the sentence.
results, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, decoder_emotions, bucket_id, True, beam_search)
if beam_search:
result = results[0]
symbol = results[1]
parent = results[2]
result = results[0]
symbol = results[1]
parent = results[2]
res = []
nounk = []
for i, (prb, _, prt) in enumerate(result):
if len(prb) == 0: continue
for j in xrange(len(prb)):
p = prt[j]
s = -1
output = []
for step in xrange(i - 1, -1, -1):
s = symbol[step][p]
p = parent[step][p]
output.append(s)
output.reverse()
if data_utils.UNK_ID in output:
res.append([prb[j][0],
" ".join([tf.compat.as_str(rev_response_vocab[int(x)]) for x in output])])
else:
nounk.append([prb[j][0],
" ".join([tf.compat.as_str(rev_response_vocab[int(x)]) for x in output])])
res.sort(key=lambda x: x[0], reverse=True)
nounk.sort(key=lambda x: x[0], reverse=True)
if len(nounk) < beam_size:
res = nounk + res[:(num_output - len(nounk))]
else:
res = nounk
for i in res[:num_output]:
print(1)
                # At inference time, greedy decoding is used and the top-1 response is output
else:
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(np.split(logit, [2, FLAGS.response_vocab_size], axis=1)[1], axis=1) + 2)
for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)]
# Print out response sentence corresponding to outputs.
print('E先生:' + ':' + "".join(
[tf.compat.as_str(rev_response_vocab[output]) for output in outputs]))
print("用户: ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
# The following are evaluation-metric test routines
# Metrics: perplexity, BLEU, and accuracy (accuracy is not considered for chit-chat for now)
def evaluation():
with tf.Session(config=sess_config) as sess:
model = create_model(sess, False, FLAGS.beam_search)
# model.evaluation = FLAGS.metrics
print("Preparing data in %s" % FLAGS.data_dir)
# _, dev_path, _, _, _ = data_utils.prepare_data(
# FLAGS.data_dir, FLAGS.post_vocab_size, FLAGS.response_vocab_size)
#
# dev_set = read_data(dev_path)
# data_set = [[] for _ in _buckets]
# print('===长度')
# print(len(dev_set[0]))
# print(len(dev_set[1]))
# print(len(dev_set[2]))
# print(len(dev_set[3]))
# k = []
# for i in range(1000):
# j = random.randint(1, 11965)
# k.append(j)
# if i == 0:
#
# for bucket_id in range(len(_buckets)):
# data_set[bucket_id].append(dev_set[bucket_id][j])
# else:
# if j == k[-1]:
#
# while j == k[-1]:
# j = random.randint(1, 11965)
# for bucket_id in range(len(_buckets)):
# data_set[bucket_id].append(dev_set[bucket_id][j])
# else:
# for bucket_id in range(len(_buckets)):
# data_set[bucket_id].append(dev_set[bucket_id][j])
#
# dev_set = refine_data(data_set)
# with open('/home/minelab/jiangchenglin/eqa/mutualAutoeqa/data/test_data', 'w') as output:
# output.write(json.dumps(dev_set, ensure_ascii=False))
test_path = os.path.join(FLAGS.data_dir, config.get('data', 'test_data'))
dev_set = json.load(open(test_path, 'r'))
        # Select which evaluation mode to enter:
PPT = FLAGS.use_ppx_acc
# model.PPT = PPT
BLEU = FLAGS.use_bleu
fg_acc = FLAGS.use_fg
# print('======处理数据')
# print(dev_set[0][0])
# print(len(dev_set[0][0]))
# print(len(dev_set[0][1]))
# print(len(dev_set[0][5]))
if PPT:
total_loss = .0
total_len = .0
for bucket_id in xrange(len(_buckets)):
if len(dev_set[bucket_id]) == 0:
print(" eval: empty bucket %d" % (bucket_id))
continue
bucket_loss = .0
bucket_len = .0
for e in range(6):
len_data = len(dev_set[bucket_id][e])
for batch in xrange(0, len_data, FLAGS.batch_size):
step = min(FLAGS.batch_size, len_data - batch)
model.batch_size = step
encoder_inputs, decoder_inputs, target_weights, target_weights1, decoder_emotions = model.get_batch_data(
dev_set[bucket_id][e][batch:batch + step], bucket_id, id2word, word2count)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, decoder_emotions, bucket_id, True,
False)
bucket_loss += eval_loss * step
bucket_len += len_data
total_loss += bucket_loss
total_len += bucket_len
bucket_loss = float(bucket_loss / bucket_len)
bucket_ppx = math.exp(bucket_loss) if bucket_loss < 300 else float(
"inf")
print(
" test_set eval: bucket %d perplexity %.2f" % (bucket_id, bucket_ppx,))
total_loss = float(total_loss / total_len)
total_ppx = math.exp(total_loss) if total_loss < 300 else float(
"inf")
print(" test_set eval: bucket avg perplexity %.2f" % (total_ppx))
# sys.stdout.flush()
if BLEU:
total_bleu = .0
total_len = .0
print('===计算bleu模式')
# model = create_model(sess, len(word2id), len(word2id), True, beam_search=False, beam_size=1)
# FLAGS.batch_size = 1
print('Start testing --bleu (press Ctrl+C to save and exit)...')
for bucket_id in xrange(len(_buckets)):
if len(dev_set[bucket_id]) == 0:
print(" eval: empty bucket %d" % (bucket_id))
continue
bucket_bleu = .0
bucket_len = .0
for e in range(6):
len_data = len(dev_set[bucket_id][e])
for batch in xrange(0, len_data, FLAGS.batch_size):
step = min(FLAGS.batch_size, len_data - batch)
model.batch_size = step
encoder_inputs, decoder_inputs, target_weights, decoder_emotions, refer = model.get_batch_data1(
dev_set[bucket_id][e][batch:batch + step], bucket_id)
_, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, decoder_emotions, bucket_id, True,
False)
# print(step)
k = len(output_logits)
# print(output_logits)
for i in range(step):
logits = []
for j in range(k):
logits.append(output_logits[j][i])
# print('---0-0-0-0-000---')
# print(len(logits))
# print(len(logits[1]))
# print(type(logits[1]))
# print(logits[1])
outputs = [
int(np.argmax(np.split([logit], [2, FLAGS.response_vocab_size], axis=1)[1], axis=1) + 2)
for logit in logits]
# If there is an EOS symbol in outputs, cut them at that point.
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)]
#############
response_vocab_path = os.path.join(FLAGS.data_dir,
config.get('data', 'response_vocab_file') % (
FLAGS.response_vocab_size))
#############
_, rev_response_vocab = data_utils.initialize_vocabulary(response_vocab_path)
candidate = data_utils.cov2seq(outputs, rev_response_vocab)
ref = data_utils.cov2seq(refer[i], rev_response_vocab)
refer1 = [ref]
#############
# print(candidate)
# print(refer1)
# print('=====')
bleu_2 = sentence_bleu(refer1, candidate, weights=(0.5, 0.5, 0, 0))
# print(bleu_2)
#############
bucket_bleu += bleu_2
bucket_len += len_data
total_bleu += bucket_bleu
total_len += bucket_len
bucket_bleu = float(bucket_bleu / bucket_len)
print(
" test_set eval: bucket %d bleu %.2f" % (bucket_id, bucket_bleu))
total_bleu= float(total_bleu / total_len)
print(" test_set eval: bucket avg bleu %.2f" % (total_bleu))
# Main function
def main(_):
if FLAGS.decode:
decode()
if FLAGS.metrics:
evaluation()
# if FLAGS.human_evaluation:
# generation()
else:
train()
if __name__ == "__main__":
tf.app.run()
``` |
{
"source": "JIANG-CX/data_labeling",
"score": 2
} |
#### File: keras/distribute/multi_worker_testing_utils.py
```python
import tensorflow.compat.v2 as tf
import keras
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from keras.optimizer_v2 import gradient_descent
from tensorflow.python.training.server_lib import ClusterSpec
def mnist_synthetic_dataset(batch_size, steps_per_epoch):
"""Generate synthetic MNIST dataset for testing."""
# train dataset
x_train = tf.ones([batch_size * steps_per_epoch, 28, 28, 1],
dtype=tf.float32)
y_train = tf.ones([batch_size * steps_per_epoch, 1],
dtype=tf.int32)
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.repeat()
# train_ds = train_ds.shuffle(100)
train_ds = train_ds.batch(64, drop_remainder=True)
# eval dataset
x_test = tf.random.uniform([10000, 28, 28, 1], dtype=tf.float32)
y_test = tf.random.uniform([10000, 1],
minval=0,
maxval=9,
dtype=tf.int32)
eval_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
eval_ds = eval_ds.batch(64, drop_remainder=True)
return train_ds, eval_ds
def get_mnist_model(input_shape):
"""Define a deterministically-initialized CNN model for MNIST testing."""
inputs = keras.Input(shape=input_shape)
x = keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
kernel_initializer=keras.initializers.TruncatedNormal(seed=99))(inputs)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Flatten()(x) + keras.layers.Flatten()(x)
x = keras.layers.Dense(
10,
activation="softmax",
kernel_initializer=keras.initializers.TruncatedNormal(seed=99))(x)
model = keras.Model(inputs=inputs, outputs=x)
# TODO(yuefengz): optimizer with slot variables doesn't work because of
# optimizer's bug.
# TODO(yuefengz): we should not allow non-v2 optimizer.
model.compile(
loss=keras.losses.sparse_categorical_crossentropy,
optimizer=gradient_descent.SGD(learning_rate=0.001),
metrics=["accuracy"])
return model
def make_parameter_server_cluster(num_workers, num_ps):
cluster_def = multi_worker_test_base.create_in_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer="grpc")
cluster_def["chief"] = [
"localhost:%d" % multi_worker_test_base.pick_unused_port()
]
return SimpleClusterResolver(ClusterSpec(cluster_def), rpc_layer="grpc")
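# A minimal local sketch (not from the original test suite) combining the helpers above:
# fit the deterministic MNIST model on the synthetic dataset for a couple of steps.
def _synthetic_smoke_test(batch_size=64, steps_per_epoch=2):
  train_ds, _ = mnist_synthetic_dataset(batch_size, steps_per_epoch)
  model = get_mnist_model((28, 28, 1))
  model.fit(train_ds, epochs=1, steps_per_epoch=steps_per_epoch)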
```
#### File: engine/tester/builder.py
```python
import logging
from typing import Dict
from yacs.config import CfgNode
from videoanalyst.pipeline.pipeline_base import PipelineBase
from .tester_base import TRACK_TESTERS, VOS_TESTERS
logger = logging.getLogger(__file__)
def build(task: str, cfg: CfgNode, pipeline: PipelineBase):
r"""
Builder function.
Arguments
---------
task: str
builder task name (track|vos)
cfg: CfgNode
        builder configuration
pipeline: PipelineBase
underlying pipeline
Returns
-------
    TesterBase
tester built by builder
"""
if task == "track":
modules = TRACK_TESTERS
elif task == "vos":
modules = VOS_TESTERS
else:
logger.error("no tester for task {}".format(task))
exit(-1)
names = cfg.tester.names
testers = []
    # Multiple testers (one per experiment) can be returned here
for name in names:
tester = modules[name](cfg, pipeline)
hps = tester.get_hps()
# from IPython import embed;embed()
for hp_name in hps:
if hp_name in cfg.tester[name]:
new_value = cfg.tester[name][hp_name]
hps[hp_name] = new_value
tester.set_hps(hps)
tester.update_params()
testers.append(tester)
return testers
def get_config() -> Dict[str, CfgNode]:
r"""
Get available component list config
Returns
-------
Dict[str, CfgNode]
config with list of available components
"""
cfg_dict = {"track": CfgNode(), "vos": CfgNode()}
for cfg_name, module in zip(["track", "vos"], [TRACK_TESTERS, VOS_TESTERS]):
cfg = cfg_dict[cfg_name]
cfg["names"] = []
for name in module:
cfg["names"].append(name)
cfg[name] = CfgNode()
tester = module[name]
hps = tester.default_hyper_params
for hp_name in hps:
cfg[name][hp_name] = hps[hp_name]
return cfg_dict
```
#### File: evaluation/vot_benchmark/benchmark_helper.py
```python
import json
from collections import OrderedDict
from os import listdir
from os.path import dirname, exists, isdir, join, realpath
from pathlib import Path
import cv2
import numpy as np
def get_json(path):
with open(path) as f:
return json.load(f)
def get_txt(path):
with open(path) as f:
return f.read()
def get_img(path):
img = cv2.imread(path)
return img
def get_files(path, suffix):
if isinstance(path, str):
p = Path(path)
else:
p = path
list_dir = list(p.glob('*'))
result = [x.name for x in list_dir if x.suffix == suffix]
return result
def get_dataset_zoo():
root = realpath(join(dirname(__file__), '../data'))
zoos = listdir(root)
def valid(x):
y = join(root, x)
if not isdir(y): return False
return exists(join(y, 'list.txt')) \
or exists(join(y, 'train', 'meta.json'))\
or exists(join(y, 'ImageSets', '2016', 'val.txt'))
zoos = list(filter(valid, zoos))
return zoos
def load_dataset(vot_path, dataset):
info = OrderedDict()
if 'VOT' in dataset:
base_path = join(vot_path, dataset)
# if not exists(base_path):
# logging.error("Please download test dataset!!!")
# exit()
list_path = join(base_path, 'list.txt')
f = get_txt(list_path)
videos = [v.strip() for v in f.strip().split('\n')]
#print(videos)
for video in videos:
video_path = join(base_path, video)
image_path = join(video_path, 'color')
if not exists(image_path):
image_path = video_path
image_files = sorted(get_files(image_path, '.jpg'))
image_files = [join(image_path, x) for x in image_files]
gt_path = join(video_path, 'groundtruth.txt')
gt = get_txt(gt_path)
gt = gt.strip().split('\n')
gt = np.asarray([line.split(',') for line in gt], np.float32)
if gt.shape[1] == 4:
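                # Convert axis-aligned (x, y, w, h) annotations to the 4-corner
                # polygon format (x1, y1, ..., x4, y4) used by the VOT toolkit.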
gt = np.column_stack(
(gt[:, 0], gt[:, 1], gt[:, 0], gt[:, 1] + gt[:, 3] - 1,
gt[:, 0] + gt[:, 2] - 1, gt[:, 1] + gt[:, 3] - 1,
gt[:, 0] + gt[:, 2] - 1, gt[:, 1]))
info[video] = {'image_files': image_files, 'gt': gt, 'name': video}
return info
```
#### File: videoanalyst/model/builder.py
```python
from typing import Dict
from yacs.config import CfgNode
from .backbone import builder as backbone_builder
from .loss import builder as loss_builder
from .task_head import builder as head_builder
from .task_model import builder as task_builder
def build_model(
task: str,
cfg: CfgNode,
):
r"""
Builder function.
Arguments
---------
task: str
builder task name (track|vos)
cfg: CfgNode
buidler configuration
Returns
-------
torch.nn.Module
module built by builder
"""
if task == "track":
backbone = backbone_builder.build(task, cfg.backbone)
losses = loss_builder.build(task, cfg.losses)
head = head_builder.build(task, cfg.task_head)
task_model = task_builder.build(task, cfg.task_model, backbone, head,
losses)
return task_model
else:
        print("model for task {} is not completed".format(task))
exit(-1)
def get_config() -> Dict[str, CfgNode]:
r"""
Get available component list config
Returns
-------
Dict[str, CfgNode]
config with list of available components
"""
cfg_dict = {"track": CfgNode(), "vos": CfgNode()}
for task in cfg_dict:
cfg = cfg_dict[task]
cfg["backbone"] = backbone_builder.get_config()[task]
cfg["losses"] = loss_builder.get_config()[task]
cfg["task_model"] = task_builder.get_config()[task]
cfg["task_head"] = head_builder.get_config()[task]
return cfg_dict
```
#### File: pipeline/tracker/builder.py
```python
import logging
from typing import Dict
from yacs.config import CfgNode
from videoanalyst.pipeline.tracker.tracker_base import TRACK_PIPELINES
# from videoanalyst.model.module_base import TrackerBase
logger = logging.getLogger(__file__)
def build(cfg: CfgNode, **kwargs):
r"""
Builder function.
Arguments
---------
cfg: CfgNode
        builder configuration
Returns
-------
torch.nn.Module
module built by builder
"""
track_pipelines = TRACK_PIPELINES
trackpipeline_name = cfg.name
track_pipeline = track_pipelines[trackpipeline_name](**kwargs)
hps = track_pipeline.get_hps()
for hp_name in hps:
if hp_name in cfg[trackpipeline_name]:
new_value = cfg[trackpipeline_name][hp_name]
hps[hp_name] = new_value
track_pipeline.set_hps(hps)
track_pipeline.update_params()
return track_pipeline
def get_config() -> Dict[str, CfgNode]:
r"""
Get available component list config
Returns
-------
Dict[str, CfgNode]
config with list of available components
"""
cfg_dict = {"track": CfgNode()}
for cfg_name, task_module in zip(["track"], [TRACK_PIPELINES]):
cfg = cfg_dict[cfg_name]
cfg["name"] = "unknown"
for name in task_module:
cfg[name] = CfgNode()
task_model = task_module[name]
hps = task_model.default_hyper_params
for hp_name in hps:
cfg[name][hp_name] = hps[hp_name]
return cfg_dict
``` |
{
"source": "jiangcy1994/SgsDaily",
"score": 3
} |
#### File: jiangcy1994/SgsDaily/browser.py
```python
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from config import *
__all__ = ['OpenGame', 'QuitGame']
def OpenGame(url, username, password):
driver = webdriver.Chrome()
driver.get(url)
title = driver.title
username_input, password_input = driver.find_elements(By.CLASS_NAME, 'dobest_input')
username_input.send_keys(username)
password_input.send_keys(password)
driver.find_element(By.CLASS_NAME, 'mycheckbox').click()
driver.find_element(By.CLASS_NAME, 'dobest_de_btn').click()
time.sleep(operation_interval)
driver.find_element(By.CSS_SELECTOR, '.new_ser1:nth-child(2)').click()
driver.find_element(By.ID, "newGoInGame").click()
time.sleep(page_load_interval)
return driver, title
def QuitGame(driver):
driver.quit()
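# A minimal usage sketch (illustrative only; the URL and credentials below are placeholders):
if __name__ == '__main__':
    driver, title = OpenGame('https://example.com/login', 'my_username', 'my_password')
    print(title)
    QuitGame(driver)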
``` |
{
"source": "jiangdaniel/dl-papers",
"score": 2
} |
#### File: dl-papers/bayes-by-backprop/main.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import numpy as np
import torch as th
import torch.optim as optim
import torchvision
from tqdm import tqdm
from tensorboardX import SummaryWriter
import network
import utils
def main(args):
trainloader, testloader = get_loaders(args.batch_size, args.fashion)
writer = SummaryWriter(os.path.join("logs", args.dir))
net = network.BayesianNet().to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr)
for epoch in range(args.epochs):
running_loss = running_true_positive = running_count = 0.
for i, (x, labels) in enumerate(tqdm(trainloader, desc=f"Epoch {epoch}. Train data.")):
x, labels = x.view(-1, 784).to(device), labels.to(device)
pred, weights, biases = net.forward(x)
log_likelihood_prior = net.log_likelihood_prior(weights, biases)
log_likelihood_posterior = net.log_likelihood_posterior(weights, biases)
t = th.zeros(x.shape[0], 10, device=device)
t.scatter_(1, labels.unsqueeze(1), 1)
log_likelihood_data = (t * pred).sum()
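            # Minibatch ELBO: the complexity cost (posterior minus prior log-likelihood of the
            # sampled weights) is divided by the number of batches so it is counted once per
            # epoch, while the data log-likelihood is counted per batch.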
loss = (log_likelihood_posterior - log_likelihood_prior) / len(trainloader) - log_likelihood_data
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_count += pred.shape[0]
running_true_positive += (pred.argmax(1) == labels).sum().item()
running_loss += loss.item()
acc_train = running_true_positive / running_count
loss_train = running_loss / running_count
running_loss = running_true_positive = running_count = 0.
with th.no_grad():
for x, labels in tqdm(testloader, desc=f"Epoch {epoch}. Test data."):
x, labels = x.view(-1, 784).to(device), labels.to(device)
pred, _, _ = net.forward(x)
running_count += pred.shape[0]
running_true_positive += (pred.argmax(1) == labels).sum().item()
acc_test = running_true_positive / running_count
writer.add_scalar("train/loss", loss_train, epoch)
writer.add_scalar("train/accuracy", acc_train, epoch)
writer.add_scalar("test/accuracy", acc_test, epoch)
print(f"Epoch {epoch}. Train loss: {loss_train}, Train accuracy: {acc_train}, Test accuracy: {acc_test}")
def get_loaders(batch_size, fashion=False):
mnist = torchvision.datasets.MNIST
root = "./data/mnist"
if fashion:
mnist = torchvision.datasets.FashionMNIST
root = "./data/fashion-mnist"
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),])
trainloader = th.utils.data.DataLoader(
mnist(root=root, train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=2)
testloader = th.utils.data.DataLoader(
mnist(root=root, train=False, download=True, transform=transform),
batch_size=batch_size,
shuffle=False,
num_workers=2)
return trainloader, testloader
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--batch-size", type=int, default=128)
    parser.add_argument("--lr", type=float, default=0.001)
parser.add_argument("--fashion", action="store_true", default=False)
parser.add_argument("--dir", default=utils.timestamp())
parser.add_argument("--no-cuda", action="store_true", default=False)
args = parser.parse_args()
device = th.device("cpu" if (not th.cuda.is_available() or args.no_cuda) else "cuda")
main(args)
```
#### File: equilibrium-propagation/networks/network.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
from itertools import product
import numpy as np
import torch as th
import torch.nn as nn
import torchvision
from tqdm import tqdm
from tensorboardX import SummaryWriter
import utils
class Net(nn.Module):
def __init__(self, n_layers, device="cpu"):
assert n_layers > 0
self.n_layers = n_layers
a = np.sqrt(2. / (784. + 500.))
self.weights = [th.randn(784, 500, device=device) * a]
self.biases = [th.randn(500, device=device) * a]
a = np.sqrt(2. / (500. + 500.))
self.weights.extend([th.randn(500, 500, device=device) * a for _ in range(self.n_layers - 1)])
self.biases.extend([th.randn(500, device=device) * a for _ in range(self.n_layers - 1)])
a = np.sqrt(2. / (500. + 10.))
self.weights.append(th.randn(500, 10, device=device) * a)
self.biases.append(th.randn(10, device=device) * a)
# Hyperparameters
self.beta = 1.0
self.epsilon = 0.5
if n_layers == 1:
self.n_iter_free = 20
self.n_iter_clamped = 4
self.alphas = [0.1, 0.05]
elif n_layers == 2:
self.n_iter_free = 100
self.n_iter_clamped = 6
self.alphas = [0.4, 0.1, 0.01]
elif n_layers == 3:
self.n_iter_free = 500
self.n_iter_clamped = 8
self.alphas = [0.128, 0.032, 0.008, 0.002]
@staticmethod
def rho(x):
return x.clamp(0., 1.)
@staticmethod
def d_rho(x):
return ((x >= 0.) * (x <= 1.)).float()
def free_energy(self, x, units):
"""BROKEN: off by one on units"""
raise NotImplementedError
total = th.zeros(1)
total += unit[0].pow(2).sum()
total -= (self.weights[0] * (self.rho(x.t()) @ self.rho(units[0]))).sum()
total -= (self.units[0] @ self.rho(self.biases[0])).sum() * 2.
for i in range(1, len(self.weights)):
total += unit[i].pow(2).sum()
total -= (self.weights[i] * (self.rho(units[i-1].t()) @ self.rho(units[i]))).sum()
total -= (self.units[i] @ self.rho(self.biases[i])).sum() * 2.
return total.item() / 2.
def fixed_points(self, units, t):
d_units = [None] * len(units)
for _ in range(self.n_iter_free):
for i in range(1, len(units) - 1):
d_units[i] = self.d_rho(units[i]) * (units[i-1] @ self.weights[i-1] + units[i+1] @ self.weights[i].t() + self.biases[i-1]) - units[i]
d_units[-1] = self.d_rho(units[-1]) * (units[-2] @ self.weights[-1] + self.biases[-1]) - units[-1]
for i in range(1, len(units)):
units[i] = self.rho(units[i] + self.epsilon * d_units[i])
units_free = [u.clone() for u in units]
for _ in range(self.n_iter_clamped):
for i in range(1, len(units) - 1):
d_units[i] = self.d_rho(units[i]) * (units[i-1] @ self.weights[i-1] + units[i+1] @ self.weights[i].t() + self.biases[i-1]) - units[i]
d_units[-1] = self.d_rho(units[-1]) * (units[-2] @ self.weights[-1] + self.biases[-1]) - units[-1] + self.beta * (t - units[-1])
for i in range(1, len(units)):
units[i] = self.rho(units[i] + self.epsilon * d_units[i])
return units_free, units
def update(self, units_free, units_clamped):
batch_size = units_free[-1].shape[0]
for i in range(len(self.weights)):
self.weights[i] += self.alphas[i] / self.beta * (self.rho(units_clamped[i].t()) @ self.rho(units_clamped[i+1]) - self.rho(units_free[i].t()) @ self.rho(units_free[i+1])) / batch_size
self.biases[i] += self.alphas[i] / self.beta * (self.rho(units_clamped[i+1]) - self.rho(units_free[i+1])).mean(0)
```
#### File: dl-papers/equilibrium-propagation/numpy_one_layer.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import torch as th
import torchvision
from tqdm import tqdm
def main(args):
trainloader, testloader = get_loaders(args.batch_size, args.fashion)
epsilon = 0.5
beta = 1.0
alpha1 = 0.1
alpha2 = 0.05
a = np.sqrt(2.0 / (784 + 500))
W1 = np.random.uniform(-a, a, (784, 500))
b1 = np.random.uniform(-a, a, 500)
a = np.sqrt(2.0 / (500 + 10))
W2 = np.random.uniform(-a, a, (500, 10))
b2 = np.random.uniform(-a, a, 10)
states = [(np.random.uniform(0, 1., (args.batch_size, 500)), \
np.random.uniform(0, 1., (args.batch_size, 10))) for _ in range(len(trainloader))]
for epoch in range(args.epochs):
running_loss = running_energy = running_true_positive = 0.
for i, (x, labels) in enumerate(tqdm(trainloader, desc=f"Epoch {epoch}")):
x, labels = x.view(-1, 784).numpy(), labels.numpy()
h, y = states[i]
# Free phase
for j in range(20):
dh = d_rho(h) * (x @ W1 + y @ W2.T + b1) - h
dy = d_rho(y) * (h @ W2 + b2) - y
h = rho(h + epsilon * dh)
y = rho(y + epsilon * dy)
'''
energy = (np.square(h).sum() + np.square(y).sum() \
- (W1 * (x.T @ h)).sum() - (W2 * (h.T @ y)).sum()) / 2 \
- (h @ b1).sum() - (y @ b2).sum())
print(np.round(energy, 4), np.round(np.linalg.norm(dh), 4))
'''
h_free, y_free = np.copy(h), np.copy(y)
states[i] = h_free, y_free
t = np.zeros((x.shape[0], 10))
t[np.arange(t.shape[0]), labels] = 1
# Weakly clamped phase
for j in range(4):
dh = d_rho(h) * (x @ W1 + y @ W2.T + b1) - h
dy = d_rho(y) * (h @ W2 + b2) - y + beta * (t - y)
h = rho(h + epsilon * dh)
y = rho(y + epsilon * dy)
'''
energy = (np.square(h).sum() + np.square(y).sum() \
- (W1 * (x.T @ h)).sum() - (W2 * (h.T @ y)).sum()) / 2 \
- (h @ b1).sum() - (y @ b2).sum()
print(np.round(energy, 4), np.round(np.linalg.norm(dh), 4))
'''
h_clamped = np.copy(h)
y_clamped = np.copy(y)
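            # Contrastive update: each weight matrix moves along the difference between the
            # clamped-phase and free-phase correlations rho(pre).T @ rho(post), scaled by
            # alpha / beta and averaged over the batch; biases use the activation difference.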
W1 += alpha1 / beta * (rho(x.T) @ rho(h_clamped) - rho(x.T) @ rho(h_free)) / args.batch_size
W2 += alpha2 / beta * (rho(h_clamped.T) @ rho(y_clamped) - rho(h_free.T) @ rho(y_free)) / args.batch_size
b1 += alpha1 / beta * (rho(h_clamped) - rho(h_free)).mean(0)
b2 += alpha2 / beta * (rho(y_clamped) - rho(y_free)).mean(0)
running_energy += (np.square(h_free).sum() + np.square(y_free).sum() \
- (W1 * (x.T @ h_free)).sum() - (W2 * (h_free.T @ y_free)).sum()) / 2 \
- (h_free @ b1).sum() - (y_free @ b2).sum()
running_loss += np.square(t - y_free).sum()
running_true_positive += np.count_nonzero(np.argmax(y_free, 1) == labels)
energy_avg = running_energy / (len(trainloader) * args.batch_size)
accuracy_avg = running_true_positive / (len(trainloader) * args.batch_size)
loss_avg = running_loss / (len(trainloader) * args.batch_size)
print(f"Energy: {energy_avg}, Accuracy: {accuracy_avg}, Loss: {loss_avg}")
def rho(x):
return np.copy(np.clip(x, 0., 1.))
def d_rho(x):
return (x >= 0.) * (x <= 1.)
def get_loaders(batch_size, fashion=False):
mnist = torchvision.datasets.MNIST
if fashion:
mnist = torchvision.datasets.FashionMNIST
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),])
trainloader = th.utils.data.DataLoader(
mnist(root="./data", train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=2)
testloader = th.utils.data.DataLoader(
mnist(root="./data", train=False, download=True, transform=transform),
batch_size=batch_size,
shuffle=False,
num_workers=2)
return trainloader, testloader
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=1000)
parser.add_argument("--batch-size", type=int, default=20)
parser.add_argument("--fashion", action="store_true", default=False)
args = parser.parse_args()
main(args)
```
#### File: dl-papers/equilibrium-propagation/pytorch_multi_layer.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import numpy as np
import torch as th
import torchvision
from tqdm import tqdm
from tensorboardX import SummaryWriter
import utils
from networks.network import Net
def main(args):
writer = SummaryWriter(os.path.join("logs", args.dir))
trainloader, testloader = get_loaders(args.batch_size)
net = Net(n_layers=args.layers)
states = []
for x, labels in trainloader:
states.append([th.rand(args.batch_size, 500, device=device) for _ in range(args.layers)]
+ [th.rand(args.batch_size, 10, device=device)])
for epoch in range(args.epochs):
running_loss = running_energy = running_true_positive = 0.
for i, (x, labels) in enumerate(tqdm(trainloader)):
x, labels = x.to(device).view(x.shape[0], -1), labels.to(device)
t = th.zeros(x.shape[0], 10, device=device)
t.scatter_(1, labels.unsqueeze(1), 1)
units = [x] + states[i]
units_free, units_clamped = net.fixed_points(units, t)
states[i] = units_free[1:]
net.update(units_free, units_clamped)
running_true_positive += (units_free[-1].argmax(1) == labels).sum().item()
running_loss += (t - units_free[-1]).pow(2).sum().item()
energy_train = running_energy / (len(trainloader) * args.batch_size)
accuracy_train = running_true_positive / (len(trainloader) * args.batch_size)
loss_train = running_loss / (len(trainloader) * args.batch_size)
print(f"Energy: {energy_train}, Accuracy: {accuracy_train}, Loss: {loss_train}")
writer.add_scalar(f"loss", loss_train, epoch)
writer.add_scalar(f"energy", energy_train, epoch)
writer.add_scalar(f"accuracy", accuracy_train, epoch)
def get_loaders(batch_size, fashion=False):
mnist = torchvision.datasets.MNIST
if fashion:
mnist = torchvision.datasets.FashionMNIST
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),])
trainloader = th.utils.data.DataLoader(
mnist(root="./data", train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=2)
testloader = th.utils.data.DataLoader(
mnist(root="./data", train=False, download=True, transform=transform),
batch_size=batch_size,
shuffle=False,
num_workers=2)
return trainloader, testloader
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--layers", type=int, default=1)
parser.add_argument("--epochs", type=int, default=25)
parser.add_argument("--batch-size", type=int, default=20)
parser.add_argument("--fashion", action="store_true", default=False,
help="use fashion mnist")
parser.add_argument("--dir", default=utils.timestamp(),
help="name of output log directory")
parser.add_argument("--no-cuda", action="store_true", default=False)
args = parser.parse_args()
device = th.device("cpu" if (not th.cuda.is_available() or args.no_cuda) else "cuda")
device = th.device("cpu") # gpu version not working
main(args)
```
#### File: dl-papers/nn/nn.py
```python
import os
import argparse
import numpy as np
from sklearn import preprocessing
from sklearn import datasets
from tqdm import tqdm
class Network(object):
def __init__(self):
self.linear1 = Linear(64, 128)
self.relu1 = ReLU()
self.linear2 = Linear(128, 64)
self.relu2 = ReLU()
self.linear3 = Linear(64, 10)
def forward(self, x):
out = self.relu1(self.linear1(x))
out = self.relu2(self.linear2(out))
out = self.linear3(out)
return out
def __call__(self, x):
return self.forward(x)
class Linear(object):
def __init__(self, input_size, output_size):
self.W = np.zeros((input_size, output_size))
self.cache = None
self.reset_parameters()
def forward(self, x):
self.cache = x
return x @ self.W
def backward(self, grad):
pass
def reset_parameters(self):
var = 1 / self.W.shape[0]
self.W = np.random.normal(loc=0, scale=var, size=self.W.shape)
def __call__(self, x):
return self.forward(x)
class ReLU(object):
def __init__(self):
self.cache = None
def forward(self, x):
self.cache = x
return np.clip(x, a_min=0, a_max=None)
def __call__(self, x):
return self.forward(x)
def softmax(X):
"""https://deepnotes.io/softmax-crossentropy"""
exps = np.exp(X - np.max(X))
return exps / np.sum(exps)
def cross_entropy(X, y):
"""https://deepnotes.io/softmax-crossentropy"""
m = y.shape[0]
p = softmax(X)
log_likelihood = -np.log(p[range(m),y])
loss = np.sum(log_likelihood) / m
return loss
def main(args):
data, target = datasets.load_digits(return_X_y=True)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data, target = data[indices], target[indices]
splits = (int(0.7 * data.shape[0]), int(0.9 * data.shape[0]))
scaler = preprocessing.StandardScaler().fit(data[:splits[0]])
data = scaler.transform(data)
train, val, test = zip(np.split(data, splits), np.split(target, splits))
net = Network()
for epoch in range(args.epochs):
pred = net(train[0])
loss = cross_entropy(pred, train[1])
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=100)
args = parser.parse_args()
main(args)
```
#### File: dl-papers/sparse-transformations/utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
def timestamp():
return datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
def plot(means, sigmas, values, shape=None, axes=None, flip_y=None, alpha_global=1.0):
"""
From https://github.com/MaestroGraph/sparse-hyper/
:param means:
:param sigmas:
:param values:
:param shape:
:param axes:
:param flip_y: If not None, interpreted as the max y value. y values in the scatterplot are
flipped so that the max is equal to zero and vice versa.
:return:
"""
b, n, d = means.size()
means = means.data[0, :,:].cpu().numpy()
sigmas = sigmas.data[0, :].cpu().numpy()
values = values.tanh().data[0, :].cpu().numpy()
if flip_y is not None:
means[:, 0] = flip_y - means[:, 0]
norm = mpl.colors.Normalize(vmin=-1.0, vmax=1.0)
cmap = mpl.cm.RdYlBu
map = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
if axes is None:
axes = plt.gca()
colors = []
for i in range(n):
color = map.to_rgba(values[i])
alpha = min(0.8, max(0.05, ((sigmas[i, 0] * sigmas[i, 0])+1.0)**-2)) * alpha_global
axes.add_patch(Ellipse((means[i, 1], means[i, 0]), width=sigmas[i,1], height=sigmas[i,0], color=color, alpha=alpha, linewidth=0))
colors.append(color)
axes.scatter(means[:, 1], means[:, 0], s=5, c=colors, zorder=100, linewidth=0, edgecolor='k', alpha=alpha_global)
if shape is not None:
m = max(shape)
step = 1 if m < 100 else m//25
# gray points for the integer index tuples
x, y = np.mgrid[0:shape[0]:step, 0:shape[1]:step]
axes.scatter(x.ravel(), y.ravel(), c='k', s=5, marker='D', zorder=-100, linewidth=0, alpha=0.1* alpha_global)
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.spines['left'].set_visible(False)
``` |
{
"source": "jiangdapeng/netease",
"score": 3
} |
#### File: task/hadoop/statistics_reduce.py
```python
import sys
MAX_LEVEL = 100000
# fields in value
LEVEL = 0
COIN = 1
SCORE = 2
def reduce():
key = None
pre = None
records = []
for line in sys.stdin:
fields = line.split("\t")
key = fields[0]
        # Convert the value fields to integers
v = [int(item) for item in fields[1:]]
        if key != pre and pre != None:
            # The id has changed
high = -1
low = MAX_LEVEL
count = len(records)
total_coin = 0
max_score = -1
for r in records:
                # Total coins
total_coin += r[COIN]
                # Highest and lowest levels
if r[LEVEL] > high:
high = r[LEVEL]
if r[LEVEL] < low:
low = r[LEVEL]
                # Highest score
if r[SCORE] > max_score:
max_score = r[SCORE]
print("%s\t%d\t%d\t%.2f\t%d" % (pre, high, low, total_coin*1.0/count,max_score))
pre = key
records = [v]
else:
            # Still a record for the same id
records.append(v)
pre = key
    # Process the final group of records
if pre != None:
high = -1
low = MAX_LEVEL
count = len(records)
total_coin = 0
max_score = -1
for r in records:
            # Total coins
total_coin += r[COIN]
            # Highest and lowest levels
if r[LEVEL] > high:
high = r[LEVEL]
if r[LEVEL] < low:
low = r[LEVEL]
            # Highest score
if r[SCORE] > max_score:
max_score = r[SCORE]
print("%s\t%d\t%d\t%.2f\t%d" % (pre, high, low, total_coin*1.0/count,max_score))
reduce()
```
#### File: task/learn-python/f_a_b.py
```python
def f(a,b):
'''
f(a,b) =
0, if a<0 and b <0
1, else if a == 0
a, else if b == 0
f(a-1,b) + 2* f(a,b-1) + 1, otherwise
'''
if a >= 0 and b >= 0:
cache = [[None for j in range(b+1)] for i in range(a+1)]
for j in range(b+1):
cache[0][j] = 1
for i in range(1,a+1):
cache[i][0] = i
def do_f(a,b):
#print("a = %d, b=%d" % (a,b))
if a < 0 or b <0:
return 0
if cache[a][b] == None:
cache[a][b] = do_f(a-1,b) + 2*do_f(a,b-1) + 1
return cache[a][b]
return do_f(a,b)
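# Small worked example of the recurrence: f(1, 1) = f(0, 1) + 2 * f(1, 0) + 1
#                                                 = 1 + 2 * 1 + 1 = 4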
def test():
for i in range(10):
for j in range(10):
print("f(%d,%d)=%d" % (i,j,f(i,j)))
if __name__ == "__main__":
test()
``` |
{
"source": "JiangDonglai98/RocksDict",
"score": 2
} |
#### File: RocksDict/test/benchmark.py
```python
from rocksdict import Rdict, Mdict
from random import getrandbits, uniform
import shutil
import os
import dbm
import pytest
# %%
def randbytes(n):
"""Generate n random bytes."""
return getrandbits(n * 8).to_bytes(n, 'little')
def clear_dir(path='./temp'):
shutil.rmtree(path)
class TargetDict:
def __init__(self, key_size: int, data_size: int, place='./', pressure_num=1000000): # ./temp
self.sample_path = os.path.join(place, 'temp')
self.pressure_path = os.path.join(place, 'pressure_temp')
self.sample_dbm = os.path.join(place, 'tmp.db')
self.pressure_dbm = os.path.join(place, 'pressure_tmp.db')
self.key_size = key_size
self.data_size = data_size
self.pressure_num = pressure_num
self.r_dict = Rdict(self.sample_path)
self.pressure_r_dict = Rdict(self.pressure_path)
self.m_dict = Mdict()
self.pressure_m_dict = Mdict()
self.p_dict = dict()
self.pressure_p_dict = dict()
self.dbm_dict = dbm.open(self.sample_dbm, 'n')
self.pressure_dbm_dict = dbm.open(self.pressure_dbm, 'n')
self.ref_dict = dict()
self.backup_dict = dict()
def prepare_ref_data(self, ref_num=2000000, dict_type='p'):
if dict_type == 'p':
for i in range(ref_num * 3):
self.ref_dict[randbytes(self.key_size)] = randbytes(self.data_size)
self.backup_dict[randbytes(self.key_size)] = randbytes(self.data_size)
else:
for i in range(ref_num):
self.ref_dict[randbytes(self.key_size)] = randbytes(self.data_size)
self.backup_dict[randbytes(self.key_size)] = randbytes(self.data_size)
def prepare_data(self, dict_type: str = 'r'):
if self.ref_dict.__len__() == 0:
raise RuntimeError
elif dict_type == 'r':
for key, value in self.ref_dict.items():
self.r_dict[key] = value
elif dict_type == 'm':
for key, value in self.ref_dict.items():
self.m_dict[key] = value
elif dict_type == 'p':
for key, value in self.ref_dict.items():
self.p_dict[key] = value
elif dict_type == 'dbm':
for key, value in self.ref_dict.items():
self.dbm_dict[key] = value
else:
raise RuntimeError('Wrong dictionary type!')
def prepare_pressure_data(self, dict_type: str = 'r'):
if dict_type == 'r':
for i in range(self.pressure_num):
self.pressure_r_dict[randbytes(self.key_size)] = randbytes(self.data_size)
elif dict_type == 'm':
for i in range(self.pressure_num):
self.pressure_m_dict[randbytes(self.key_size)] = randbytes(self.data_size)
elif dict_type == 'p':
for i in range(self.pressure_num):
self.pressure_p_dict[randbytes(self.key_size)] = randbytes(self.data_size)
elif dict_type == 'dbm':
for i in range(self.pressure_num):
self.pressure_dbm_dict[randbytes(self.key_size)] = randbytes(self.data_size)
else:
raise RuntimeError('Wrong dictionary type!')
def clear_data(self):
self.r_dict.destroy()
# clear_dir(self.sample_dbm + '.dat')
# clear_dir(self.sample_dbm + '.dir')
def clear_pressure_data(self):
self.pressure_r_dict.destroy()
# clear_dir(self.pressure_dbm + '.dat')
# clear_dir(self.pressure_dbm + '.dir')
def insert(tar_dict: TargetDict, dict_type: str = 'r'):
key, value = tar_dict.ref_dict.popitem()
if dict_type == 'r':
tar_dict.r_dict[key] = value
elif dict_type == 'm':
tar_dict.m_dict[key] = value
elif dict_type == 'p':
tar_dict.p_dict[key] = value
elif dict_type == 'dbm':
tar_dict.dbm_dict[key] = value
else:
raise RuntimeError('Wrong dictionary type!')
def insert_drop(tar_dict: TargetDict, dict_type: str = 'r'):
key, value = tar_dict.ref_dict.popitem()
b_key, b_value = tar_dict.backup_dict.popitem()
if dict_type == 'r':
tar_dict.r_dict[b_key] = b_value
del tar_dict.r_dict[key]
elif dict_type == 'm':
tar_dict.m_dict[b_key] = b_value
del tar_dict.m_dict[key]
elif dict_type == 'p':
tar_dict.p_dict[b_key] = b_value
del tar_dict.p_dict[key]
elif dict_type == 'dbm':
tar_dict.dbm_dict[b_key] = b_value
del tar_dict.dbm_dict[key]
else:
raise RuntimeError('Wrong dictionary type!')
def mixture(tar_dict: TargetDict, dict_type: str = 'r'):
key, value = tar_dict.ref_dict.popitem()
b_key, b_value = tar_dict.backup_dict.popitem()
add = uniform(0, 1) >= 0.5
if dict_type == 'r':
if add:
tar_dict.r_dict[b_key] = b_value
else:
del tar_dict.r_dict[key]
elif dict_type == 'm':
if add:
tar_dict.m_dict[b_key] = b_value
else:
del tar_dict.m_dict[key]
elif dict_type == 'p':
if add:
tar_dict.p_dict[b_key] = b_value
else:
del tar_dict.p_dict[key]
elif dict_type == 'dbm':
if add:
tar_dict.dbm_dict[b_key] = b_value
else:
del tar_dict.dbm_dict[key]
else:
raise RuntimeError('Wrong dictionary type!')
def pressure_insert(tar_dict: TargetDict, dict_type: str = 'r'):
key, value = tar_dict.ref_dict.popitem()
if dict_type == 'r':
tar_dict.pressure_r_dict[key] = value
elif dict_type == 'm':
tar_dict.pressure_m_dict[key] = value
elif dict_type == 'p':
tar_dict.pressure_p_dict[key] = value
elif dict_type == 'dbm':
tar_dict.pressure_dbm_dict[key] = value
else:
raise RuntimeError('Wrong dictionary type!')
# -----------------------------------------------
def test_insert_key4_data4_Rdict(benchmark):
target_dict = TargetDict(4, 4)
target_dict.prepare_ref_data(dict_type='n')
benchmark(insert, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_key4_data4_Mdict(benchmark):
target_dict = TargetDict(4, 4)
target_dict.prepare_ref_data(dict_type='n')
benchmark(insert, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_key8_data128_Rdict(benchmark):
target_dict = TargetDict(8, 128)
target_dict.prepare_ref_data(dict_type='n')
benchmark(insert, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_key8_data128_Mdict(benchmark):
target_dict = TargetDict(8, 128)
target_dict.prepare_ref_data(dict_type='n')
benchmark(insert, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_key16_data1024_Rdict(benchmark):
target_dict = TargetDict(16, 1024)
target_dict.prepare_ref_data(dict_type='n')
benchmark(insert, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_key16_data1024_Mdict(benchmark):
target_dict = TargetDict(16, 1024)
target_dict.prepare_ref_data(dict_type='n')
benchmark(insert, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_key128_data16_Rdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='n')
benchmark(insert, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_key128_data16_Mdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='n')
benchmark(insert, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_key128_data16_Pdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='p')
benchmark(insert, target_dict, 'p')
target_dict.clear_data()
target_dict.clear_pressure_data()
# def test_insert_key128_data16_DBMdict(benchmark):
# target_dict = TargetDict(128, 16)
# target_dict.prepare_ref_data(dict_type='n')
# benchmark(insert, target_dict, 'dbm')
# target_dict.clear_data()
# target_dict.clear_pressure_data()
# -----------------------------------------
def test_insert_drop_key4_data4_Rdict(benchmark):
target_dict = TargetDict(4, 4)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('r')
benchmark(insert_drop, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_drop_key4_data4_Mdict(benchmark):
target_dict = TargetDict(4, 4)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('m')
benchmark(insert_drop, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_drop_key8_data128_Rdict(benchmark):
target_dict = TargetDict(8, 128)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('r')
benchmark(insert_drop, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_drop_key8_data128_Mdict(benchmark):
target_dict = TargetDict(8, 128)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('m')
benchmark(insert_drop, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_drop_key16_data1024_Rdict(benchmark):
target_dict = TargetDict(16, 1024)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('r')
benchmark(insert_drop, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_drop_key16_data1024_Mdict(benchmark):
target_dict = TargetDict(16, 1024)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('m')
benchmark(insert_drop, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_drop_key128_data16_Rdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('r')
benchmark(insert_drop, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_drop_key128_data16_Mdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('m')
benchmark(insert_drop, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_insert_drop_key128_data16_Pdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='p')
target_dict.prepare_data('p')
benchmark(insert_drop, target_dict, 'p')
target_dict.clear_data()
target_dict.clear_pressure_data()
# def test_insert_drop_key128_data16_DBMdict(benchmark):
# target_dict = TargetDict(128, 16)
# target_dict.prepare_ref_data(dict_type='n')
# target_dict.prepare_data('dbm')
# benchmark(insert_drop, target_dict, 'dbm')
# target_dict.clear_data()
# target_dict.clear_pressure_data()
# ----------------------------------------------------
def test_mixture_key4_data4_Rdict(benchmark):
target_dict = TargetDict(4, 4)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('r')
benchmark(mixture, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_mixture_key4_data4_Mdict(benchmark):
target_dict = TargetDict(4, 4)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('m')
benchmark(mixture, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_mixture_key8_data128_Rdict(benchmark):
target_dict = TargetDict(8, 128)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('r')
benchmark(mixture, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_mixture_key8_data128_Mdict(benchmark):
target_dict = TargetDict(8, 128)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('m')
benchmark(mixture, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_mixture_key16_data1024_Rdict(benchmark):
target_dict = TargetDict(16, 1024)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('r')
benchmark(mixture, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_mixture_key16_data1024_Mdict(benchmark):
target_dict = TargetDict(16, 1024)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('m')
benchmark(mixture, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_mixture_key128_data16_Rdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('r')
benchmark(mixture, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_mixture_key128_data16_Mdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_data('m')
benchmark(mixture, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_mixture_key128_data16_Pdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='p')
target_dict.prepare_data('p')
benchmark(mixture, target_dict, 'p')
target_dict.clear_data()
target_dict.clear_pressure_data()
# def test_mixture_key128_data16_DBMdict(benchmark):
# target_dict = TargetDict(128, 16)
# target_dict.prepare_ref_data(dict_type='n')
# target_dict.prepare_data('dbm')
# benchmark(mixture, target_dict, 'dbm')
# target_dict.clear_data()
# target_dict.clear_pressure_data()
# ------------------------------------------------
def test_pressure_insert_key4_data4_Rdict(benchmark):
target_dict = TargetDict(4, 4)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_pressure_data('r')
benchmark(pressure_insert, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_pressure_insert_key4_data4_Mdict(benchmark):
target_dict = TargetDict(4, 4)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_pressure_data('m')
benchmark(pressure_insert, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_pressure_insert_key8_data128_Rdict(benchmark):
target_dict = TargetDict(8, 128)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_pressure_data('r')
benchmark(pressure_insert, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_pressure_insert_key8_data128_Mdict(benchmark):
target_dict = TargetDict(8, 128)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_pressure_data('m')
benchmark(pressure_insert, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_pressure_insert_key16_data1024_Rdict(benchmark):
target_dict = TargetDict(16, 1024)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_pressure_data('r')
benchmark(pressure_insert, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_pressure_insert_key16_data1024_Mdict(benchmark):
target_dict = TargetDict(16, 1024)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_pressure_data('m')
benchmark(pressure_insert, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_pressure_insert_key128_data16_Rdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_pressure_data('r')
benchmark(pressure_insert, target_dict, 'r')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_pressure_insert_key128_data16_Mdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='n')
target_dict.prepare_pressure_data('m')
benchmark(pressure_insert, target_dict, 'm')
target_dict.clear_data()
target_dict.clear_pressure_data()
def test_pressure_insert_key128_data16_Pdict(benchmark):
target_dict = TargetDict(128, 16)
target_dict.prepare_ref_data(dict_type='p')
target_dict.prepare_pressure_data('p')
benchmark(pressure_insert, target_dict, 'p')
target_dict.clear_data()
target_dict.clear_pressure_data()
# def test_pressure_insert_key128_data16_DBMdict(benchmark):
# target_dict = TargetDict(128, 16)
# target_dict.prepare_ref_data(dict_type='n')
# target_dict.prepare_pressure_data('dbm')
# benchmark(pressure_insert, target_dict, 'dbm')
# target_dict.clear_data()
# target_dict.clear_pressure_data()
```
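These functions rely on the `benchmark` fixture from the pytest-benchmark plugin, so the file is meant to be collected by pytest rather than executed directly. One possible programmatic invocation (a sketch; the path and flag assume a standard pytest-benchmark setup):
```python
import pytest

# run only the benchmark cases in this file; requires the pytest-benchmark plugin
pytest.main(["-q", "test/benchmark.py", "--benchmark-only"])
```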
#### File: RocksDict/test/test_rdict.py
```python
import unittest
from sys import getrefcount
from rocksdict import Rdict, Options, PlainTableFactoryOptions, SliceTransform
from random import randint, random, getrandbits
TEST_INT_RANGE_UPPER = 999999
def randbytes(n):
"""Generate n random bytes."""
return getrandbits(n * 8).to_bytes(n, 'little')
def compare_int_dicts(test_case: unittest.TestCase,
ref_dict: dict,
test_dict: Rdict,
lower: int,
upper: int):
# assert that the keys are the same
keys_ref = list(ref_dict.keys())
keys_ref.sort()
keys_test = [k for k in range(lower, upper) if k in test_dict]
test_case.assertEqual(keys_ref, keys_test)
# assert that the values are the same
for k, v in ref_dict.items():
test_case.assertTrue(k in test_dict)
test_case.assertEqual(test_dict[k], v)
def compare_dicts(test_case: unittest.TestCase,
ref_dict: dict,
test_dict: Rdict):
# assert that the values are the same
for k, v in ref_dict.items():
test_case.assertTrue(k in test_dict)
test_case.assertEqual(test_dict[k], v)
class TestInt(unittest.TestCase):
test_dict = None
ref_dict = None
opt = None
@classmethod
def setUpClass(cls) -> None:
cls.opt = Options()
cls.opt.create_if_missing(True)
cls.opt.set_plain_table_factory(PlainTableFactoryOptions())
cls.opt.set_prefix_extractor(SliceTransform.create_max_len_prefix(8))
cls.test_dict = Rdict("./temp_int", cls.opt)
cls.ref_dict = dict()
def test_add_integer(self):
for i in range(10000):
key = randint(0, TEST_INT_RANGE_UPPER - 1)
value = randint(0, TEST_INT_RANGE_UPPER - 1)
self.ref_dict[key] = value
self.test_dict[key] = value
compare_int_dicts(self, self.ref_dict, self.test_dict, 0, TEST_INT_RANGE_UPPER)
def test_delete_integer(self):
for i in range(5000):
key = randint(0, TEST_INT_RANGE_UPPER - 1)
if key in self.ref_dict:
del self.ref_dict[key]
del self.test_dict[key]
compare_int_dicts(self, self.ref_dict, self.test_dict, 0, TEST_INT_RANGE_UPPER)
def test_reopen(self):
self.test_dict.close()
self.test_dict = None
test_dict = Rdict("./temp_int", self.opt)
compare_int_dicts(self, self.ref_dict, test_dict, 0, TEST_INT_RANGE_UPPER)
def test_get_batch(self):
keys = list(self.ref_dict.keys())[:100]
self.assertEqual(self.test_dict[keys + ["no such key"] * 3], [self.ref_dict[k] for k in keys] + [None] * 3)
@classmethod
def tearDownClass(cls):
Rdict("./temp_int", cls.opt).destroy(cls.opt)
class TestFloat(unittest.TestCase):
test_dict = None
ref_dict = None
opt = None
@classmethod
def setUpClass(cls) -> None:
cls.opt = Options()
cls.opt.create_if_missing(True)
cls.test_dict = Rdict("./temp_float", cls.opt)
cls.ref_dict = dict()
def test_add_float(self):
for i in range(10000):
key = random()
value = random()
self.ref_dict[key] = value
self.test_dict[key] = value
compare_dicts(self, self.ref_dict, self.test_dict)
def test_delete_float(self):
for i in range(5000):
keys = [k for k in self.ref_dict.keys()]
key = keys[randint(0, len(self.ref_dict) - 1)]
del self.ref_dict[key]
del self.test_dict[key]
compare_dicts(self, self.ref_dict, self.test_dict)
def test_reopen(self):
self.test_dict.close()
self.test_dict = None
test_dict = Rdict("./temp_float", self.opt)
compare_dicts(self, self.ref_dict, test_dict)
def test_get_batch(self):
keys = list(self.ref_dict.keys())[:100]
self.assertEqual(self.test_dict[keys + ["no such key"] * 3], [self.ref_dict[k] for k in keys] + [None] * 3)
@classmethod
def tearDownClass(cls):
Rdict("./temp_float", cls.opt).destroy(cls.opt)
class TestBytes(unittest.TestCase):
test_dict = None
ref_dict = None
opt = None
@classmethod
def setUpClass(cls) -> None:
cls.opt = Options()
cls.opt.create_if_missing(True)
cls.test_dict = Rdict("./temp_bytes", cls.opt)
cls.ref_dict = dict()
def test_add_bytes(self):
for i in range(10000):
key = randbytes(10)
value = randbytes(20)
self.assertEqual(getrefcount(key), 2)
self.assertEqual(getrefcount(value), 2)
self.test_dict[key] = value
# rdict does not increase ref_count
self.assertEqual(getrefcount(key), 2)
self.assertEqual(getrefcount(value), 2)
self.ref_dict[key] = value
self.assertEqual(getrefcount(key), 3)
self.assertEqual(getrefcount(value), 3)
compare_dicts(self, self.ref_dict, self.test_dict)
def test_delete_bytes(self):
for i in range(5000):
keys = [k for k in self.ref_dict.keys()]
key = keys[randint(0, len(self.ref_dict) - 1)]
# key + ref_dict + keys + getrefcount -> 4
self.assertEqual(getrefcount(key), 4)
del self.test_dict[key]
self.assertEqual(getrefcount(key), 4)
del self.ref_dict[key]
self.assertEqual(getrefcount(key), 3)
compare_dicts(self, self.ref_dict, self.test_dict)
def test_reopen(self):
self.test_dict.close()
self.test_dict = None
test_dict = Rdict("./temp_bytes", self.opt)
compare_dicts(self, self.ref_dict, test_dict)
def test_get_batch(self):
keys = list(self.ref_dict.keys())[:100]
self.assertEqual(self.test_dict[keys + ["no such key"] * 3], [self.ref_dict[k] for k in keys] + [None] * 3)
@classmethod
def tearDownClass(cls):
Rdict("./temp_bytes", cls.opt).destroy(cls.opt)
class TestString(unittest.TestCase):
test_dict = None
opt = None
@classmethod
def setUpClass(cls) -> None:
cls.opt = Options()
cls.opt.create_if_missing(True)
cls.test_dict = Rdict("./temp_string", cls.opt)
def test_string(self):
self.test_dict["Guangdong"] = "Shenzhen"
self.test_dict["Sichuan"] = "Changsha"
# overwrite
self.test_dict["Sichuan"] = "Chengdu"
self.test_dict["Beijing"] = "Beijing"
del self.test_dict["Beijing"]
# assertions
self.assertNotIn("Beijing", self.test_dict)
self.assertEqual(self.test_dict["Sichuan"], "Chengdu")
self.assertEqual(self.test_dict["Guangdong"], "Shenzhen")
@classmethod
def tearDownClass(cls):
cls.test_dict.destroy(cls.opt)
if __name__ == '__main__':
unittest.main()
``` |
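Distilled from the tests above, the core `Rdict` workflow looks like this (a sketch; the path is illustrative):
```python
from rocksdict import Rdict, Options

opt = Options()
opt.create_if_missing(True)

db = Rdict("./example_db", opt)
db[b"key"] = b"value"          # keys/values may also be int, float or str
assert b"key" in db
assert db[b"key"] == b"value"
del db[b"key"]
db.close()

# remove the on-disk files once the database is no longer needed
Rdict("./example_db", opt).destroy(opt)
```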
{
"source": "jiangdou2015/blog",
"score": 2
} |
#### File: blog/blogpost/api.py
```python
from django.contrib.auth.models import User
from rest_framework import serializers, viewsets
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework.permissions import BasePermission
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from blogpost.models import Blogpost
from rest_framework import permissions
SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS']
class IsAuthenticatedOrReadOnly(BasePermission):
def has_permission(self, request, view):
if (request.method in SAFE_METHODS or
request.user and
request.user.is_authenticated()):
return True
return False
class BlogpostSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Blogpost
fields = ('title', 'author', 'body', 'slug', 'id')
class BlogpostSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    serializer_class = BlogpostSerializer
search_fields = 'title'
def get_queryset(self):
return Blogpost.objects.all()
def list(self, request):
queryset = Blogpost.objects.all()
search_param = self.request.query_params.get('title', None)
if search_param is not None:
queryset = Blogpost.objects.filter(title__contains=search_param)
        serializer = BlogpostSerializer(queryset, many=True)
return Response(serializer.data)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'date_joined', 'last_login')
class UserDetail(viewsets.ReadOnlyModelViewSet):
authentication_classes = [JSONWebTokenAuthentication, BasicAuthentication, SessionAuthentication]
permission_classes = (permissions.IsAuthenticated,)
queryset = User.objects.all()
serializer_class = UserSerializer
    def list(self, request):
        # fall back to all users when no username filter is given
        queryset = User.objects.all()
        search_param = self.request.query_params.get('username', None)
        if search_param is not None:
            queryset = User.objects.filter(username__contains=search_param)
        serializer = UserSerializer(queryset, many=True)
        return Response(serializer.data)
```
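For context, one plausible way to expose these viewsets through a DRF router (a sketch; the module path and URL prefixes are assumptions, not taken from the repository):
```python
# Hypothetical URL configuration for the viewsets above.
from rest_framework import routers

from blogpost.api import BlogpostSet, UserDetail

router = routers.DefaultRouter()
# basename is required because BlogpostSet defines get_queryset() instead of a queryset attribute
router.register(r'blogposts', BlogpostSet, basename='blogpost')
router.register(r'users', UserDetail, basename='user')
urlpatterns = router.urls
```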
#### File: blog/blogpost/views.py
```python
from django.shortcuts import render_to_response, get_object_or_404
from djpjax import pjax
from blogpost.models import Blogpost
def index(request):
return render_to_response('index.html', {
'posts': Blogpost.objects.all()[:5]
})
@pjax(pjax_template="pjax.html", additional_templates={"#pjax-inner-content": "pjax_inner.html"})
def view_post(request, slug):
return render_to_response('blogpost_detail.html', {
'post': get_object_or_404(Blogpost, slug=slug)
})
``` |
{
"source": "jiang-du/openpose-pytorch",
"score": 2
} |
#### File: jiang-du/openpose-pytorch/config.py
```python
stage_define = "PPPPHH"
num_stages = len(stage_define)
# The batch size depends on GPU memory: roughly batch_size=8 for a GTX 1080 and about 32 for a Titan RTX.
batch_size = 48
num_epochs = 150
# ----- Optimizer settings -----
learning_rate = 0.004 # 1.0
weight_decay = 0.0
momentum = 0.9
nesterov = True
# ----- Miscellaneous settings -----
multi_gpu_train = 1
# disable_continue_train: when enabled, training starts from the VGG weights
# instead of resuming from the previous result. Default is True; after the first run it can be set to False.
disable_continue_train = False
# Random initialization. Changing this is not recommended, otherwise training becomes very hard.
train_from_random = False
# If GPU utilization is too low, loader_workers can be increased slightly.
loader_workers = 8
num_image_pretrain = 8000
print_freq = 20
# ----- File path settings -----
pre_model_name = "pre_model.pth"
model_save_filename = './openpose_vgg19.pth'
import platform
# ----- COCO dataset path settings -----
# To make it easy to share the dataset files across platforms, different COCO paths can be set for Windows and Linux.
if platform.system() == 'Linux':
DATA_DIR = '/home/ai-lab/code/datasets/coco'
elif platform.system() == 'Windows':
DATA_DIR = 'H:/dataset/MSCOCO'
else:
DATA_DIR = '~/MSCOCO'
raise Exception("Unknown operating system.")
# Build the COCO paths
import os
ANNOTATIONS_TRAIN = [os.path.join(DATA_DIR, 'annotations', item) for item in ['person_keypoints_train2017.json']]
ANNOTATIONS_VAL = os.path.join(DATA_DIR, 'annotations', 'person_keypoints_val2017.json')
IMAGE_DIR_TRAIN = os.path.join(DATA_DIR, 'images/train2017')
IMAGE_DIR_VAL = os.path.join(DATA_DIR, 'images/val2017')
def generate_codec(stage_define):
stage_codec = list()
for c in stage_define:
        # only 'P' and 'H' are allowed in the stage definition string
        assert ((ord(c) == 80) or (ord(c) == 72))  # 80 -- 'P', 72 -- 'H'
        # map each character to an integer: 'P' -> 1, 'H' -> 0
stage_codec.append((ord(c) - 72) // 8)
return stage_codec
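# Example (sanity check, not part of the original file):
#   generate_codec("PPPPHH") -> [1, 1, 1, 1, 0, 0]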
``` |
{
"source": "jiange91/cerebro-system",
"score": 2
} |
#### File: backend/spark/backend.py
```python
from __future__ import absolute_import
import io
import math
import os
import threading
import time
import gc
import datetime
import h5py
import numpy as np
import pyspark
import tensorflow as tf
from six.moves import queue
from . import service_driver, service_task, util
from .. import constants
from .. import timeout, settings as spark_settings, secret, host_hash, job_id
from ..backend import Backend
PETASTORM_HDFS_DRIVER = constants.PETASTORM_HDFS_DRIVER
TOTAL_BUFFER_MEMORY_CAP_GIB = constants.TOTAL_BUFFER_MEMORY_CAP_GIB
BYTES_PER_GIB = constants.BYTES_PER_GIB
def default_num_workers():
spark_context = pyspark.SparkContext._active_spark_context
return spark_context.defaultParallelism
class KerasStepCounter(tf.keras.callbacks.Callback):
"""Helper callback to count the number of step in sub-epoch training"""
def __init__(self):
self.counter = 0
def on_train_batch_begin(self, batch, logs={}):
self.counter += 1
def on_test_batch_begin(self, batch, logs={}):
self.counter += 1
def get_step_count(self):
return self.counter
class SparkBackend(Backend):
"""Spark backend implementing Cerebro model hopping
:param spark_context: Spark context
:param num_workers: Number of Cerebro workers. Defaults to `spark.default.parallelism`.
    :param start_timeout: Timeout for Spark tasks to spawn, register and start running the code, in seconds.
                          Defaults to 600 seconds.
:param disk_cache_size_gb: Size of the disk data cache in GBs (default 10GB).
:param data_readers_pool_type: Data readers pool type ('process' or 'thread') (default 'thread')
:param num_data_readers: Number of data readers (default 10)
:param nics: List of NIC names, will only use these for communications. If None is specified, use any
available networking interfaces (default None)
:param verbose: Debug output verbosity (0-2). Defaults to 1.
"""
def __init__(self, spark_context=None, num_workers=None, start_timeout=600, disk_cache_size_gb=10,
data_readers_pool_type='thread', num_data_readers=10,
nics=None, verbose=1):
tmout = timeout.Timeout(start_timeout,
message='Timed out waiting for {activity}. Please check that you have '
'enough resources to run all Cerebro processes. Each Cerebro '
'process runs in a Spark task. You may need to increase the '
'start_timeout parameter to a larger value if your Spark resources '
'are allocated on-demand.')
settings = spark_settings.Settings(verbose=verbose,
key=secret.make_secret_key(),
timeout=tmout,
disk_cache_size_bytes=disk_cache_size_gb * constants.BYTES_PER_GIB,
data_readers_pool_type=data_readers_pool_type,
num_data_readers=num_data_readers,
nics=nics)
if spark_context is None:
spark_context = pyspark.SparkContext._active_spark_context
if spark_context is None:
raise Exception('Could not find an active SparkContext, are you '
'running in a PySpark session?')
self.spark_context = spark_context
if num_workers is None:
num_workers = spark_context.defaultParallelism
if settings.verbose >= 1:
print('CEREBRO => Time: {}, Running {} Workers (inferred from spark.default.parallelism)'.format(
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), num_workers))
else:
if settings.verbose >= 1:
print('CEREBRO => Time: {}, Running {} Workers'.format(datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"), num_workers))
settings.num_workers = num_workers
self.settings = settings
self.workers_initialized = False
self.task_clients = None
self.driver = None
self.driver_client = None
self.spark_job_group = None
self.data_loaders_initialized = False
self.rand = np.random.RandomState(constants.RANDOM_SEED)
def initialize_workers(self):
"""Initializes Cerebro workers"""
result_queue = queue.Queue(1)
spark_job_group = 'cerebro.spark.run.%d' % job_id.next_job_id()
driver = service_driver.SparkDriverService(self.settings.num_workers, self.settings.key, self.settings.nics)
driver_client = service_driver.SparkDriverClient(driver.addresses(), self.settings.key, self.settings.verbose)
_make_spark_thread(self.spark_context, spark_job_group, driver, result_queue, self.settings)
driver.wait_for_initial_registration(self.settings.timeout)
if self.settings.verbose >= 2:
print('Initial Spark task registration is complete.')
task_clients = [service_task.SparkTaskClient(index,
driver.task_addresses_for_driver(index),
self.settings.key, self.settings.verbose) for index in
range(self.settings.num_workers)]
for task_client in task_clients:
task_client.notify_initial_registration_complete()
# setting local index for each task on the corresponding worker for GPU pinning (if needed)
host_hashes = driver.task_host_hash_indices()
for host_hash in host_hashes:
for i, task_index in enumerate(host_hashes[host_hash]):
task_clients[task_index].set_local_task_index(i)
self.driver = driver
self.driver_client = driver_client
self.task_clients = task_clients
self.spark_job_group = spark_job_group
self.workers_initialized = True
def initialize_data_loaders(self, store, dataset_idx, schema_fields):
"""
:param store:
:param dataset_idx:
:param schema_fields:
"""
if self.workers_initialized:
remote_store = store.to_remote(self.spark_job_group, dataset_idx)
shard_count = self._num_workers()
_, _, _, avg_row_size = util.get_simple_meta_from_parquet(store, schema_fields, None, dataset_idx)
data_readers_fn = _data_readers_fn(remote_store, shard_count, schema_fields, avg_row_size,
self.settings.disk_cache_size_bytes,
self.settings.data_readers_pool_type, self.settings.num_data_readers)
for task_client in self.task_clients:
task_client.initialize_data_loaders(data_readers_fn)
            self.data_loaders_initialized = True
self.data_readers_fn = data_readers_fn
else:
raise Exception('Spark tasks not initialized for Cerebro. Please run SparkBackend.initialize_workers() '
'first!')
def train_for_one_epoch(self, models, store, dataset_idx, feature_col, label_col, is_train=True):
sub_epoch_trainers = [_get_remote_trainer(model, self, store, dataset_idx, feature_col, label_col,
self.settings.verbose) \
for model in models]
model_worker_pairs = [(i, j) for i in range(len(models)) for j in range(self._num_workers())]
# take a random ordering
self.rand.shuffle(model_worker_pairs)
model_states = {i: False for i in range(len(models))}
worker_states = {i: False for i in range(self._num_workers())}
model_on_worker = [-1 for _ in range(self._num_workers())]
model_results = {model.getRunId(): None for model in models}
model_sub_epoch_steps = {model.getRunId(): None for model in models}
while len(model_worker_pairs) > 0:
for w in range(self._num_workers()):
# worker idle
if not worker_states[w]:
m = _get_runnable_model(w, model_worker_pairs, model_states, is_train)
if m != -1:
# runnable model found
self.task_clients[w].execute_sub_epoch(
fn=sub_epoch_trainers[m], train=is_train, initial_epoch=models[m].getEpochs())
model_states[m] = True
worker_states[w] = True
model_on_worker[w] = m
else:
m = model_on_worker[w]
if m != -1:
status = self.task_clients[w].sub_epoch_completed()
if status.flag:
# sub-epoch completed
model_worker_pairs.remove((m, w))
model_states[m] = False
worker_states[w] = False
model_on_worker[w] = -1
if status.sub_epoch_result['status'] == 'FAILED':
# Application Error
self.teardown_workers()
raise Exception(status.sub_epoch_result['error'])
else:
res, steps = status.sub_epoch_result['result']
run_id = models[m].getRunId()
if model_results[run_id] is None:
model_results[run_id] = res
model_sub_epoch_steps[run_id] = [steps]
else:
for k in model_results[run_id]:
model_results[run_id][k].append(res[k][0])
model_sub_epoch_steps[run_id].append(steps)
time.sleep(self.settings.polling_period)
# incrementing the model epoch number
if is_train:
for model in models:
model.setEpochs(model.getEpochs() + 1)
# aggregating the model metrics
for run_id in model_results:
res = model_results[run_id]
steps = model_sub_epoch_steps[run_id]
for k in res:
res[k] = (np.sum([rk * steps[i] for i, rk in enumerate(res[k])]) / np.sum(steps))
return model_results
def teardown_workers(self):
"""Teardown Spark tasks"""
for task_client in self.task_clients:
task_client.notify_workload_complete()
self.workers_initialized = False
self.data_loaders_initialized = False
def get_metadata_from_parquet(self, store, label_columns=['label'], feature_columns=['features']):
"""
Get metadata from the data in the persistent storage.
:param store:
:param label_columns:
:param feature_columns:
:return:
"""
return util.get_simple_meta_from_parquet(store, label_columns + feature_columns)
def prepare_data(self, store, dataset, validation, label_columns=['label'], feature_columns=['features'],
num_partitions=None, parquet_row_group_size_mb=8, dataset_idx=None):
"""
Prepare data by writing out into persistent storage
:param store: Cerebro storage object (e.g., LocalStorage, HDFSStorage).
:param dataset: Spark DataFrame.
:param validation: Fraction of validation data (e.g., 0.25) or name of the DataFrame column indicating validation.
:param label_columns: List of label/output columns (default=['label']).
:param feature_columns: List of feature columns (default=['features']).
:param num_partitions: Number of data partitions of the output. If None, will default to the current number of
input dataset partitions.
:param parquet_row_group_size_mb: Parquet row group size in MBs (default 8 MB) .
:param dataset_idx: Dataset index if storing multiple datasets in the same directory.
"""
return util.prepare_data(self._num_workers(), store, dataset, label_columns, feature_columns, validation,
num_partitions=num_partitions, dataset_idx=dataset_idx,
parquet_row_group_size_mb=parquet_row_group_size_mb, verbose=self.settings.verbose)
def _num_workers(self):
"""
Get number of processes/tasks
:return:
"""
return self.settings.num_workers
def _get_runnable_model(worker, model_worker_pairs, model_states, is_train):
for m, w in model_worker_pairs:
# worker matches and model idle
if is_train:
if w == worker and not model_states[m]:
return m
else:
if w == worker:
return m
return -1
def _get_remote_trainer(estimator, backend, store, dataset_idx, feature_columns, label_columns, verbose=0):
train_rows, val_rows, metadata, avg_row_size = \
util.get_simple_meta_from_parquet(store,
schema_cols=label_columns + feature_columns,
sample_weight_col=None,
dataset_idx=dataset_idx)
estimator._check_params(metadata)
keras_utils = estimator._get_keras_utils()
run_id = estimator.getRunId()
# checkpointing the model if it does not exist
if not estimator._has_checkpoint(run_id):
model = estimator._compile_model(keras_utils)
remote_store = store.to_remote(run_id, dataset_idx)
with remote_store.get_local_output_dir() as run_output_dir:
ckpt_file = os.path.join(run_output_dir, remote_store.checkpoint_filename)
model.save(ckpt_file)
remote_store.sync(run_output_dir)
trainer = sub_epoch_trainer(estimator, metadata, keras_utils, run_id, dataset_idx,
train_rows, val_rows, backend._num_workers())
return trainer
def _data_readers_fn(remote_store, shard_count, schema_fields, avg_row_size, cache_size_limit, pool_type, num_readers):
def _data_readers(index):
from petastorm import make_reader
PETASTORM_HDFS_DRIVER = constants.PETASTORM_HDFS_DRIVER
train_reader = make_reader(remote_store.train_data_path, shuffle_row_groups=False, num_epochs=None,
cur_shard=index,
shard_count=shard_count,
hdfs_driver=PETASTORM_HDFS_DRIVER,
schema_fields=schema_fields,
reader_pool_type=pool_type, workers_count=num_readers,
cache_type='local-disk',
cache_size_limit=cache_size_limit,
cache_row_size_estimate=avg_row_size,
cache_extra_settings={'cleanup': True})
if remote_store.val_data_path != '' and remote_store.val_data_path is not None:
val_reader = make_reader(remote_store.val_data_path, shuffle_row_groups=False, num_epochs=None,
cur_shard=index,
shard_count=shard_count,
hdfs_driver=PETASTORM_HDFS_DRIVER,
schema_fields=schema_fields,
reader_pool_type=pool_type, workers_count=num_readers,
cache_type='local-disk',
cache_size_limit=cache_size_limit,
cache_row_size_estimate=avg_row_size,
cache_extra_settings={'cleanup': True})
else:
val_reader = None
return train_reader, val_reader
return _data_readers
def _make_spark_thread(spark_context, spark_job_group, driver, result_queue,
settings):
"""Creates `settings.num_workers` Spark tasks in a parallel thread."""
def run_spark():
"""Creates `settings.num_workers` Spark tasks, each executing `_task_fn` and waits for them to terminate."""
try:
spark_context.setJobGroup(spark_job_group,
"Cerebro Spark Run",
interruptOnCancel=True)
procs = spark_context.range(0, numSlices=settings.num_workers)
# We assume that folks caring about security will enable Spark RPC
# encryption, thus ensuring that key that is passed here remains
# secret.
result = procs.mapPartitionsWithIndex(_make_mapper(driver.addresses(), settings)).collect()
result_queue.put(result)
except:
driver.notify_spark_job_failed()
raise
spark_thread = threading.Thread(target=run_spark)
spark_thread.start()
return spark_thread
def _make_mapper(driver_addresses, settings):
def _mapper(index, _):
try:
# https://www.google.com/search?q=keras+model+save+resource+temporarily+unavailable&oq=keras\
# +mode&aqs=chrome.0.69i59l2j69i57j69i59j69i60l3j69i65.3390j0j4&sourceid=chrome&ie=UTF-8
import os
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
task = service_task.SparkTaskService(index, settings.key, settings.nics)
driver_client = service_driver.SparkDriverClient(driver_addresses, settings.key, settings.verbose)
driver_client.register_task(index, task.addresses(), host_hash.host_hash())
task.wait_for_initial_registration(settings.timeout)
task.wait_for_workload_completion()
yield 0
finally:
task.shutdown()
return _mapper
def sub_epoch_trainer(estimator, metadata, keras_utils, run_id, dataset_idx, train_rows, val_rows,
num_workers):
# Estimator parameters
label_columns = estimator.getLabelCols()
feature_columns = estimator.getFeatureCols()
user_callbacks = estimator.getCallbacks()
batch_size = estimator.getBatchSize()
sample_weight_col = estimator.getSampleWeightCol()
custom_objects = estimator.getCustomObjects()
user_shuffle_buffer_size = estimator.getShufflingBufferSize()
metrics_names = [name.__name__ if callable(name) else name for name in estimator.getMetrics()]
user_verbose = estimator.getVerbose()
# Model parameters
input_shapes, output_shapes = estimator.get_model_shapes()
# print(input_shapes)
output_names = estimator.getModel().output_names
floatx = tf.keras.backend.floatx()
make_dataset = keras_utils.make_dataset_fn(
feature_columns, label_columns, sample_weight_col, metadata,
input_shapes, output_shapes, output_names, batch_size)
fit_sub_epoch_fn = keras_utils.fit_sub_epoch_fn()
eval_sub_epoch_fn = keras_utils.eval_sub_epoch_fn()
transformation_fn = estimator.getTransformationFn()
transformation = transformation_fn if transformation_fn else None
# Utility functions
deserialize_keras_model = _deserialize_keras_model_fn()
calculate_shuffle_buffer_size = _calculate_shuffle_buffer_size_fn()
pin_gpu = _pin_gpu_fn()
# Storage
store = estimator.getStore()
remote_store = store.to_remote(run_id, dataset_idx)
def train(data_reader, is_train, starting_epoch, local_task_index=0):
begin_time = time.time()
tf.keras.backend.set_floatx(floatx)
pin_gpu(local_task_index)
# FIXME: Enable sub-epoch data shuffling
# if not user_shuffle_buffer_size:
# shuffle_buffer_size = calculate_shuffle_buffer_size(
# hvd, avg_row_size, train_rows / num_workers)
# else:
# shuffle_buffer_size = user_shuffle_buffer_size
if not user_shuffle_buffer_size:
shuffle_buffer_size = 1024 * 3
else:
shuffle_buffer_size = user_shuffle_buffer_size
# # Verbose mode 1 will print a progress bar
verbose = user_verbose
with remote_store.get_local_output_dir() as run_output_dir:
step_counter_callback = KerasStepCounter()
callbacks = [step_counter_callback]
callbacks = callbacks + user_callbacks
ckpt_file = os.path.join(run_output_dir, remote_store.checkpoint_filename)
            # restoring the model from the previous checkpoint
with tf.keras.utils.custom_object_scope(custom_objects):
# model = tf.keras.models.load_model(remote_store.checkpoint_path)
model = deserialize_keras_model(
remote_store.get_last_checkpoint(), lambda x: tf.keras.models.load_model(x))
steps_per_epoch = int(math.ceil(train_rows / batch_size / num_workers))
                # math.ceil because if val_rows is smaller than batch_size we still get at least
# one step. float(val_rows) because val_rows/batch_size evaluates to zero before
# math.ceil
validation_steps = int(math.ceil(float(val_rows) / batch_size / num_workers))
schema_fields = feature_columns + label_columns
if sample_weight_col:
schema_fields.append(sample_weight_col)
if is_train:
train_data = make_dataset(data_reader, shuffle_buffer_size, shuffle=False)
initialization_time = time.time() - begin_time
begin_time = time.time()
result = fit_sub_epoch_fn(starting_epoch, model, train_data, steps_per_epoch, callbacks,
verbose).history
training_time = time.time() - begin_time
begin_time = time.time()
result = {'train_' + name: result[name] for name in result}
model.save(ckpt_file)
else:
val_data = make_dataset(data_reader, shuffle_buffer_size, shuffle=False)
initialization_time = time.time() - begin_time
begin_time = time.time()
result = eval_sub_epoch_fn(starting_epoch, model, val_data, validation_steps, callbacks, verbose)
training_time = time.time() - begin_time
begin_time = time.time()
result = [[x] for x in result]
result = {k: v for k, v in zip(['val_loss'] + ['val_' + name for name in metrics_names], result)}
del model
gc.collect()
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
remote_store.sync(run_output_dir)
finalization_time = time.time() - begin_time
if verbose >= 1:
print('CEREBRO => Time: {}, Model: {}, Mode: {}, Initialization Time: {}, Training Time: {}, '
'Finalization Time: {}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
run_id, 'TRAIN' if is_train else 'VALID', initialization_time, training_time, finalization_time))
return result, step_counter_callback.get_step_count()
return train
def _deserialize_keras_model_fn():
def deserialize_keras_model(model_bytes, load_model_fn):
"""Deserialize model from byte array encoded in base 64."""
# model_bytes = codec.loads_base64(model_bytes)
bio = io.BytesIO(model_bytes)
with h5py.File(bio, 'r') as f:
return load_model_fn(f)
return deserialize_keras_model
def _calculate_shuffle_buffer_size_fn():
def calculate_shuffle_buffer_size(hvd, avg_row_size, train_row_count_per_worker):
"""
Determines the shuffling buffer size such that each worker gets at most 1GB for shuffling
buffer such that on a single machine, among all the workers on that machine, at most
memory_cap_gb GB are allocated for shuffling buffer. Also, it ensures that the buffer size
is identical among all the workers.
example 1:
memory_cap_gb = 4
machine1: 8 workers
machine2: 3 workers
shuffle_buffer_size = 0.5 GB
example 2:
memory_cap_gb = 4
machine1: 2 workers
machine2: 3 workers
shuffle_buffer_size = 1 GB
example 3:
memory_cap_gb = 4
machine1: 2 workers
machine2: 8 workers
machine3: 5 workers
shuffle_buffer_size = 0.5 GB
"""
local_size = hvd.local_size()
local_sizes = hvd.allgather([local_size])
max_local_size = max(local_sizes)
if max_local_size > TOTAL_BUFFER_MEMORY_CAP_GIB:
shuffle_buffer_size = TOTAL_BUFFER_MEMORY_CAP_GIB * BYTES_PER_GIB / avg_row_size / max_local_size
else:
shuffle_buffer_size = BYTES_PER_GIB / avg_row_size
return int(min(shuffle_buffer_size, train_row_count_per_worker))
return calculate_shuffle_buffer_size
def _pin_gpu_fn():
def fn(local_task_index):
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[local_task_index], 'GPU')
return fn
def _pin_cpu_fn():
def fn():
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
tf.config.threading.set_inter_op_parallelism_threads(1)
tf.config.threading.set_intra_op_parallelism_threads(1)
return fn
```
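A minimal end-to-end sketch of how this backend is intended to be driven; the import path is inferred from the file layout, and `sc`, `store`, `df`, `models`, and the column names are placeholders rather than part of the original code:
```python
from cerebro.backend.spark.backend import SparkBackend

backend = SparkBackend(spark_context=sc, num_workers=4, verbose=1)
backend.initialize_workers()

# write the Spark DataFrame out to the store and set up per-worker readers
backend.prepare_data(store, df, validation=0.25)
backend.initialize_data_loaders(store, dataset_idx=None, schema_fields=['features', 'label'])

# one round of model hopping: every model visits every worker once
results = backend.train_for_one_epoch(models, store, None, ['features'], ['label'], is_train=True)

backend.teardown_workers()
```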
#### File: backend/spark/service_task.py
```python
import pyspark
import socket
import threading
import traceback
import tensorflow as tf
from distutils.version import LooseVersion
import psutil
from six.moves import queue, socketserver
from .service_common import find_port, PingResponse, PingRequest, NoValidAddressesFound, AckResponse, Wire
class SetLocalTaskIndexRequest(object):
def __init__(self, local_task_index):
self.local_task_index = local_task_index
"""Local rank of the task"""
class InitDataLoadersRequest(object):
def __init__(self, initialize_data_loaders_fn):
self.initialize_data_loaders_fn = initialize_data_loaders_fn
class ExecuteSubEpochRequest(object):
def __init__(self, sub_epoch_fn, train, initial_epoch):
self.sub_epoch_fn = sub_epoch_fn
self.is_train = train
self.initial_epoch = initial_epoch
class SubEpochCompletedRequest(object):
"""Is command execution finished?"""
pass
class SubEpochCompletedResponse(object):
def __init__(self, flag, sub_epoch_result):
self.flag = flag
"""Yes/no"""
self.sub_epoch_result = sub_epoch_result
"""RUNNING/FAILED/COMPLETED and sub-epoch result"""
class NotifyInitialRegistrationCompleteRequest(object):
"""Notification that initial task registration has completed."""
pass
class NotifyWorkloadCompleteRequest(object):
"""Notification that the workload has completed."""
pass
class SparkTaskService:
NAME_FORMAT = 'task service #%d'
SERVICE_ENV_KEYS = ['HADOOP_TOKEN_FILE_LOCATION']
def __init__(self, index, key, nics):
# disabling eager
tf.compat.v1.disable_eager_execution()
service_name = SparkTaskService.NAME_FORMAT % index
self._index = index
self._service_name = service_name
self._wire = Wire(key)
self._nics = nics
self._server, _ = find_port(
lambda addr: socketserver.ThreadingTCPServer(
addr, self._make_handler()))
self._port = self._server.socket.getsockname()[1]
self._addresses = self._get_local_addresses()
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.daemon = True
self._thread.start()
service_env_keys = SparkTaskService.SERVICE_ENV_KEYS
self.local_task_index = 0
self._initial_registration_complete = False
self._workload_complete = False
self._wait_cond = threading.Condition()
self._service_env_keys = service_env_keys
self._sub_epoch_thread = None
self._sub_epoch_status = None
self._train_reader = None
self._val_reader = None
def _make_handler(self):
server = self
class _Handler(socketserver.StreamRequestHandler):
def handle(self):
try:
req = server._wire.read(self.rfile)
resp = server._handle(req, self.client_address)
if not resp:
raise Exception('Handler did not return a response.')
server._wire.write(resp, self.wfile)
except EOFError:
# Happens when client is abruptly terminated, don't want to pollute the logs.
pass
return _Handler
def _get_local_addresses(self):
result = {}
for intf, intf_addresses in psutil.net_if_addrs().items():
if self._nics and intf not in self._nics:
continue
for addr in intf_addresses:
if addr.family == socket.AF_INET:
if intf not in result:
result[intf] = []
result[intf].append((addr.address, self._port))
if not result and self._nics:
raise NoValidAddressesFound(
'No available network interface found matching user provided interface: {}'.format(self._nics))
return result
def addresses(self):
return self._addresses
def shutdown(self):
self._server.shutdown()
self._server.server_close()
self._thread.join()
def get_port(self):
return self._port
def wait_for_initial_registration(self, timeout):
self._wait_cond.acquire()
try:
while not self._initial_registration_complete:
self._wait_cond.wait(timeout.remaining())
timeout.check_time_out_for('tasks to start')
finally:
self._wait_cond.release()
def wait_for_workload_completion(self, timeout=5):
self._wait_cond.acquire()
try:
while not self._workload_complete:
self._wait_cond.wait(timeout)
finally:
self._wait_cond.release()
def _handle(self, req, client_address):
if isinstance(req, InitDataLoadersRequest):
self._wait_cond.acquire()
try:
self._train_reader, self._val_reader = req.initialize_data_loaders_fn(self._index)
finally:
self._wait_cond.notify_all()
self._wait_cond.release()
return AckResponse()
if isinstance(req, SetLocalTaskIndexRequest):
self.local_task_index = req.local_task_index
return AckResponse()
if isinstance(req, ExecuteSubEpochRequest):
self._wait_cond.acquire()
try:
if self._sub_epoch_thread is None or not self._sub_epoch_thread.is_alive():
self._sub_epoch_status = None
def bg_execute(fn, is_train, initial_epoch):
try:
self._sub_epoch_status = {"status": "RUNNING", "result": None}
if is_train:
reader = self._train_reader
else:
reader = self._val_reader
func_result = fn(reader, is_train, initial_epoch,
local_task_index=self.local_task_index)
self._sub_epoch_status = {"status": "COMPLETED", "result": func_result}
except Exception as e:
self._sub_epoch_status = {"status": "FAILED", "result": None,
"error": str(e) + "\n" + traceback.format_exc()}
self._sub_epoch_thread = threading.Thread(target=bg_execute, args=(req.sub_epoch_fn, req.is_train,
req.initial_epoch))
self._sub_epoch_thread.start()
finally:
self._wait_cond.notify_all()
self._wait_cond.release()
return AckResponse()
if isinstance(req, SubEpochCompletedRequest):
self._wait_cond.acquire()
try:
terminated = (self._sub_epoch_thread is not None and
not self._sub_epoch_thread.is_alive())
finally:
self._wait_cond.release()
return SubEpochCompletedResponse(terminated, self._sub_epoch_status)
if isinstance(req, NotifyInitialRegistrationCompleteRequest):
self._wait_cond.acquire()
try:
self._initial_registration_complete = True
finally:
self._wait_cond.notify_all()
self._wait_cond.release()
return AckResponse()
if isinstance(req, NotifyWorkloadCompleteRequest):
self._wait_cond.acquire()
try:
self._workload_complete = True
finally:
self._wait_cond.notify_all()
self._wait_cond.release()
return AckResponse()
if isinstance(req, PingRequest):
return PingResponse(self._service_name, client_address[0])
raise NotImplementedError(req)
def _get_resources(self):
if LooseVersion(pyspark.__version__) >= LooseVersion('3.0.0'):
from pyspark import TaskContext
return TaskContext.get().resources()
return dict()
class SparkTaskClient:
def __init__(self, index, task_addresses, key, verbose, match_intf=False, probe_timeout=20, retries=3):
service_name = SparkTaskService.NAME_FORMAT % index
self._verbose = verbose
self._service_name = service_name
self._wire = Wire(key)
self._match_intf = match_intf
self._probe_timeout = probe_timeout
self._retries = retries
self._addresses = self._probe(task_addresses)
if not self._addresses:
raise NoValidAddressesFound(
'Cerebro was unable to connect to {service_name} on any '
'of the following addresses: {addresses}.\n\n'
'One possible cause of this problem is that '
'Cerebro currently requires every host to have at '
'least one routable network interface with the same '
'name across all of the hosts. '
'You can run \"ifconfig -a\" '
'on every host and check for the common '
'routable interface. '
'To fix the problem, you can rename interfaces on '
'Linux.'.format(service_name=service_name, addresses=task_addresses))
def _probe(self, addresses):
result_queue = queue.Queue()
threads = []
for intf, intf_addresses in addresses.items():
for addr in intf_addresses:
thread = threading.Thread(target=self._probe_one,
args=(intf, addr, result_queue))
thread.daemon = True
thread.start()
threads.append(thread)
for t in threads:
t.join()
result = {}
while not result_queue.empty():
intf, addr = result_queue.get()
if intf not in result:
result[intf] = []
result[intf].append(addr)
return result
def _probe_one(self, intf, addr, result_queue):
for iter in range(self._retries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self._probe_timeout)
try:
sock.connect(addr)
rfile = sock.makefile('rb')
wfile = sock.makefile('wb')
try:
self._wire.write(PingRequest(), wfile)
resp = self._wire.read(rfile)
if resp.service_name != self._service_name:
return
if self._match_intf:
# Interface name of destination and source must match
# since `match_intf` is requested.
client_intf_addrs = [x.address
for x in psutil.net_if_addrs().get(intf, [])
if x.family == socket.AF_INET]
if resp.source_address not in client_intf_addrs:
if self._verbose >= 2:
# Need to find the local interface name whose
# address was visible to the target host's server.
resp_intf = ''
for key in psutil.net_if_addrs().keys():
key_intf_addrs = [x.address
for x in psutil.net_if_addrs().get(key, [])]
if resp.source_address in key_intf_addrs:
resp_intf = key
break
print('WARNING: Expected to connect the host '
'{addr} using interface '
'{intf}, but reached it on interface '
'{resp_intf}.'.format(
addr=str(addr[0]) + ':' + str(addr[1]),
intf=intf,
resp_intf=resp_intf))
return
result_queue.put((intf, addr))
return
finally:
rfile.close()
wfile.close()
except:
pass
finally:
sock.close()
def _send_one(self, addr, req):
for iter in range(self._retries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(addr)
rfile = sock.makefile('rb')
wfile = sock.makefile('wb')
try:
self._wire.write(req, wfile)
resp = self._wire.read(rfile)
return resp
finally:
rfile.close()
wfile.close()
except:
if iter == self._retries - 1:
# Raise exception on the last retry.
raise
finally:
sock.close()
def _send(self, req):
# Since all the addresses were vetted, use the first one.
addr = list(self._addresses.values())[0][0]
return self._send_one(addr, req)
def addresses(self):
return self._addresses
def notify_initial_registration_complete(self):
self._send(NotifyInitialRegistrationCompleteRequest())
def notify_workload_complete(self):
self._send(NotifyWorkloadCompleteRequest())
def initialize_data_loaders(self, fn):
self._send(InitDataLoadersRequest(fn))
def execute_sub_epoch(self, fn, train=True, initial_epoch=0):
self._send(ExecuteSubEpochRequest(fn, train, initial_epoch))
def sub_epoch_completed(self):
return self._send(SubEpochCompletedRequest())
def set_local_task_index(self, local_task_index):
return self._send(SetLocalTaskIndexRequest(local_task_index))
```
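For orientation, the request/response protocol above is driven from the Spark driver roughly like this, mirroring `SparkBackend.train_for_one_epoch` (`client` is a `SparkTaskClient` and `trainer_fn` a sub-epoch function; both names are placeholders):
```python
import time

# kick off a sub-epoch in a background thread on the worker
client.execute_sub_epoch(fn=trainer_fn, train=True, initial_epoch=0)

# poll until the background thread on the worker has finished
while True:
    status = client.sub_epoch_completed()
    if status.flag:
        break
    time.sleep(0.5)

if status.sub_epoch_result['status'] == 'FAILED':
    raise Exception(status.sub_epoch_result['error'])
result, steps = status.sub_epoch_result['result']
```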
#### File: nas/tuners/base.py
```python
import keras_tuner
import copy
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from keras_tuner.engine import trial as trial_lib, tuner
import numpy as np
class CerebroOracle(keras_tuner.Oracle):
"""
Base Oracle that supports multiple trial configuration
Args:
objective: A string or `keras_tuner.Objective` instance. If a string,
the direction of the optimization (min or max) will be inferred.
max_trials: Integer, the total number of trials (model configurations)
to test at most. Note that the oracle may interrupt the search
before `max_trial` models have been tested if the search space has
been exhausted.
hyperparameters: Optional `HyperParameters` instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
seed: Int. Random seed.
"""
def __init__(
self,
objective,
max_trials=None,
hyperparameters=None,
allow_new_entries=True,
tune_new_entries=True,
seed=None
):
super().__init__(
objective,
max_trials=max_trials,
hyperparameters=hyperparameters, allow_new_entries=allow_new_entries, tune_new_entries=tune_new_entries,
seed=seed
)
def _init_search_space(self):
"""
Init the entire search space for the hyperparameters
        This function should be called in Tuner.search, since the parameters of the architecture are not known until the data has been seen.
"""
raise NotImplementedError
def create_trials(self, n):
raise NotImplementedError
def end_trial(self, trial_id, status="COMPLETED"):
trial = None
for i, t in enumerate(self._running_trials):
if t.trial_id == trial_id:
trial = self._running_trials.pop(i)
break
if not trial:
raise ValueError("Ongoing trial with id: {} not found.".format(trial_id))
trial.status = status
if status == trial_lib.TrialStatus.COMPLETED:
self.score_trial(trial)
self.end_order.append(trial_id)
self._save_trial(trial)
self.save()
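    # Note (added for clarity): end_trial() mirrors keras_tuner.Oracle.end_trial, except that the trial
    # is looked up in the subclass-managed `_running_trials` list (presumably populated by a
    # create_trials implementation) rather than the oracle's built-in bookkeeping; only trials that
    # end with status COMPLETED are scored.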
``` |
{
"source": "JiangengDong/CoMPNetX",
"score": 2
} |
#### File: CoMPNetX/python/models.py
```python
import torch
import torch.nn as nn
import numpy as np
class VoxelEncoder(nn.Module):
def __init__(self, input_size: int, output_size: int):
super(VoxelEncoder, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(in_channels=input_size, out_channels=16, kernel_size=[5, 5], stride=[2, 2]),
nn.PReLU(),
nn.Conv2d(in_channels=16, out_channels=8, kernel_size=[3, 3], stride=[1, 1]),
nn.PReLU(),
nn.MaxPool2d(kernel_size=[2, 2])
)
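        # Dummy forward pass to infer the flattened feature size of the encoder output
        # (the voxel grid is treated as `input_size` 2D slices of size input_size x input_size).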
x = self.encoder(torch.autograd.Variable(torch.rand([1, input_size, input_size, input_size])))
first_fc_in_features = np.prod(x.shape[1:])
self.head = nn.Sequential(
nn.Linear(first_fc_in_features, 256),
nn.PReLU(),
nn.Linear(256, output_size)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(x.shape[0], -1)
x = self.head(x)
return x
class PNet(nn.Module):
def __init__(self, input_size, output_size):
super(PNet, self).__init__()
self.fc = nn.Sequential(
nn.Linear(input_size, 896), nn.PReLU(), nn.Dropout(),
nn.Linear(896, 512), nn.PReLU(), nn.Dropout(),
nn.Linear(512, 256), nn.PReLU(), nn.Dropout(),
nn.Linear(256, 128), nn.PReLU(),
nn.Linear(128, 64), nn.PReLU(),
nn.Linear(64, output_size))
def forward(self, x):
out = self.fc(x)
return out
class EnetConstraint(nn.Module):
def __init__(self, input_size: int, output_size: int):
super(EnetConstraint, self).__init__()
self.fc = nn.Sequential(
nn.Linear(input_size, 128), nn.PReLU(), # 128
nn.Linear(128, output_size)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.fc(x)
return out
class DNet(nn.Module):
def __init__(self, input_size: int, output_size: int):
super(DNet, self).__init__()
self.fc = nn.Sequential(
nn.Linear(input_size, 256), nn.PReLU(),
nn.Linear(256, 256), nn.PReLU(),
nn.Linear(256, output_size))
def forward(self, x):
out = self.fc(x)
return out
``` |
{
"source": "JiangengDong/ECE276C",
"score": 3
} |
#### File: JiangengDong/ECE276C/draw_curve.py
```python
from matplotlib import pyplot as plt
import numpy as np
import os
def smooth(v):
v = v.copy()
n = v.shape[0]
for i in range(1, n):
v[i] = 0.6*v[i-1] + 0.4*v[i]
return v
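# smooth() above is a first-order exponential moving average: each point keeps 60% of the
# previous smoothed value and mixes in 40% of the new value, which removes high-frequency
# noise from the training-return curve before plotting.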
folder = "./data/DDPG-1-shaping-rand"
csv_path = os.path.join(folder, "run-log-tag-return.csv")
img_path = os.path.join(folder, "curve.pdf")
data = np.loadtxt(csv_path, delimiter=",", skiprows=1)
t = data[:, 1]
r = data[:, 2]
r = smooth(r)
plt.plot(t, r)
plt.xlim(t[0], t[-1])
plt.ylim(2, 14)
plt.tight_layout()
plt.savefig(img_path, bbox_inches="tight")
plt.show()
```
#### File: ECE276C/juggle_env/juggle_env.py
```python
from robosuite.models import MujocoWorldBase
from robosuite.robots import SingleArm
from robosuite.models.objects import BallObject
from robosuite.utils.mjcf_utils import new_joint
from robosuite import load_controller_config
# typing
from robosuite.robots.robot import Robot
from robosuite.models.arenas import Arena
from robosuite.models.objects.generated_objects import MujocoGeneratedObject
from juggle_env.empty_arena import EmptyArena
from mujoco_py import MjSim, MjViewer
import gym
import numpy as np
from collections import OrderedDict
import glfw
class JuggleEnv:
def __init__(self):
self.control_freq: float = 50.0
self.control_timestep: float = 1.0 / self.control_freq
self.viewer = None
self.horizon = 1000
self.target = np.array([0.8, 0.0, 1.9])
# load model
self.robot: Robot = None
self.arena: Arena = None
self.pingpong: MujocoGeneratedObject = None
self.model: MujocoWorldBase = None
self._load_model()
# initialize simulation
self.mjpy_model = None
self.sim: MjSim = None
self.model_timestep: float = 0.0
self._initialize_sim()
        # reset robot, object and internal variables
self.cur_time: float = 0.0
        self.timestep: int = 0
self.done: bool = False
self._pingpong_body_id: int = -1
self._paddle_body_id: int = -1
self._reset_internel()
        # internal variable for scoring
self._below_plane = False
self.plane_height = 1.5
def _load_model(self):
# Load the desired controller's default config as a dict
controller_config = load_controller_config(default_controller="JOINT_VELOCITY")
controller_config["output_max"] = 1.0
controller_config["output_min"] = -1.0
robot_noise = {
"magnitude": [0.05]*7,
"type": "gaussian"
}
self.robot = SingleArm(
robot_type="IIWA",
idn=0,
controller_config=controller_config,
initial_qpos=[0.0, 0.7, 0.0, -1.4, 0.0, -0.56, 0.0],
initialization_noise=robot_noise,
gripper_type="PaddleGripper",
gripper_visualization=True,
control_freq=self.control_freq
)
self.robot.load_model()
self.robot.robot_model.set_base_xpos([0, 0, 0])
self.arena = EmptyArena()
self.arena.set_origin([0.8, 0, 0])
self.pingpong = BallObject(
name="pingpong",
size=[0.02],
rgba=[0.8, 0.8, 0, 1],
solref=[0.1, 0.03],
solimp=[0, 0, 1],
density=100)
pingpong_model = self.pingpong.get_collision()
pingpong_model.append(new_joint(name="pingpong_free_joint", type="free"))
pingpong_model.set("pos", "0.8 0 2.0")
# merge into one
self.model = MujocoWorldBase()
self.model.merge(self.robot.robot_model)
self.model.merge(self.arena)
self.model.worldbody.append(pingpong_model)
def _initialize_sim(self):
# if we have an xml string, use that to create the sim. Otherwise, use the local model
self.mjpy_model = self.model.get_model(mode="mujoco_py")
# Create the simulation instance and run a single step to make sure changes have propagated through sim state
self.sim = MjSim(self.mjpy_model)
self.sim.step()
self.robot.reset_sim(self.sim)
self.model_timestep = self.sim.model.opt.timestep
def _reset_internel(self):
# reset robot
self.robot.setup_references()
self.robot.reset(deterministic=False)
# reset pingpong
pingpong_pos = self.target + np.random.rand(3)*0.08-0.04
pingpong_quat = np.array([1.0, 0.0, 0.0, 0.0])
self.sim.data.set_joint_qpos("pingpong_free_joint", np.concatenate([pingpong_pos, pingpong_quat]))
# get handle for important parts
self._pingpong_body_id = self.sim.model.body_name2id("pingpong")
self._paddle_body_id = self.sim.model.body_name2id("gripper0_paddle_body")
# Setup sim time based on control frequency
self.cur_time = 0
self.timestep = 0
self.done = False
def reset(self):
self.sim.reset()
self._reset_internel()
self.sim.forward()
return self._get_observation()
def _get_observation(self):
di = OrderedDict()
# get robot observation
di = self.robot.get_observations(di)
# get pingpong observation
pingpong_pos = np.array(self.sim.data.body_xpos[self._pingpong_body_id])
di["pingpong_pos"] = pingpong_pos
return di
def step(self, action: np.ndarray):
if self.done:
raise ValueError("executing action in terminated episode")
policy_step = True
score = 0.0
for _ in range(int(self.control_timestep / self.model_timestep)):
self.sim.forward()
self.robot.control(action=action, policy_step=policy_step)
# self.sim.data.ctrl[:] = action*5.0
self.sim.step()
policy_step = False
            # check whether the ball has crossed the scoring plane (a point is scored when it comes back up)
h = self.sim.data.body_xpos[self._pingpong_body_id][2]
self._below_plane |= h < self.plane_height
if self._below_plane and h > self.plane_height:
score = 1.0
self._below_plane = False
self.timestep += 1
self.cur_time += self.control_timestep
observation = self._get_observation()
dist_xy = np.linalg.norm((observation["robot0_eef_pos"] - observation["pingpong_pos"])[:2])
# paddle_height = observation["robot0_eef_pos"][2]
self.done = self.timestep >= self.horizon or dist_xy > 0.2
reward = score # + 0 * (0.2 - dist_xy)
return observation, reward, self.done, {}
def render(self, mode="human"):
if mode == "human":
self._get_viewer().render()
elif mode == "rgb_array":
img = self.sim.render(1920, 1080)
return img[::-1, :, ::-1]
def _get_viewer(self):
if self.viewer is None:
self.viewer = MjViewer(self.sim)
self.viewer.vopt.geomgroup[0] = 0
self.viewer._hide_overlay = True
return self.viewer
def close(self):
self._destroy_viewer()
def _destroy_viewer(self):
if self.viewer is not None:
glfw.destroy_window(self.viewer.window)
self.viewer = None
def seed(self):
pass
``` |
{
"source": "JiangengDong/IRLMPNet",
"score": 3
} |
#### File: IRLMPNet/python/main.py
```python
import argparse
import logging
import os
from math import inf
import json
import numpy as np
import torch
from typing import Dict, Tuple, List
from torch import nn, optim, jit
from torch.distributions import Normal
from torch.distributions.kl import kl_divergence
from torch.nn import functional as F
from torch.optim.optimizer import Optimizer
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from envs import Car1OrderEnv, Env
from memory import ExperienceReplay
from models import (ObservationEncoder, ObservationDecoder, RewardModel,
TransitionModel, bottle)
from planner import MPCPlanner
from visual import visualize_global_map, visualize_local_map
def get_args() -> argparse.Namespace:
# Hyperparameters
parser = argparse.ArgumentParser(description='PlaNet')
parser.add_argument('--id', type=str, default='default', help='Experiment ID')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='Random seed')
parser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')
# environment configs
parser.add_argument('--max-episode-length', type=int, default=500, metavar='T', help='Max episode length')
parser.add_argument('--action-repeat', type=int, default=2, metavar='R', help='Action repeat')
parser.add_argument('--render', action='store_true', help='Render environment')
parser.add_argument('--obstacle-index', type=int, default=0, help='Obstacle index')
# replay buffer configs
parser.add_argument('--experience-size', type=int, default=10000, metavar='D', help='Experience replay size')
# network configs
parser.add_argument('--activation-function', type=str, default='relu', choices=dir(F), help='Model activation function')
parser.add_argument('--embedding-size', type=int, default=1024, metavar='E', help='Observation embedding size')
parser.add_argument('--hidden-size', type=int, default=256, metavar='H', help='Hidden size')
parser.add_argument('--belief-size', type=int, default=256, metavar='H', help='Belief/hidden size')
parser.add_argument('--state-size', type=int, default=32, metavar='Z', help='State/latent size')
# MPC planner configs
parser.add_argument('--action-noise', type=float, default=0.3, metavar='ε', help='Action noise')
parser.add_argument('--planning-horizon', type=int, default=12, metavar='H', help='Planning horizon distance')
parser.add_argument('--optimisation-iters', type=int, default=10, metavar='I', help='Planning optimisation iterations')
parser.add_argument('--candidates', type=int, default=1000, metavar='J', help='Candidate samples per iteration')
parser.add_argument('--top-candidates', type=int, default=100, metavar='K', help='Number of top candidates to fit')
# hyperparameters
parser.add_argument('--episodes', type=int, default=200, metavar='E', help='Total number of episodes')
parser.add_argument('--seed-episodes', type=int, default=5, metavar='S', help='Seed episodes')
parser.add_argument('--collect-interval', type=int, default=100, metavar='C', help='Collect interval')
parser.add_argument('--batch-size', type=int, default=10, metavar='B', help='Batch size')
parser.add_argument('--chunk-size', type=int, default=50, metavar='L', help='Chunk size')
parser.add_argument('--overshooting-distance', type=int, default=50, metavar='D', help='Latent overshooting distance/latent overshooting weight for t = 1')
parser.add_argument('--overshooting-kl-beta', type=float, default=0, metavar='β>1', help='Latent overshooting KL weight for t > 1 (0 to disable)')
parser.add_argument('--overshooting-reward-scale', type=float, default=0, metavar='R>1',
help='Latent overshooting reward prediction weight for t > 1 (0 to disable)')
parser.add_argument('--global-kl-beta', type=float, default=0, metavar='βg', help='Global KL weight (0 to disable)')
parser.add_argument('--free-nats', type=float, default=3, metavar='F', help='Free nats')
parser.add_argument('--learning-rate', type=float, default=1e-3, metavar='α', help='Learning rate')
parser.add_argument('--learning-rate-schedule', type=int, default=0, metavar='αS',
help='Linear learning rate schedule (optimisation steps from 0 to final learning rate; 0 to disable)')
parser.add_argument('--adam-epsilon', type=float, default=1e-4, metavar='ε', help='Adam optimiser epsilon value')
parser.add_argument('--grad-clip-norm', type=float, default=1000, metavar='C', help='Gradient clipping norm')
# test configs
parser.add_argument('--test', action='store_true', help='Test only')
parser.add_argument('--test-interval', type=int, default=25, metavar='I', help='Test interval (episodes)')
parser.add_argument('--test-episodes', type=int, default=1, metavar='E', help='Number of test episodes')
# checkpoint configs
parser.add_argument('--checkpoint-interval', type=int, default=50, metavar='I', help='Checkpoint interval (episodes)')
parser.add_argument('--checkpoint-experience', action='store_true', help='Checkpoint experience replay')
# continue training
parser.add_argument('--result-dir', type=str, help='Default value is set according to ID. Override with this option.')
parser.add_argument('--checkpoint-dir', type=str, help='Default value is set according to ID. Override with this option.')
parser.add_argument('--checkpoint-path', type=str, help='Default value is set according to ID. Override with this option.')
parser.add_argument('--tensorboard-dir', type=str, help='Default value is set according to ID. Override with this option.')
parser.add_argument('--torchscript-dir', type=str, help='Default value is set according to ID. Override with this option.')
parser.add_argument("--video-dir", type=str, help='Default value is set according to ID. Override with this option.')
args = parser.parse_args()
return args
def postprocess_args(args: argparse.Namespace) -> argparse.Namespace:
# check validity of args and add additional args
args.overshooting_distance = min(args.chunk_size, args.overshooting_distance)
if torch.cuda.is_available() and not args.disable_cuda:
args.device = torch.device('cuda')
else:
args.device = torch.device('cpu')
args.result_dir = args.result_dir or os.path.join('data/car1order/rl_result', args.id)
args.checkpoint_dir = args.checkpoint_dir or os.path.join(args.result_dir, "checkpoint")
args.tensorboard_dir = args.tensorboard_dir or os.path.join(args.result_dir, "tensorboard")
args.torchscript_dir = args.torchscript_dir or os.path.join(args.result_dir, "torchscript")
args.video_dir = args.video_dir or os.path.join(args.result_dir, "video")
record_path = os.path.join(args.checkpoint_dir, "checkpoint")
if os.path.exists(record_path):
with open(record_path) as f:
args.checkpoint_path = args.checkpoint_path or os.path.join(args.checkpoint_dir, f.readline().rstrip('\n'))
print('Options')
for k, v in vars(args).items():
print('\t' + k + ': ' + str(v))
return args
def save_args(args: argparse.Namespace):
args_dict = args.__dict__.copy()
args_dict.pop("device")
with open(os.path.join(args.result_dir, "args.json"), "w") as f:
json.dump(args_dict, f, indent=2)
def setup_workdir(args: argparse.Namespace):
os.makedirs(args.result_dir, exist_ok=True)
os.makedirs(args.checkpoint_dir, exist_ok=True)
os.makedirs(args.torchscript_dir, exist_ok=True)
os.makedirs(args.video_dir, exist_ok=True)
os.makedirs(args.tensorboard_dir, exist_ok=True)
for filename in os.listdir(args.tensorboard_dir):
os.remove(os.path.join(args.tensorboard_dir, filename))
def setup_seed(args: argparse.Namespace):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device == torch.device('cuda'):
torch.cuda.manual_seed(args.seed)
# TODO: set dynamic system/env seed here
def setup_env(args: argparse.Namespace) -> Env:
env = Car1OrderEnv(
args.max_episode_length,
args.action_repeat
)
return env
def setup_replay(args: argparse.Namespace, env: Env) -> ExperienceReplay:
D = ExperienceReplay(
args.experience_size,
env.observation_size,
env.action_size,
args.device
)
# Initialise dataset D with random seed episodes
for _ in range(1, args.seed_episodes + 1):
observation, done = env.reset(), False
while not done:
action = env.sample_random_action()
next_observation, _, done, info = env.step(action)
D.append(observation, action, info["reward_dist"], info["reward_coll"], done)
observation = next_observation
return D
def setup_models(args: argparse.Namespace, env: Env) -> Tuple[Tuple[nn.Module, nn.Module, nn.Module, nn.Module],
Tuple[optim.Optimizer, optim.Optimizer],
List[torch.nn.parameter.Parameter]]:
# Initialise model parameters randomly
transition_model = TransitionModel(
args.belief_size,
args.state_size,
env.action_size,
args.hidden_size,
args.embedding_size,
args.activation_function
).to(device=args.device)
observation_model = ObservationDecoder(
args.belief_size,
args.state_size,
args.embedding_size
).to(device=args.device)
reward_model = RewardModel(
args.belief_size,
args.state_size,
args.hidden_size
).to(device=args.device)
encoder = ObservationEncoder(
args.embedding_size,
).to(device=args.device)
param_list = (
list(transition_model.parameters()) +
list(observation_model.parameters()) +
list(encoder.parameters())
)
transition_optimizer = optim.Adam(param_list, args.learning_rate, eps=args.adam_epsilon)
reward_optimizer = optim.Adam(reward_model.parameters(), args.learning_rate, eps=args.adam_epsilon)
# load parameters
if args.checkpoint_path is not None:
if os.path.exists(args.checkpoint_path):
model_dicts = torch.load(args.checkpoint_path)
transition_model.load_state_dict(model_dicts['transition_model'])
observation_model.load_state_dict(model_dicts['observation_model'])
reward_model.load_state_dict(model_dicts['reward_model'])
encoder.load_state_dict(model_dicts['encoder'])
transition_optimizer.load_state_dict(model_dicts['transition_optimizer'])
reward_optimizer.load_state_dict(model_dicts['reward_optimizer'])
else:
logging.warning("Model weight file: {} does not exist".format(args.checkpoint_path))
return (transition_model, observation_model, reward_model, encoder), (transition_optimizer, reward_optimizer), param_list
def setup_planner(args: argparse.Namespace, env: Env, transition_model: nn.Module, reward_model: nn.Module) -> nn.Module:
planner = MPCPlanner(
env.action_size,
args.planning_horizon,
args.optimisation_iters,
args.candidates,
args.top_candidates,
transition_model,
reward_model,
env.action_range[0],
env.action_range[1]
)
return planner
def setup(args: argparse.Namespace) -> Tuple[Env,
ExperienceReplay,
Tuple[nn.Module, nn.Module, nn.Module, nn.Module],
Tuple[optim.Optimizer, optim.Optimizer],
List[nn.parameter.Parameter],
nn.Module]:
if not args.test:
setup_seed(args)
setup_workdir(args)
save_args(args)
env = setup_env(args)
D = setup_replay(args, env)
models, optimizers, param_list = setup_models(args, env)
planner = setup_planner(args, env, models[0], models[2])
return env, D, models, optimizers, param_list, planner
def collect_experience(args: argparse.Namespace,
env: Env,
models: Tuple[nn.Module, nn.Module, nn.Module, nn.Module],
planner: nn.Module,
explore: bool = True,
desc: str = "Collecting episode") -> Dict[str, List[torch.Tensor]]:
"""collect an episode by applying policy on the real env.
"""
# unpack models
transition_model, _, _, encoder = models
# storage
experience = {
"belief": [],
"state": [],
"action": [],
"observation": [],
"reward_dist": [],
"reward_coll": [],
"done": []
}
with torch.no_grad():
# h[-1], s[-1], a[-1], o[0]
belief = torch.zeros(1, args.belief_size, device=args.device)
posterior_state = torch.zeros(1, args.state_size, device=args.device)
action = torch.zeros(1, env.action_size, device=args.device)
observation = env.reset()
for _ in trange(args.max_episode_length // args.action_repeat, leave=False, desc=desc):
# h[t] = f(h[t-1], a[t-1])
# s[t] ~ Prob(s|h[t])
# action and observation need extra time dimension because transition model uses batch operation
belief, _, _, _, posterior_state, _, _ = transition_model.forward(
posterior_state,
action.unsqueeze(dim=0),
belief,
encoder(observation.to(device=args.device)).unsqueeze(dim=0))
belief, posterior_state = belief.squeeze(dim=0), posterior_state.squeeze(dim=0)
# a[t] = pi(h[t], s[t]) + noise
# action is bounded by action range
action = planner(belief, posterior_state)
if explore:
action += args.action_noise * torch.randn_like(action)
action.clamp_(min=env.action_range[0], max=env.action_range[1])
# o[t+1] ~ Prob(o|x[t], a[t]), r[t+1], z[t+1]
next_observation, _, done, info = env.step(action[0].cpu())
# save h[t], s[t], a[t], o[t], r[t+1], z[t+1]
experience["belief"].append(belief)
experience["state"].append(posterior_state)
experience["action"].append(action.cpu())
experience["observation"].append(observation)
experience["reward_dist"].append(info["reward_dist"])
experience["reward_coll"].append(info["reward_coll"])
experience["done"].append(done)
if done:
break
else:
observation = next_observation
return experience
def test(args: argparse.Namespace, env: Env, models: Tuple[nn.Module, nn.Module, nn.Module, nn.Module], planner: nn.Module):
for model in models:
model.eval()
# unpack models
_, observation_model, _, _ = models
# collect an episode
with torch.no_grad():
experience = collect_experience(args, env, models, planner, False, desc="Collecting experience 0")
# get observations and predictions
observations = torch.cat(experience["observation"], dim=0)
beliefs = torch.cat(experience["belief"], dim=0)
states = torch.cat(experience["state"], dim=0)
predictions = observation_model.forward(beliefs.to(args.device), states.to(args.device))
# visualize them
visualize_local_map(os.path.join(args.video_dir, "observation.mp4"), observations)
visualize_local_map(os.path.join(args.video_dir, "prediction.mp4"), predictions)
visualize_global_map(os.path.join(args.video_dir, "global.mp4"), args.obstacle_index, observations, predictions)
for model in models:
model.train()
def train(args: argparse.Namespace,
env: Env,
D: ExperienceReplay,
models: Tuple[nn.Module, nn.Module, nn.Module, nn.Module],
optimizer: Tuple[optim.Optimizer, optim.Optimizer],
param_list: List[nn.parameter.Parameter],
planner: nn.Module):
    # auxiliary tensors
global_prior = Normal(
torch.zeros(args.batch_size, args.state_size, device=args.device),
torch.ones(args.batch_size, args.state_size, device=args.device)
) # Global prior N(0, I)
# Allowed deviation in KL divergence
free_nats = torch.full((1, ), args.free_nats, dtype=torch.float32, device=args.device)
summary_writter = SummaryWriter(args.tensorboard_dir)
# unpack models
transition_model, observation_model, reward_model, encoder = models
transition_optimizer, reward_optimizer = optimizer
for idx_episode in trange(args.episodes, leave=False, desc="Episode"):
for idx_train in trange(args.collect_interval, leave=False, desc="Training"):
# Draw sequence chunks {(o[t], a[t], r[t+1], z[t+1])} ~ D uniformly at random from the dataset
# The first two dimensions of the tensors are L (chunk size) and n (batch size)
# We want to use o[t+1] to correct the error of the transition model,
# so we need to convert the sequence to {(o[t+1], a[t], r[t+1], z[t+1])}
observations, actions, rewards_dist, rewards_coll, nonterminals = D.sample(args.batch_size, args.chunk_size)
# Create initial belief and state for time t = 0
init_belief = torch.zeros(args.batch_size, args.belief_size, device=args.device)
init_state = torch.zeros(args.batch_size, args.state_size, device=args.device)
# Transition model forward
# deterministic: h[t+1] = f(h[t], a[t])
# prior: s[t+1] ~ Prob(s|h[t+1])
# posterior: s[t+1] ~ Prob(s|h[t+1], o[t+1])
beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = transition_model(
init_state,
actions[:-1],
init_belief,
bottle(encoder, (observations[1:], )),
nonterminals[:-1]
)
# observation loss
predictions = bottle(observation_model, (beliefs, posterior_states))
visual_loss = F.mse_loss(
predictions[:, :, :3*64*64],
observations[1:, :, :3*64*64]
).mean()
symbol_loss = F.mse_loss(
predictions[:, :, 3*64*64:],
observations[1:, :, 3*64*64:]
).mean()
observation_loss = visual_loss + symbol_loss
# KL divergence loss. Minimize the difference between posterior and prior
kl_loss = torch.max(
kl_divergence(
Normal(posterior_means, posterior_std_devs),
Normal(prior_means, prior_std_devs)
).sum(dim=2),
free_nats
).mean(dim=(0, 1)) # Note that normalisation by overshooting distance and weighting by overshooting distance cancel out
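            # Clamping the KL term at `free_nats` means divergences below that budget contribute a
            # constant (zero-gradient) penalty, so the posterior is only pushed toward the prior once
            # it uses more than `free_nats` nats of information (the PlaNet "free nats" trick).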
if args.global_kl_beta != 0:
kl_loss += args.global_kl_beta * kl_divergence(
Normal(posterior_means, posterior_std_devs),
global_prior
).sum(dim=2).mean(dim=(0, 1))
# overshooting loss
if args.overshooting_kl_beta != 0:
overshooting_vars = [] # Collect variables for overshooting to process in batch
for t in range(1, args.chunk_size - 1):
d = min(t + args.overshooting_distance, args.chunk_size - 1) # Overshooting distance
# Use t_ and d_ to deal with different time indexing for latent states
t_, d_ = t - 1, d - 1
# Calculate sequence padding so overshooting terms can be calculated in one batch
seq_pad = (0, 0, 0, 0, 0, t - d + args.overshooting_distance)
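                    # Worked example (illustrative): with chunk_size=50 and overshooting_distance=50,
                    # at t=30 we get d=49, so the 19-step overshoot is padded with 31 zero steps,
                    # giving every entry the same fixed length of overshooting_distance steps.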
# Store
# * a[t:d],
# * z[t+1:d+1]
# * r[t+1:d+1]
# * h[t]
# * s[t] prior
# * E[s[t:d]] posterior
# * Var[s[t:d]] posterior
# * mask:
# the last few sequences do not have enough length,
# so we pad it with 0 to the same length as previous sequence for batch operation,
# and use mask to indicate invalid variables.
overshooting_vars.append(
(F.pad(actions[t:d], seq_pad),
F.pad(nonterminals[t:d], seq_pad),
F.pad(rewards_dist[t:d], seq_pad[2:]),
beliefs[t_],
prior_states[t_],
F.pad(posterior_means[t_ + 1:d_ + 1].detach(), seq_pad),
F.pad(posterior_std_devs[t_ + 1:d_ + 1].detach(), seq_pad, value=1),
F.pad(torch.ones(d - t, args.batch_size, args.state_size, device=args.device), seq_pad)
)
) # Posterior standard deviations must be padded with > 0 to prevent infinite KL divergences
overshooting_vars = tuple(zip(*overshooting_vars))
# Update belief/state using prior from previous belief/state and previous action (over entire sequence at once)
beliefs, prior_states, prior_means, prior_std_devs = transition_model(
torch.cat(overshooting_vars[4], dim=0),
torch.cat(overshooting_vars[0], dim=1),
torch.cat(overshooting_vars[3], dim=0),
None,
torch.cat(overshooting_vars[1], dim=1)
)
seq_mask = torch.cat(overshooting_vars[7], dim=1)
# Calculate overshooting KL loss with sequence mask
kl_loss += (1 / args.overshooting_distance) * args.overshooting_kl_beta * torch.max(
(kl_divergence(
Normal(torch.cat(overshooting_vars[5], dim=1), torch.cat(overshooting_vars[6], dim=1)),
Normal(prior_means, prior_std_devs)
) * seq_mask).sum(dim=2),
free_nats
).mean(dim=(0, 1)) * (args.chunk_size - 1) # Update KL loss (compensating for extra average over each overshooting/open loop sequence)
# TODO: add learning rate schedule
# Update model parameters
transition_optimizer.zero_grad()
loss = observation_loss * 200 + kl_loss
loss.backward()
nn.utils.clip_grad_norm_(param_list, args.grad_clip_norm, norm_type=2)
transition_optimizer.step()
# reward loss
rewards_dist_predict, rewards_coll_predict = bottle(reward_model.raw, (beliefs.detach(), posterior_states.detach()))
reward_loss = F.mse_loss(
rewards_dist_predict,
rewards_dist[:-1],
reduction='mean'
) + F.binary_cross_entropy(
rewards_coll_predict,
rewards_coll[:-1],
reduction='mean'
)
reward_optimizer.zero_grad()
reward_loss.backward()
reward_optimizer.step()
# add tensorboard log
global_step = idx_train + idx_episode * args.collect_interval
summary_writter.add_scalar("observation_loss", observation_loss, global_step)
summary_writter.add_scalar("reward_loss", reward_loss, global_step)
summary_writter.add_scalar("kl_loss", kl_loss, global_step)
for idx_collect in trange(1, leave=False, desc="Collecting"):
experience = collect_experience(args, env, models, planner, True, desc="Collecting experience {}".format(idx_collect))
T = len(experience["observation"])
for idx_step in range(T):
D.append(experience["observation"][idx_step],
experience["action"][idx_step],
experience["reward_dist"][idx_step],
experience["reward_coll"][idx_step],
experience["done"][idx_step])
# Checkpoint models
if (idx_episode + 1) % args.checkpoint_interval == 0:
record_path = os.path.join(args.checkpoint_dir, "checkpoint")
checkpoint_path = os.path.join(args.checkpoint_dir, 'models_%d.pth' % (idx_episode+1))
torch.save(
{
'transition_model': transition_model.state_dict(),
'observation_model': observation_model.state_dict(),
'reward_model': reward_model.state_dict(),
'encoder': encoder.state_dict(),
'transition_optimizer': transition_optimizer.state_dict(),
'reward_optimizer': reward_optimizer.state_dict()
},
checkpoint_path)
with open(record_path, "w") as f:
f.write('models_%d.pth' % (idx_episode+1))
planner.save(os.path.join(args.torchscript_dir, "mpc_planner.pth"))
transition_model.save(os.path.join(args.torchscript_dir, "transition_model.pth"))
reward_model.save(os.path.join(args.torchscript_dir, "reward_model.pth"))
observation_model.save(os.path.join(args.torchscript_dir, "observation_decoder.pth"))
encoder.save(os.path.join(args.torchscript_dir, "observation_encoder.pth"))
summary_writter.close()
def main():
args = postprocess_args(get_args())
env, D, models, optimiser, param_list, planner = setup(args)
if not args.test:
train(args, env, D, models, optimiser, param_list, planner)
test(args, env, models, planner)
if __name__ == "__main__":
main()
``` |
{
"source": "jiangerji/iam007-mobile-server",
"score": 2
} |
#### File: iam007-mobile-server/controllers/ilauncher.py
```python
import json
import os
import time
import platform
import HTMLParser
import subprocess
import urllib
from logutil import logger
import threading
MYSQL_HOST = "jiangerji.mysql.rds.aliyuncs.com"
MYSQL_PASSPORT = "jiangerji"
MYSQL_PASSWORD = "<PASSWORD>"
MYSQL_DATABASE = "spider"
APIS_HOST = "http://192.168.3.11:802/iam007"
if platform.system() == 'Windows' and False:
APIS_HOST = "http://192.168.54.9:8000/iam007"
MYSQL_HOST="localhost"
MYSQL_PASSPORT="root"
MYSQL_PASSWORD="<PASSWORD>"
MYSQL_DATABASE="spider"
dal = None#DAL('sqlite://wanke.sqlite3.sqlite')
def _init():
global dal, MYSQL_PASSPORT, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_DATABASE
if dal is None:
"""
SQLite sqlite://storage.db
MySQL mysql://username:password@localhost/test
PostgreSQL postgres://username:password@localhost/test
"""
command = "mysql://%s:%s@%s/%s"%(MYSQL_PASSPORT, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_DATABASE)
dal = DAL(command)
def _getTagsStaticFile(filename, mode="r", returnMode="content"):
filePath = os.path.join(os.getcwd(), "applications")
filePath = os.path.join(filePath, "iam007")
filePath = os.path.join(filePath, "static")
filePath = os.path.join(filePath, filename)
if returnMode == "path":
return filePath
return open(filePath, mode)
def update():
    # Update an app's scheme; request format: trackid=1111&scheme=com.a.a.a.a:com.a.a.a
global dal, APIS_HOST
preTime = time.time()
_init()
print "init cost:", (time.time() - preTime)
preTime = time.time()
parseRequest()
trackid = request.vars.get("trackid")
scheme = request.vars.get("scheme")
logger.info("update")
logger.info("\t%s"%str(request.vars))
if trackid is None or scheme is None:
return "parameter error!"
    # Only update when the existing scheme is empty
cmd = 'select scheme from appstores where trackid="%s"'%trackid
schemeIn = dal.executesql(cmd)[0][0]
isExist = (schemeIn is None) or (len(schemeIn.strip()) == 0)
if isExist:
cmd = 'update appstores set scheme="%s" where trackid=%s;'%(scheme, trackid)
print cmd
dal.executesql(cmd)
print "commit", dal.commit()
logger.info("\t%s"%str(isExist))
return json.dumps({"result":isExist})
def getUnhandleApps():
    # Fetch information about apps that have not been handled yet
global dal, APIS_HOST
preTime = time.time()
_init()
print "init cost:", (time.time() - preTime)
preTime = time.time()
parseRequest()
# limit=20&index=1
limit = 20
index = 0
if request.vars.has_key("index"):
try:
index = int(request.vars.get("index"))
except Exception, e:
pass
if index < 0:
index = 0
cmd = "select trackid, name, icon60, icon512, price from appstores where ipadonly=0 and scheme is null limit %d offset %d;"%(limit, index*limit)
result = dal.executesql(cmd)
apps = []
for app in result:
trackid, name, icon60, icon512, price = app
appInfo = {}
appInfo["trackid"] = trackid
appInfo["name"] = name
appInfo["icon60"] = icon60
appInfo["icon512"] = icon512
appInfo["url"] = "https://itunes.apple.com/app/id%s?mt=8"%trackid
appInfo["price"] = price
apps.append(appInfo)
return json.dumps({"data":apps})
def checkUpdate():
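    # Compare the client's schemeJsonVersion against the newest version in `appstores`:
    # equal -> no update; missing/0 -> return the full extSchemeApps.json; otherwise return the
    # incremental diff file extSchemeApps_<maxVersion>_<clientVersion>.json.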
global dal, APIS_HOST
preTime = time.time()
_init()
print "init cost:", (time.time() - preTime)
preTime = time.time()
parseRequest()
maxVersion = -1
command = "select max(version) from appstores where version > 0;"
maxVersion = dal.executesql(command)[0][0]
appSchemeJsonVersion = None
if request.vars.has_key("schemeJsonVersion"):
appSchemeJsonVersion = int(request.vars.get("schemeJsonVersion"))
if appSchemeJsonVersion == None or appSchemeJsonVersion > maxVersion:
appSchemeJsonVersion = 0
result = {}
if appSchemeJsonVersion == maxVersion:
        # No update needed
result = {"result":False}
elif appSchemeJsonVersion == 0:
# return os.getcwd()
        # Return the full scheme JSON data
content = json.load(_getTagsStaticFile("extSchemeApps.json"))
result = {"result":True, "data":content, "schemeJsonVersion":maxVersion}
else:
        # Return the incremental update data
content = json.load(_getTagsStaticFile("scheme/extSchemeApps_%d_%d.json"%(int(maxVersion), int(appSchemeJsonVersion))))
result = {"result":True, "data":content, "schemeJsonVersion":maxVersion}
# cmd = 'select `value` from appconfig where name="forceupdate";'
# forceUpdate = False
# try:
# a = dal.executesql(cmd)[0][0]
# if int(a) > 0:
# forceUpdate = True
# except Exception, e:
# pass
# result["forceUpdate"] = forceUpdate
# result["url"] = "https://itunes.apple.com/cn/app/id414478124?mt=8";
return json.dumps(result)
def checkVersion():
global dal, APIS_HOST
preTime = time.time()
_init()
print "checkVersion init cost:", (time.time() - preTime)
preTime = time.time()
parseRequest()
version = None
if request.vars.has_key("version"):
version = request.vars.get("version")
print "version is:", version
result = {}
needUpdate = True
if version is not None:
cmd = 'select `value` from appconfig where name="appversion";'
try:
latestVersion = dal.executesql(cmd)[0][0]
print "latestVersion is:", latestVersion
if latestVersion == version:
needUpdate = False
except Exception, e:
pass
    # Temporarily set to False
# needUpdate = False
result["update"] = needUpdate
cmd = 'select `value` from appconfig where name="forceupdate";'
forceUpdate = False
if needUpdate:
try:
a = dal.executesql(cmd)[0][0]
if int(a) > 0:
forceUpdate = True
except Exception, e:
pass
result["forceUpdate"] = forceUpdate
result["url"] = "https://itunes.apple.com/cn/app/id988742317?mt=8";
result["shake"] = False # shake the icon when in edit mode
return json.dumps(result)
def _commitThread(content):
filePath = _getTagsStaticFile(".update"+os.path.sep+"commit_%d.json"%long(time.time()), returnMode="path")
commitFile = open(os.path.abspath(filePath), "w")
commitFile.write(content)
commitFile.close()
spiderPath = "E:\\git\\iam007-spider\\AppStore\\AppStoreSpider.py"
cmd = 'python "%s" update "%s"'%(spiderPath, filePath)
# os.system(cmd.encode("utf-8"))
# global dal
# _init()
# for trackid in content.keys():
# try:
# schemes = ":".join(content.get(trackid))
# cmd = 'select count(*) from appstores where trackid="%s";'%trackid
# if dal.executesql(cmd)[0][0] > 0:
    #                 # Already exists: update it
# cmd = 'update appstores set scheme="%s" where trackid="%s";'%(schemes, trackid)
# dal.executesql(cmd)
# logger.info("update %s scheme to %s"%(trackid, schemes))
# print "update %s scheme to %s"%(trackid, schemes)
# else:
    #                 # Not present: fetch the app name and icon and insert them together
# logger.info("insert %s scheme to %s"%(trackid, schemes))
# print "insert %s scheme to %s"%(trackid, schemes)
# except Exception, e:
# pass
# dal.commit()
def commit():
parseRequest()
result = True
# t1 = threading.Thread(target=_commitThread, args=(request.vars.keys()[0],))
# t1.setDaemon(True)
# t1.start()
import tempfile
filepath = tempfile.mktemp()
fp = open(filepath, "w")
fp.write(request.vars.keys()[0])
fp.close()
try:
commitScriptPath = ["applications", "iam007", "modules", "Commit.py"]
commitScriptPath = os.path.sep.join(commitScriptPath)
cmd = 'python "%s" "%s"'%(commitScriptPath, filepath)
# os.system(cmd)
params = urllib.urlencode({"file":filepath})
cmd = 'wget -q http://127.0.0.1:9156/commitWithFile?%s'%(params)
subprocess.Popen(cmd, shell=True)
except Exception, e:
print e
# t1 = threading.Thread(target=_commitExecute, args=(filepath,))
# t1.setDaemon(True)
# t1.start()
return json.dumps({"result":result})
def task():
global dal, APIS_HOST
preTime = time.time()
_init()
print "init cost:", (time.time() - preTime)
preTime = time.time()
parseRequest()
action = request.vars.get("action")
appleid = request.vars.get("id")
trackid = request.vars.get("trackid")
# debug
# trackid = '9585078694'
# appleid = "jiangerji"
# action = 'get'
print request.vars
result = {}
(_BLANK, _MY_OWNER, _OTHER_OWNER, _PARAMS_ERROR) = (0, 1, 2, -1)
state = _BLANK
if action is None or trackid is None or appleid is None:
state = _PARAMS_ERROR
elif action == "get":
# action=get&trackid=111&appleid=111
        # Claim the task; check whether it has already been claimed by this user
alreadyInTask = False
cmd = 'select trackid, owner from taskstate where trackid=%s;'%trackid
try:
rr = dal.executesql(cmd)
print rr
if len(rr) > 0:
                # The task has already been claimed
                alreadyInTask = True
                # Check who claimed it
                if rr[0][1] == appleid:
                    state = _MY_OWNER  # claimed by the current user
                else:
                    state = _OTHER_OWNER  # claimed by someone else
            else:
                # Nobody has claimed it
                state = _BLANK
except Exception, e:
pass
if state == _BLANK:
            # Nobody has claimed it yet, so claim it now
cmd = 'insert into taskstate (trackid, state, owner, timestamp) VALUES ("%s", %s, "%s", "%s")'%(trackid, 1, appleid, time.strftime("%Y_%m_%d_%H_%M_%S"))
dal.executesql(cmd)
elif action == "complete":
# action=complete&trackid=111&appleid=111&scheme=aaa.aaa.aaa:vv:vvs
scheme = request.vars.get("scheme")
print "scheme is ", scheme
if (scheme is None) or (len(scheme.strip()) == 0):
            state = _PARAMS_ERROR  # parameter error
        else:
            # Complete the task
            # Remove it from the taskstate table
cmd = 'delete from taskstate where trackid=%s'%trackid
dal.executesql(cmd)
print "delete from taskstate"
            # Insert a record into the task table
cmd = 'insert into task (trackid, scheme, owner, timestamp) VALUES ("%s", "%s", "%s", "%s")'%(trackid, scheme, appleid, time.strftime("%Y_%m_%d_%H_%M_%S"))
dal.executesql(cmd)
print "insert into task"
            # Write the scheme into the appstores table
cmd = 'update appstores set scheme="%s" where trackid=%s;'%(scheme, trackid)
dal.executesql(cmd)
print "insert into appstores"
dal.commit()
result["result"] = state
return json.dumps(result)
def unhandle():
global dal
preTime = time.time()
_init()
print "init cost:", (time.time() - preTime)
preTime = time.time()
parseRequest()
appType = None
if request.vars.has_key("type"):
appType = request.vars.get("type")
if appType is None:
appType = "all"
cmd = 'select name, trackid, price from appstores where scheme is null;'
if appType == "charge":
cmd = 'select name, trackid, price from appstores where scheme is null and price > 0;'
elif appType == "free":
cmd = 'select name, trackid, price from appstores where scheme is null and price <= 0;'
result = dal.executesql(cmd)
result = map(lambda x: (x[0], "https://itunes.apple.com/cn/app/id%s"%x[1], " %0.2f"%int(x[2])), result)
return dict(appInfos=result)
def help():
    # Help page
global dal
preTime = time.time()
_init()
print "init cost:", (time.time() - preTime)
preTime = time.time()
parseRequest()
redirect("http://chuye.cloud7.com.cn/5304123")
return ""
def about():
    # About page
global dal
preTime = time.time()
_init()
print "init cost:", (time.time() - preTime)
preTime = time.time()
parseRequest()
redirect("http://chuye.cloud7.com.cn/3017283")
return ""
def support():
    # Technical support page, used for the App Store submission
global dal
preTime = time.time()
_init()
print "init cost:", (time.time() - preTime)
preTime = time.time()
parseRequest()
redirect("http://chuye.cloud7.com.cn/3014794")
return ""
def config():
    # Fetch the application's configuration
content = ""
try:
content = json.load(_getTagsStaticFile("AppConfig.json"))
except Exception, e:
print e
return json.dumps(content)
def configL():
    # Fetch the application's configuration
debugMode = False
if request.vars.has_key("debug"):
debugMode = True
content = ""
try:
if debugMode:
content = json.load(_getTagsStaticFile("AppConfigL_debug.json"))
else:
content = json.load(_getTagsStaticFile("AppConfigL.json"))
except Exception, e:
print e
return json.dumps(content)
def guideInstallApp():
    # QQ, WeChat, Weibo, YY, Momo
    # NetEase News
    # iQiyi, Tencent Video
    # DOTA Legend
    # QQ Music, Baidu Music
    # Baidu Maps, AMap
    # BeautyCam
    # Fetch the application's configuration
content = ""
try:
content = json.load(_getTagsStaticFile("guideinstallapp.json"))
except Exception, e:
print e
return json.dumps(content)
def appConfig():
    # Backend page for app configuration
if request.vars.has_key("key"):
os = request.vars.get("key")
if os is not None and os == "963852741":
pass
else:
return "haha"
else:
return "hah"
forms = []
configs = [
{
"name": "应用程序版本号",
"key": "appVersion",
"requires": None
},
{
"name": "是否需要强制更新",
"key": "forceUpdate",
"requires": None
},
{
"name": "应用程序itunes地址",
"key": "url",
"requires": None
}
]
for config in configs:
requiresFunc = IS_NOT_EMPTY
form = FORM(config.get("name"),
INPUT(_name=config.get("key"), requires=requiresFunc()),
INPUT(_type='submit'))
forms.append(form)
if form.accepts(request,session):
request.sResult = 'form accepted'
print request.sResult, str(form)
elif form.errors:
response.sResult = 'form has errors'
print request.sResult, str(form)
else:
response.sResult = 'please fill the form'
print request.sResult, str(form)
return dict(forms=forms)
def parseRequest():
    # OS type of the requesting client
platform = ""
if request.vars.has_key("platform"):
platform = request.vars.get("platform")
    # OS version of the requesting client
os = ""
if request.vars.has_key("os"):
os = request.vars.get("os")
    # Version of the requesting client app
version = ""
if request.vars.has_key("version"):
version = request.vars.get("version")
token = ""
if request.vars.has_key("token"):
token = request.vars.get("token")
return platform, os, version, token
``` |
{
"source": "JiangFeng07/feng-python-apply",
"score": 4
} |
#### File: feng-ml-python/src/KNN.py
```python
import numpy as np
class KNN(object):
def __init__(self, n_neighbors):
self.n_neighbors = n_neighbors
pass
def predict(self, x, y, neighbors):
ranks = []
        for i in range(len(neighbors)):
c = x - neighbors[i]
ranks.append(np.inner(c, c))
pass
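# Illustrative sketch (not part of the original file): a complete distance-based k-NN vote built
# only on numpy. The helper name `knn_predict` and the majority-vote tie-breaking are assumptions
# for illustration, not the author's API.
def knn_predict(x, train_X, train_y, n_neighbors=3):
    # squared Euclidean distance from x to every training point
    dists = np.sum((train_X - x) ** 2, axis=1)
    # indices of the n_neighbors closest training points
    nearest = np.argsort(dists)[:n_neighbors]
    # majority vote over the neighbours' labels
    labels, counts = np.unique(train_y[nearest], return_counts=True)
    return labels[np.argmax(counts)]
# Example: knn_predict(np.array([1., 1.]), np.array([[0., 0.], [1., 1.], [5., 5.]]),
#                      np.array([0, 1, 1]), n_neighbors=1) returns 1.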
if __name__ == "__main__":
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
c = b - a
print(np.inner(c, c))
```
#### File: feng-ml-tf/src/data_helper.py
```python
import collections
import tensorflow as tf
def load_data(filename, sep=' ', sep1=',', isCharacter=False):
label_list = []
features_list = []
with tf.gfile.GFile(filename, 'r') as f:
for line in f.readlines():
fields = line.strip().split(sep)
if len(fields) != 2:
continue
label = fields[0]
features = fields[1]
label_list.append(label)
if isCharacter:
features_list.append(list(features))
else:
features_list.append(features.split(sep1))
return label_list, features_list
def gen(filepath):
with tf.gfile.GFile(filepath, 'r') as f:
for line in f.readlines():
fields = line.strip().split(' ')
if len(fields) != 2:
continue
label = fields[0]
features = fields[1]
yield (label, features.split(','))
def build_word_dic(words_list, label_list, vocab_size=5000):
word_dic = dict()
word_dic['pad'] = 0
word_dic['unk'] = 1
all_words = []
for words in words_list:
all_words.extend(words)
counter = collections.Counter(all_words).most_common(vocab_size)
words, _ = list(zip(*counter))
for word in words:
word_dic[word] = len(word_dic)
label_set = set(label_list)
label_dic = dict()
for label in label_set:
label_dic[label] = len(label_dic)
return words, word_dic, label_set, label_dic
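# Illustrative note: build_word_dic() reserves index 0 for 'pad' and 1 for 'unk', then assigns the
# following indices to the `vocab_size` most frequent tokens, e.g. words_list=[['a','b'],['b','c']]
# yields word_dic = {'pad': 0, 'unk': 1, 'b': 2, 'a': 3, 'c': 4} (ties ordered by first occurrence).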
def build_dic_hash_table(word_dic, label_dic):
word_keys = tf.constant(list(word_dic.keys()))
word_values = tf.constant(list(word_dic.values()))
word_table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(word_keys, word_values), word_dic['unk'])
label_keys = tf.constant(list(label_dic.keys()))
label_values = tf.constant(list(label_dic.values()))
label_table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(label_keys, label_values), -1)
return word_table, label_table
def train_input_fn(label_list, features_list, shuffle_size, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((label_list, features_list))
dataset = dataset.shuffle(shuffle_size).repeat().batch(batch_size)
return dataset
def build_table_from_text_file(filepath):
return tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileInitializer(filepath, tf.string, 0, tf.int64, 1, delimiter=" "), -1)
if __name__ == '__main__':
# label_list, features_list = load_data('/tmp/1.csv')
# words, word_dic, labels, label_dic = build_word_dic(features_list, label_list)
# word_table, label_table = build_dic_hash_table(word_dic, label_dic)
sess = tf.InteractiveSession()
# word_out = word_table.lookup(tf.constant(list(word_dic.keys())))
# label_out = label_table.lookup(tf.constant(list(label_dic.keys())))
# tf.tables_initializer().run()
# print(word_out.eval())
# print(label_out.eval())
table = build_table_from_text_file('/tmp/2.csv')
out = table.lookup(tf.constant(['emerson']))
table.init.run()
print(out.eval())
```
#### File: feng-ml-tf/src/DataSetExample.py
```python
import tensorflow as tf
filepath = '/tmp/ner_data_test'
def gen():
with tf.gfile.GFile(filepath, 'r') as f:
lines = [line.strip().split(' ') for line in f]
index = 0
while True:
label = lines[index][0]
features = list(lines[index][1])
yield (features, label)
index += 1
if index == len(lines):
index = 0
if __name__ == '__main__':
dataset = tf.data.Dataset.from_tensors(tf.constant([['jiang', 'feng'], ['messi', 'henry']]))
print(dataset.output_shapes)
print(dataset.output_types)
dataset2 = tf.data.Dataset.from_tensor_slices(tf.constant([['jiang', 'feng'], ['messi', 'henry']]))
    print(dataset2.output_shapes)
    print(dataset2.output_types)
dataset3 = tf.data.Dataset.from_generator(gen, (tf.string, tf.string),
(tf.TensorShape([None]), tf.TensorShape([])))
dataset4 = tf.data.Dataset.range(100).map(
lambda x: x + tf.random_uniform([], -10, 10, tf.int64))
print(dataset4.output_shapes)
print(dataset4.output_types)
iterator = dataset3.make_one_shot_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
for i in range(6):
try:
# sess.run(next_element)
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
#### File: feng-ml-tf/src/TFRecordExample.py
```python
import tensorflow as tf
def generate_tfrecords(tfrecord_name, labels, sequences):
with tf.python_io.TFRecordWriter(tfrecord_name) as f:
for feature, label in zip(sequences, labels):
frame_feature = list(map(lambda id: tf.train.Feature(int64_list=tf.train.Int64List(value=[id])), feature))
print(frame_feature)
example = tf.train.SequenceExample(
context=tf.train.Features(feature={
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
}),
feature_lists=tf.train.FeatureLists(feature_list={
'sequence': tf.train.FeatureList(feature=frame_feature)
})
)
f.write(example.SerializeToString())
def single_example_parser(serialized_example):
context_features = {
'label': tf.FixedLenFeature([], dtype=tf.int64)
}
sequence_features = {
'sequence': tf.FixedLenSequenceFeature([], dtype=tf.int64)
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=serialized_example,
context_features=context_features,
sequence_features=sequence_features
)
labels = context_parsed['label']
sequences = sequence_parsed['sequence']
return sequences, labels
def batched_data(tfrecord_filename, single_example_parser, batch_size, padded_shapes, num_epochs=1, buffer_size=1000):
dataset = tf.data.TFRecordDataset(tfrecord_filename) \
.map(single_example_parser) \
.padded_batch(batch_size, padded_shapes=padded_shapes) \
.shuffle(buffer_size) \
.repeat(num_epochs)
return dataset.make_one_shot_iterator().get_next()
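# batched_data() wires the pieces into one input pipeline: each serialized SequenceExample is
# parsed into (sequence, label), padded_batch pads the variable-length sequences to the longest
# one in each batch (padded_shapes=[None]), and the resulting batches are shuffled and repeated.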
if __name__ == '__main__':
def model(features, labels):
return features, labels
sequences = [[1], [2, 2], [3, 3, 3], [4, 4, 4, 4], [5, 5, 5, 5, 5],
[1], [2, 2], [3, 3, 3], [4, 4, 4, 4]]
labels = [1, 2, 3, 4, 5, 1, 2, 3, 4]
generate_tfrecords('/tmp/test.tfrecord', labels, sequences)
out = model(*batched_data('/tmp/test.tfrecord', single_example_parser, 2, ([None], [])))
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
while not coord.should_stop():
print(sess.run(out))
except tf.errors.OutOfRangeError:
print("done training")
finally:
coord.request_stop()
coord.join(threads)
```
#### File: feng-nlp-python/utils/Excel2CSV.py
```python
import pandas as pd
__author__ = 'Henry'
def pre(text):
text = str(text).replace('\\', '\\\\').replace('\r', '').replace('\n', '').replace('\t', '').strip()
if text == 'nan':
return ''
else:
return text
def pre_bool(text):
if text == '是':
return '1'
else:
return '-1'
# Columns: 城市 (city), 商户ID (merchant ID), 商户名 (merchant name), 菜品分类 (dish category), 菜名 (dish name), 是否招牌菜 (signature dish), 分量 (portion), 价格 (price), 做法 (preparation), 食材 (ingredients), 内容备注 (content notes), 用餐时间 (dining time), 备注 (remarks), 照片编码 (photo code), 照片备注 (photo remarks)
df = pd.read_excel('~/Downloads/MVP.xlsx', '菜单明细')
print(len(df))
print(df.head())
file = open('/tmp/match3.csv', 'w', encoding='utf-8')
for i in range(len(df)):
file.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
pre(df['城市'][i])
, pre(df['商户ID'][i])
, pre(df['商户名'][i])
, pre(df['菜品分类'][i])
, pre(df['菜名'][i])
, pre_bool(df['是否招牌菜'][i])
, pre(df['分量'][i])
, pre(df['价格'][i])
, pre(df['做法'][i])
, pre(df['食材'][i])
, pre(df['内容备注'][i])
, pre(df['用餐时间'][i])
, pre(df['备注'][i])
, pre(df['照片编码'][i])
, pre(df['照片备注'][i])
, 'END'
))
file.close()
```
#### File: movie/middlewares/random_user_agent.py
```python
import random
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
import logging
class RandomUserAgentMiddleware(object):
    # The default user_agent_list contains Chrome, IE, Firefox, Mozilla, Opera and Netscape user agents.
    # More user agent strings can be found at http://www.useragentstring.com/pages/useragentstring.php
user_agent_list = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
"User-Agent:Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.13 (KHTML, like Gecko) Chrome/24.0.1290.1 Safari/537.13",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/537.13 (KHTML, like Gecko) Chrome/24.0.1290.1 Safari/537.13",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.6 Safari/537.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_0) AppleWebKit/537.4 (KHTML, like Gecko) Chrome/22.0.1229.79 Safari/537.4",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.22 (KHTML, like Gecko) Chrome/19.0.1047.0 Safari/535.22",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.45 Safari/535.19",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.45 Safari/535.19",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.66 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.66 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.66 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_8) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.66 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.65 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.65 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_4) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.65 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_6) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.112 Safari/534.30",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_4) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.112 Safari/534.30",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.100 Safari/534.30",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_4) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.100 Safari/534.30"
]
def __init__(self, user_agent=''):
self.logger = logging.getLogger("movie.middlewares.random_user_agent")
self.user_agent = user_agent
def process_request(self, request, spider):
ua = random.choice(self.user_agent_list)
if request.headers.get('USER_AGENT') is not None:
return
request.headers.setdefault('USER_AGENT', ua)
self.logger.info("process request %s using random ua: %s" % (request, ua))
``` |
{
"source": "JiangFeng07/NLPIK",
"score": 2
} |
#### File: model/entity_recognition/bert_crf.py
```python
import os
import re
import tensorflow as tf
from tensorflow import nn
from model.bert import tokenization
from model.bert import modeling
# from model.bert import AdamWeightDecayOptimizer
from model.bert.optimization import AdamWeightDecayOptimizer
from model.bert.run_classifier import DataProcessor
from model.bert.tokenization import load_vocab
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("train_file", '', "SQuAD json for training. E.g., train_data.csv")
flags.DEFINE_string("evaluate_file", '', "SQuAD json for predictions. E.g., train_data.csv")
flags.DEFINE_string("bert_config_file",
r'D:\judgement-nlp-server\bert-chinese\chinese_L-12_H-768_A-12\bert_config.json',
"The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.")
flags.DEFINE_string("vocab_file", r'D:\judgement-nlp-server\bert-chinese\chinese_L-12_H-768_A-12\vocab.txt',
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_integer("batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.")
flags.DEFINE_float("warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.")
flags.DEFINE_string("init_checkpoint", r'D:\judgement-nlp-server\bert-chinese\chinese_L-12_H-768_A-12\bert_model.ckpt',
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string("output_dir", '../../', "The output directory where the model checkpoints will be written.")
class InputExample(object):
"""A single training/test example for simple sequence name entity recognition."""
def __init__(self, guid, text, labels=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text = text
self.labels = labels
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
tokens,
input_ids,
input_mask,
segment_ids,
label_ids):
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
class NerProcessor(DataProcessor):
def get_train_examples(self, texts):
i = 0
examples = []
for text in texts:
# fields = text.decode('utf-8').split('\t')
fields = text.split('\t')
if len(fields) != 2:
continue
label, text = tuple(fields)
guid = "train-%d" % (i)
text = tokenization.convert_to_unicode(text)
label = tokenization.convert_to_unicode(label)
examples.append(InputExample(guid, text, label))
return examples
def get_dev_examples(self, data_dir):
pass
def get_test_examples(self, texts):
i = 0
examples = []
for text in texts:
guid = "test-%d" % (i)
text = tokenization.convert_to_unicode(text)
label = tokenization.convert_to_unicode('')
examples.append(InputExample(guid, text, label))
return examples
def get_labels(self):
pass
def convert_batch_example(examples, max_seq_length, vocab, label_dicts):
"""Converts a single `InputExample` into a single `InputFeatures`."""
input_ids, input_mask, segment_ids, label_ids = [], [], [], []
for example in examples:
labels, text = example.labels, example.text
_input_ids, _label_ids, tokens = [], [], []
tokens.append('[CLS]')
for token in list(text):
tokens.append(token)
tokens.append('[SEP]')
_input_ids.append(vocab['[CLS]'])
_label_ids.append(label_dicts['O'])
try:
tmp_input_ids, tmp_label_ids = convert_tokens_labels_to_ids(text, labels, vocab, label_dicts)
        except Exception:
continue
_input_ids.extend(tmp_input_ids)
_label_ids.extend(tmp_label_ids)
_input_ids.append(vocab['[SEP]'])
_label_ids.append(label_dicts['O'])
_input_mask = [1] * len(_input_ids)
while len(_input_ids) < max_seq_length:
_input_ids.append(0)
_input_mask.append(0)
_label_ids.append(label_dicts['O'])
_segment_ids = [0] * len(_input_ids)
assert len(_input_ids) == max_seq_length
assert len(_input_mask) == max_seq_length
assert len(_segment_ids) == max_seq_length
assert len(_label_ids) == max_seq_length
input_ids.append(_input_ids)
input_mask.append(_input_mask)
segment_ids.append(_segment_ids)
label_ids.append(_label_ids)
return input_ids, input_mask, segment_ids, label_ids
def convert_tokens_labels_to_ids(text, labels, vocab, label_dicts):
input_ids = convert_by_vocab(vocab, text)
label_list = labels.split(';')
label_map = {}
for label in label_list:
fields = label.split(':')
if len(fields) != 2:
continue
label_map[fields[0]] = fields[1]
label_ids = len(input_ids) * [label_dicts['O']]
for key, val in label_map.items():
for ele in re.finditer(key, text):
start_index = ele.span()[0]
end_index = ele.span()[1]
label_ids[start_index] = label_dicts['B-' + val]
while start_index + 1 < end_index:
label_ids[start_index + 1] = label_dicts['I-' + val]
start_index += 1
return input_ids, label_ids
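# Illustrative sketch of the conversion above (hypothetical inputs): for
# text = "张三在北京工作" and labels = "张三:PER;北京:LOC", the label string is parsed
# into {'张三': 'PER', '北京': 'LOC'}, every character starts as 'O', and each regex
# match is overwritten with B-/I- tags, giving per character:
#   张:B-PER  三:I-PER  在:O  北:B-LOC  京:I-LOC  工:O  作:O
# input_ids are the per-character vocab ids, with '[UNK]' used for characters
# missing from the BERT vocabulary.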
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab.get(item, vocab.get('[UNK]')))
return output
def cut_text(context, max_seq_length=350):
text_list = []
if not context:
return text_list
tags = '。!!??;;'
while len(context) > max_seq_length - 2:
if context[max_seq_length - 2] in tags:
text_list.append(context[:max_seq_length - 2])
context = context[max_seq_length - 2:]
else:
tmp_text = context[:max_seq_length - 2]
arr = re.split('[%s]' % tags, tmp_text)
if len(arr) > 1:
index = len(''.join(arr[:-1])) + len(arr) - 1
else:
index = max_seq_length - 2
text_list.append(context[:index])
context = context[index:]
if len(context) > 0:
text_list.append(context)
return text_list
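# Illustrative example (hypothetical input, max_seq_length=10, so each piece may
# hold at most 8 characters, leaving room for [CLS]/[SEP]):
#   cut_text("第一句话。第二句话很长没有标点", max_seq_length=10)
#   -> ["第一句话。", "第二句话很长没有", "标点"]
# The first cut lands just after the last sentence-ending mark inside the window;
# when no such mark exists, the window is cut at the raw character limit.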
def count_train_file(file_path):
count = 0
with tf.io.gfile.GFile(file_path, 'r') as f:
for _ in f:
count += 1
return count
class BertCRF(object):
def __init__(self, is_training=True, units=100, log_file_path='log.csv'):
self.units = units
self.is_training = is_training
self.log_file_path = log_file_path
self.global_step = tf.Variable(0, trainable=False)
self.model_path = os.path.join(FLAGS.output_dir, 'model/bert_ner_model_final')
self.summary_path = os.path.join(FLAGS.output_dir, 'tensorboard')
self.bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
self.vocab = load_vocab(FLAGS.vocab_file)
self.label_dicts = {'O': 0, 'B-PER': 1, 'I-PER': 2, 'B-COM': 3, 'I-COM': 4, 'B-ORG': 5, 'I-ORG': 6, 'B-LOC': 7,
'I-LOC': 8}
self.id_to_label = {val: key for key, val in self.label_dicts.items()}
self.num_labels = len(self.label_dicts)
self.input_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input_ids')
self.input_mask = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input_mask')
self.segment_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name='segment_ids')
self.label_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name='label_id')
self.dropout = tf.placeholder(dtype=tf.float32, name='dropout')
self.lengths = tf.reduce_sum(self.input_mask, axis=1)
self.batch_size = tf.shape(self.input_ids)[0]
# [batch_size, max_seq_length, 768]
self.bert_embedding = self.bert_embedding()
self.bilstm_layer = self.bilstm_layer(self.bert_embedding)
self.dense_layer = tf.keras.layers.Dense(self.num_labels)(self.bilstm_layer)
self.scores = tf.reshape(self.dense_layer, [-1, FLAGS.max_seq_length, self.num_labels])
## crf
        # Compute the CRF log-likelihood and obtain the transition parameters
self.log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(self.scores, self.label_ids,
self.lengths)
self.loss = tf.reduce_mean(-self.log_likelihood)
        # Viterbi decoding: obtain the decoded tag sequence (viterbi_sequence) and its score (viterbi_score)
self.viterbi_sequence, viterbi_score = tf.contrib.crf.crf_decode(self.scores, self.transition_params,
self.lengths)
if is_training:
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
FLAGS.init_checkpoint)
tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
train_vars = []
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
else:
train_vars.append(var)
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
with tf.variable_scope('optimizer'):
# grads = tf.gradients(self.loss, train_vars)
# (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
#
# self.train_op = tf.train.AdamOptimizer(learning_rate=0.01).apply_gradients(
# zip(grads, train_vars), global_step=self.global_step)
self.train_data_count = count_train_file(FLAGS.train_file)
num_train_steps = int(self.train_data_count / FLAGS.batch_size)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
self.train_op = self.create_optimizer(self.loss, init_lr=5e-5, num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps)
self.correct_num = tf.cast(tf.equal(self.label_ids, self.viterbi_sequence), tf.float32)
self.accuracy = tf.reduce_mean(self.correct_num)
def create_optimizer(self, loss, init_lr, num_train_steps, num_warmup_steps):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(learning_rate, global_step, num_train_steps, end_learning_rate=0.0,
power=1.0, cycle=False)
        # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
        # learning rate will be `global_step/num_warmup_steps * init_lr`.
        if num_warmup_steps:
            global_steps_int = tf.cast(global_step, tf.int32)
            warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
            global_steps_float = tf.cast(global_steps_int, tf.float32)
            warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
            warmup_percent_done = global_steps_float / warmup_steps_float
            warmup_learning_rate = init_lr * warmup_percent_done
            is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
            learning_rate = ((1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9,
beta_2=0.999, epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)
# Normally the global step update is done inside of `apply_gradients`.
# However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
# a different optimizer, you should probably take this line out.
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
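    # Worked example of the schedule above (hypothetical numbers): with
    # init_lr=5e-5, num_train_steps=1000 and num_warmup_steps=100, step 50 uses
    # 50/100 * 5e-5 = 2.5e-5 (linear warmup); after step 100 the rate follows the
    # linear (power=1.0) polynomial decay from 5e-5 towards 0.0 at step 1000,
    # e.g. about 5e-5 * (1 - 550/1000) = 2.25e-5 around step 550.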
def bert_embedding(self):
bert_model = modeling.BertModel(config=self.bert_config, is_training=self.is_training, input_ids=self.input_ids,
input_mask=self.input_mask, token_type_ids=self.segment_ids,
use_one_hot_embeddings=False)
embedding = bert_model.get_sequence_output()
return embedding
def bilstm_layer(self, embed):
fw_cell = tf.nn.rnn_cell.BasicLSTMCell(self.units)
bw_cell = tf.nn.rnn_cell.BasicLSTMCell(self.units)
outputs, final_states = nn.bidirectional_dynamic_rnn(cell_fw=fw_cell, cell_bw=bw_cell, inputs=embed,
dtype=tf.float32)
output_fw, output_bw = outputs
output = tf.concat([output_fw, output_bw], axis=-1)
return output
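    # Shape sketch for the layer above: with the default units=100, the forward
    # and backward LSTMs each return [batch_size, max_seq_length, 100], so the
    # concatenated output fed to the dense/CRF layers is
    # [batch_size, max_seq_length, 200].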
def train_step(self, sess, records, ner_processor):
examples = ner_processor.get_train_examples(records)
input_ids, input_mask, segment_ids, label_ids = convert_batch_example(examples,
max_seq_length=FLAGS.max_seq_length,
vocab=self.vocab,
label_dicts=self.label_dicts)
train_op, accuracy, loss = sess.run([self.train_op, self.accuracy, self.loss],
feed_dict={self.input_ids: input_ids, self.input_mask: input_mask,
self.segment_ids: segment_ids, self.label_ids: label_ids,
self.dropout: 0.5})
return train_op, accuracy, loss
def evaluate_step(self, sess, records, ner_processor):
evaluate_examples = ner_processor.get_train_examples(records)
evaluate_input_ids, evaluate_input_mask, evaluate_segment_ids, evaluate_label_ids = convert_batch_example(
evaluate_examples, max_seq_length=FLAGS.max_seq_length, vocab=self.vocab, label_dicts=self.label_dicts)
evaluate_accuracy, evaluate_loss, correct_num = sess.run([self.accuracy, self.loss, self.correct_num],
feed_dict={self.input_ids: evaluate_input_ids,
self.input_mask: evaluate_input_mask,
self.segment_ids: evaluate_segment_ids,
self.label_ids: evaluate_label_ids,
self.dropout: 1.0})
return evaluate_accuracy, evaluate_loss, correct_num
def train(self):
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("accuracy", self.accuracy)
tf.summary.merge_all()
ner_processor = NerProcessor()
saver = tf.train.Saver()
max_acc = 0.0
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
with tf.io.gfile.GFile(FLAGS.train_file, 'r') as f:
train_records, train_nums_batch = [], 0
for line in f:
if len(train_records) == FLAGS.batch_size:
train_op, accuracy, loss = self.train_step(sess, train_records, ner_processor)
train_nums_batch += 1
if train_nums_batch % 10 == 0:
with tf.io.gfile.GFile(FLAGS.evaluate_file, 'r') as evaluate_f:
evaluate_nums_batch = 0
evaluate_accuracy, evaluate_loss = 0.0, 0.0
evaluate_records = []
for evaluate_line in evaluate_f:
if len(evaluate_records) == FLAGS.batch_size:
_accuracy, _loss, correct_num = self.evaluate_step(
sess, evaluate_records, ner_processor)
evaluate_nums_batch += 1
evaluate_accuracy += _accuracy
evaluate_loss += _loss
evaluate_records = []
evaluate_records.append(evaluate_line.strip())
                                if len(evaluate_records) > 0:
                                    _accuracy, _loss, correct_num = self.evaluate_step(
                                        sess, evaluate_records, ner_processor)
                                    evaluate_nums_batch += 1
                                    evaluate_accuracy += _accuracy
                                    evaluate_loss += _loss
                                evaluate_accuracy = round(evaluate_accuracy / evaluate_nums_batch, 2)
                                evaluate_loss = round(evaluate_loss / evaluate_nums_batch, 2)
                                if evaluate_accuracy > max_acc:
                                    saver.save(sess=sess, save_path=self.model_path)
                                    tf.summary.FileWriter(self.summary_path, graph=sess.graph)
                                    max_acc = evaluate_accuracy
tf.logging.info(
'%d batch: train accuracy is %f, train loss is %f; evaluate accuracy is %f, evaluate loss is %f\n' % (
train_nums_batch, round(accuracy, 6), round(loss, 6),
round(evaluate_accuracy, 6),
round(evaluate_loss, 6)))
train_records = []
train_records.append(line.strip())
if len(train_records) > 0:
train_op, accuracy, loss = self.train_step(sess, train_records, ner_processor)
train_nums_batch += 1
with tf.io.gfile.GFile(FLAGS.evaluate_file, 'r') as evaluate_f:
evaluate_nums_batch = 0
evaluate_accuracy, evaluate_loss = 0.0, 0.0
evaluate_records = []
for evaluate_line in evaluate_f:
if len(evaluate_records) == FLAGS.batch_size:
_accuracy, _loss, correct_num = self.evaluate_step(sess,
evaluate_records,
ner_processor)
evaluate_nums_batch += 1
evaluate_accuracy += _accuracy
evaluate_loss += _loss
evaluate_records = []
evaluate_records.append(evaluate_line.strip())
if len(evaluate_records) > 0:
_accuracy, _loss, correct_num = self.evaluate_step(sess,
evaluate_records,
ner_processor)
evaluate_nums_batch += 1
evaluate_accuracy += _accuracy
evaluate_loss += _loss
evaluate_accuracy, evaluate_loss = round(
(evaluate_accuracy / evaluate_nums_batch), 2), round(
(evaluate_loss / evaluate_nums_batch), 2)
if evaluate_accuracy >= max_acc:
max_acc = evaluate_accuracy
saver.save(sess=sess, save_path=self.model_path)
tf.summary.FileWriter(self.summary_path, graph=sess.graph)
tf.logging.info(
'%d batch: train accuracy is %f, train loss is %f; evaluate accuracy is %f, evaluate loss is %f\n' % (
train_nums_batch, round(accuracy, 6), round(loss, 6),
round(max_acc, 6),
round(evaluate_loss, 6)))
saver.save(sess=sess,
save_path=os.path.join(FLAGS.output_dir, 'model/bert_ner_model_final'))
tf.summary.FileWriter(self.summary_path, graph=sess.graph)
def predict(self, records):
ner_processor = NerProcessor()
examples = ner_processor.get_test_examples(records)
texts = []
        for example in examples:
            texts.append(' ' + example.text)
input_ids, input_mask, segment_ids, label_ids = convert_batch_example(examples,
max_seq_length=FLAGS.max_seq_length,
vocab=self.vocab,
label_dicts=self.label_dicts)
tags = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess=sess,
save_path=self.model_path)
# self.export_model(sess, '../../export_model/21')
viterbi_sequence = sess.run(self.viterbi_sequence,
feed_dict={self.input_ids: input_ids, self.input_mask: input_mask,
self.segment_ids: segment_ids, self.label_ids: label_ids,
self.dropout: 1.0})
for row in viterbi_sequence:
tags.append([self.id_to_label[ele] for ele in row])
entities = []
entity_list = []
for text, tag in zip(texts, tags):
entities.append(self.parse_entities(text, tag))
for entity in entities:
entity_tags = set()
for ele in entity:
entity_tags.add(ele)
entity_list.append(list(entity_tags))
return entity_list
def parse_entities(self, text, tag):
entities = []
i = 0
start = 0
val = ""
while i < len(tag):
if 'B-' in tag[i]:
val = tag[i][2:]
start = i
i += 1
elif 'I-' in tag[i]:
while 'I-' in tag[i]:
i += 1
words = []
for ele in text[start:i]:
words.append(ele)
entities.append('%s,%s' % (''.join(words), val))
else:
i += 1
return entities
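    # Illustrative example (hypothetical prediction): predict() prepends a space
    # to each text so that character i lines up with tag i (the [CLS] slot), so
    #   parse_entities(" 张三在北京", ['O', 'B-PER', 'I-PER', 'O', 'B-LOC', 'I-LOC', 'O'])
    #   -> ['张三,PER', '北京,LOC']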
def predict2(self, records, export_path):
ner_processor = NerProcessor()
examples = ner_processor.get_test_examples(records)
texts = []
for example in examples:
texts.append(' ' + example.text)
_input_ids, _input_mask, _segment_ids, _label_ids = convert_batch_example(examples,
max_seq_length=FLAGS.max_seq_length,
vocab=self.vocab,
label_dicts=self.label_dicts)
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
tags = []
with tf.Session(graph=tf.Graph(), config=sess_config) as sess:
meta_graph_def = tf.compat.v1.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
export_path)
signature = meta_graph_def.signature_def
input_ids = signature['predict_label'].inputs['input_ids'].name
input_mask = signature['predict_label'].inputs["input_mask"].name
segment_ids = signature['predict_label'].inputs[
'segment_ids'].name
# label_ids = signature['predict_label'].inputs['label_ids'].name
# dropout = signature['predict_label'].inputs['dropout'].name
input_ids = sess.graph.get_tensor_by_name(input_ids)
input_mask = sess.graph.get_tensor_by_name(input_mask)
segment_ids = sess.graph.get_tensor_by_name(segment_ids)
# label_ids = sess.graph.get_tensor_by_name(label_ids)
# dropout = sess.graph.get_tensor_by_name(dropout)
viterbi_sequence = signature['predict_label'].outputs['viterbi_sequence'].name
viterbi_sequence = sess.graph.get_tensor_by_name(viterbi_sequence)
predicts = sess.run(viterbi_sequence, feed_dict={input_ids: _input_ids, input_mask: _input_mask,
segment_ids: _segment_ids,
# label_ids: _label_ids, # dropout: 1.0,
})
for row in predicts:
tags.append([self.id_to_label[ele] for ele in row])
entities = []
for text, tag in zip(texts, tags):
entities.append(self.parse_entities(text, tag))
return entities
def export_model(self, sess, export_path):
input_ids = tf.saved_model.utils.build_tensor_info(self.input_ids)
input_mask = tf.saved_model.utils.build_tensor_info(self.input_mask)
segment_ids = tf.saved_model.utils.build_tensor_info(self.segment_ids)
label_ids = tf.saved_model.utils.build_tensor_info(self.label_ids)
dropout = tf.saved_model.utils.build_tensor_info(self.dropout)
viterbi_sequence = tf.saved_model.utils.build_tensor_info(self.viterbi_sequence)
predict_signature = (tf.saved_model.signature_def_utils.build_signature_def(
inputs={'input_ids': input_ids, "input_mask": input_mask, "segment_ids": segment_ids,
"label_ids": label_ids,
'dropout': dropout}, outputs={'viterbi_sequence': viterbi_sequence},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={'predict_label': predict_signature},
legacy_init_op=legacy_init_op)
builder.save()
if __name__ == '__main__':
# bert_crf = BertCRF()
# bert_crf.train()
pass
```
#### File: model/relation_classification/utils.py
```python
import os
import numpy as np
import pandas as pd
import re
base_path = '../../data/SemEval'
class WordEmbeddingLoader(object):
def __init__(self, embedding_path, word_dim):
self.embedding_path = embedding_path
self.word_dim = word_dim
def load_embedding(self):
word2id = dict()
word_vec = list()
word2id['PAD'] = len(word2id)
pad_emb = np.zeros(shape=[self.word_dim], dtype=np.float32)
word_vec.append(pad_emb)
word2id['UNK'] = len(word2id)
pad_emb = np.zeros(shape=[self.word_dim], dtype=np.float32)
word_vec.append(pad_emb)
with open(self.embedding_path, 'r', encoding='utf-8') as f:
for line in f:
fields = line.strip().split(' ')
if len(fields) != self.word_dim + 1:
continue
word, embed = fields[0], fields[1:]
word2id[word] = len(word2id)
word_vec.append(np.asarray(embed, dtype=np.float32))
word_vec = np.asarray(word_vec)
return word2id, word_vec
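    # Illustrative sketch (hypothetical file contents): load_embedding expects one
    # space-separated line per word, e.g. for word_dim=3:
    #   the 0.12 -0.03 0.51
    #   cat 0.40 0.22 -0.10
    # Ids 0 and 1 are reserved for 'PAD' and 'UNK' (both all-zero vectors), so
    # 'the' gets id 2 and word_vec has shape (num_kept_words + 2, word_dim).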
def map_label_to_id(label_file):
label2id = dict()
with open(label_file, 'r', encoding='utf-8') as f:
for line in f:
label2id[line.strip()] = len(label2id)
return label2id
def entity_context(entity_idx, words):
context = []
if entity_idx >= 1:
context.append(words[entity_idx - 1])
else:
context.append(words[entity_idx])
context.append(words[entity_idx])
if entity_idx < len(words) - 1:
context.append(words[entity_idx + 1])
else:
context.append(words[entity_idx])
return context
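# Illustrative example (hypothetical tokens):
#   entity_context(2, ['the', 'big', 'dog', 'barked'])  -> ['big', 'dog', 'barked']
#   entity_context(0, ['dog', 'barked'])                -> ['dog', 'dog', 'barked']
# i.e. a three-word window around the entity, padded with the entity word itself
# at sentence boundaries.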
def distance(n):
if n < -60:
return 0
elif -60 <= n <= 60:
return n + 61
return 122
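# Illustrative mapping of relative token positions into 123 buckets:
#   distance(-70) -> 0, distance(-60) -> 1, distance(0) -> 61,
#   distance(60) -> 121, distance(70) -> 122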
def process_batch_data(sentences, label2id, word2id, max_len=128):
texts, pos1, pos2, labels, contexts = [], [], [], [], []
for ele in sentences:
fields = ele.strip().split('\t')
if len(fields) != 2:
continue
sentence = fields[-1].lower()
words = sentence.split(' ')
entity1_start, entity1_end, entity2_start, entity2_end = -1, -1, -1, -1
for i, word in enumerate(words):
if '<e1>' in word and entity1_start == -1:
entity1_start = i
if '<e2>' in word and entity2_start == -1:
entity2_start = i
if '</e1>' in word and entity1_end == -1:
entity1_end = i
if '</e2>' in word and entity2_end == -1:
entity2_end = i
word_list = []
for word in words:
word = re.sub('(?:<e1>|</e1>|</e2>|<e2>)', '', word)
word_list.append(word)
entity_contexts = []
for word in entity_context(entity1_start, word_list):
entity_contexts.append(word2id.get(word, word2id['UNK']))
for word in entity_context(entity2_start, word_list):
entity_contexts.append(word2id.get(word, word2id['UNK']))
contexts.append(entity_contexts)
if len(word_list) > max_len:
word_list = word_list[:max_len]
while len(word_list) < max_len:
word_list.append('PAD')
_texts, _pos1, _pos2 = [], [], []
for i, word in enumerate(word_list):
_texts.append(word2id.get(word, word2id['UNK']))
if i < entity1_start:
_pos1.append(i - entity1_start)
_pos2.append(i - entity2_start)
elif entity1_start <= i <= entity1_end:
_pos1.append(0)
                _pos2.append(i - entity2_start)
elif entity1_end < i < entity2_start:
_pos1.append(i - entity1_end)
_pos2.append(i - entity2_start)
elif entity2_start <= i <= entity2_end:
_pos1.append(i - entity1_end)
_pos2.append(0)
elif i > entity2_end:
_pos1.append(i - entity1_end)
_pos2.append(i - entity2_end)
texts.append(_texts)
pos1.append([distance(ele) for ele in _pos1])
pos2.append([distance(ele) for ele in _pos2])
labels.append(label2id[fields[0].strip()])
return texts, pos1, pos2, labels, contexts
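# Illustrative sketch (hypothetical SemEval-style line: tab-separated label and
# sentence with <e1>/<e2> markers):
#   "Cause-Effect(e2,e1)\tthe <e1>fire</e1> was caused by a <e2>short circuit</e2>"
# The markers are stripped, each token is mapped through word2id, pos1/pos2 hold
# the bucketed distances of every token to e1 and e2, contexts holds the
# three-word windows around both entity head tokens, and labels holds
# label2id of the first field.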
if __name__ == '__main__':
# wordEmbed = WordEmbeddingLoader(os.path.join(base_path, 'vector_50.txt'), word_dim=50)
# word2id, word_vec = wordEmbed.load_embedding()
# train_texts = pd.read_csv(os.path.join(base_path, 'train/train.txt'), sep='\t', header=None)
# train_labels = pd.read_csv(os.path.join(base_path, 'train/train_result_full.txt'), sep='\t', header=None)
#
# texts, pos1, pos2, labels, contexts = [], [], [], [], []
# pos_num = -1
# label2id = map_label_to_id(os.path.join(base_path, 'labels.csv'))
#
# sentences, tags = [], []
#
# with open(os.path.join(base_path, 'test_data.csv'), 'r', encoding='utf-8') as f:
# for line in f:
# sentences.append(line.strip())
# texts, pos1, pos2, labels, contexts = process_batch_data(sentences[:1], label2id)
#
# for i in range(len(texts)):
# print(texts[i])
# print(pos1[i])
# print(pos2[i])
# print(labels[i])
# print(contexts[i])
# print('\n')
import json
print(json.dumps({'a': False}, ensure_ascii=False))
pre_doc_list = sorted(
[{'id': 1, 'time': '2022-03-02'}, {'id': 3, 'time': '2022-03-22'}, {'id': 8, 'time': '2021-03-02'}],
key=lambda row: pd.to_datetime(row['time']))
pre_doc_list.reverse()
print(pre_doc_list)
``` |
{
"source": "jiangfeng1124/ChemRxnExtractor",
"score": 2
} |
#### File: chemrxnextractor/train/role_labeling.py
```python
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
from tqdm.auto import tqdm, trange
from seqeval.metrics import f1_score, precision_score, recall_score
import numpy as np
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SequentialSampler
from transformers import AutoConfig, AutoTokenizer
from transformers.data.data_collator import default_data_collator
from transformers import set_seed
from .trainer import IETrainer as Trainer
from chemrxnextractor.models import BertForRoleLabeling, BertCRFForRoleLabeling
from chemrxnextractor.data import RoleDataset, PlainRoleDataset
from chemrxnextractor.data.utils import get_labels
from chemrxnextractor.constants import PROD_START_MARKER, PROD_END_MARKER
from chemrxnextractor.data.role import write_predictions
from chemrxnextractor.utils import create_logger
logger = logging.getLogger(__name__)
SPECIAL_TOKENS = [PROD_START_MARKER, PROD_END_MARKER]
def train(model_args, data_args, train_args):
if (
os.path.exists(train_args.output_dir)
and os.listdir(train_args.output_dir)
and train_args.do_train
and not train_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({train_args.output_dir}) already exists and is not empty."
" Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if train_args.local_rank in [-1, 0] else logging.WARN,
)
# logger = create_logger(name="train_role", save_dir=train_args.output_dir)
logger.info("Training/evaluation parameters %s", train_args)
# Set seed
set_seed(train_args.seed)
labels = get_labels(data_args.labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
additional_special_tokens=SPECIAL_TOKENS
)
if model_args.use_crf:
model = BertCRFForRoleLabeling.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
tagging_schema="BIO",
use_cls=model_args.use_cls,
prod_pooler=model_args.prod_pooler
)
else:
model = BertForRoleLabeling.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
use_cls=model_args.use_cls,
prod_pooler=model_args.prod_pooler
)
model.resize_token_embeddings(len(tokenizer))
# Get datasets
train_dataset = (
RoleDataset(
data_file=os.path.join(data_args.data_dir, "train.txt"),
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache
)
if train_args.do_train
else None
)
eval_dataset = (
RoleDataset(
data_file=os.path.join(data_args.data_dir, "dev.txt"),
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache
)
if train_args.do_eval
else None
)
def compute_metrics(predictions, label_ids) -> Dict:
label_list = [[label_map[x] for x in seq] for seq in label_ids]
preds_list = [[label_map[x] for x in seq] for seq in predictions]
return {
"precision": precision_score(label_list, preds_list),
"recall": recall_score(label_list, preds_list),
"f1": f1_score(label_list, preds_list),
}
metrics_fn = compute_metrics
# Initialize our Trainer
trainer = Trainer(
model=model,
args=train_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=metrics_fn,
use_crf=model_args.use_crf
)
# Training
if train_args.do_train:
trainer.train()
# Pass model_path to train() if continue training from an existing ckpt.
# trainer.train(
# model_path=model_args.model_name_or_path
# if os.path.isdir(model_args.model_name_or_path)
# else None
# )
trainer.save_model()
tokenizer.save_pretrained(train_args.output_dir)
# Evaluation
if train_args.do_eval:
logger.info("*** Evaluate ***")
output = trainer.evaluate()
predictions = output['predictions']
label_ids = output['label_ids']
metrics = output["metrics"]
output_eval_file = os.path.join(train_args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in metrics.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
preds_list = [[label_map[x] for x in seq] for seq in predictions]
# Save predictions
write_predictions(
os.path.join(data_args.data_dir, "dev.txt"),
os.path.join(train_args.output_dir, "eval_predictions.txt"),
preds_list
)
# Predict
if train_args.do_predict:
test_dataset = RoleDataset(
data_file=os.path.join(data_args.data_dir, "test.txt"),
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache
)
output = trainer.predict(test_dataset)
predictions = output['predictions']
label_ids = output['label_ids']
metrics = output["metrics"]
# Note: preds_list doesn't contain labels for [Prod] and [/Prod]
preds_list = [[label_map[x] for x in seq] for seq in predictions]
output_test_results_file = os.path.join(train_args.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key, value in metrics.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Save predictions
write_predictions(
os.path.join(data_args.data_dir, "test.txt"),
os.path.join(train_args.output_dir, "test_predictions.txt"),
preds_list
)
def predict(model_args, predict_args):
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# logger = create_logger(name="predict_role", save_dir=train_args.output_dir)
logger.info("Predict parameters %s", predict_args)
# Prepare prod-ext task
labels = get_labels(predict_args.labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
# Load pretrained model and tokenizer
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
additional_special_tokens=SPECIAL_TOKENS
)
if model_args.use_crf:
model = BertCRFForRoleLabeling.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
tagging_schema="BIO",
use_cls=model_args.use_cls,
prod_pooler=model_args.prod_pooler,
)
else:
model = BertForRoleLabeling.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
use_cls=model_args.use_cls,
prod_pooler=model_args.prod_pooler,
)
device = torch.device(
"cuda"
if (not predict_args.no_cuda and torch.cuda.is_available())
else "cpu"
)
model = model.to(device)
# load test dataset
test_dataset = PlainRoleDataset(
data_file=predict_args.input_file,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=predict_args.max_seq_length,
overwrite_cache=predict_args.overwrite_cache,
)
sampler = SequentialSampler(test_dataset)
data_loader = DataLoader(
test_dataset,
sampler=sampler,
batch_size=predict_args.batch_size,
collate_fn=default_data_collator
)
logger.info("***** Running Prediction *****")
logger.info(" Num examples = %d", len(data_loader.dataset))
logger.info(" Batch size = %d", predict_args.batch_size)
model.eval()
with open(predict_args.input_file, "r") as f:
all_preds = []
for inputs in tqdm(data_loader, desc="Predicting"):
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(device)
with torch.no_grad():
outputs = model(
input_ids=inputs['input_ids'],
attention_mask=inputs['attention_mask'],
prod_start_mask=inputs['prod_start_mask'],
prod_end_mask=inputs['prod_end_mask'],
prod_mask=inputs['prod_mask'],
token_type_ids=inputs['token_type_ids']
)
logits = outputs[0]
preds = model.decode(logits, mask=inputs['decoder_mask'].bool())
preds_list = [[label_map[x] for x in seq] for seq in preds]
all_preds += preds_list
write_predictions(
predict_args.input_file,
predict_args.output_file,
all_preds,
align="plain"
)
```
#### File: chemrxnextractor/train/trainer.py
```python
import logging
import math
import os
import re
from typing import Any, Callable, Optional
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from tqdm.auto import tqdm, trange
from transformers import Trainer
from transformers import PreTrainedModel
# from transformers import is_wandb_available
from transformers import TrainingArguments
from transformers.data.data_collator import DataCollator
from transformers import AdamW, get_linear_schedule_with_warmup
logger = logging.getLogger(__name__)
class IETrainer(Trainer):
"""
    IETrainer inherits from transformers.Trainer and is optimized for IE tasks.
"""
def __init__(
self,
model: PreTrainedModel,
args: TrainingArguments,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
compute_metrics=None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None,
use_crf: Optional[bool]=False
):
super(IETrainer, self).__init__(
model=model,
args=args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
optimizers=optimizers
)
self.use_crf = use_crf
def get_optimizers(
self,
num_training_steps: int
) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]:
"""
Setup the optimizer and the learning rate scheduler.
"""
if self.optimizers is not None:
return self.optimizers
no_decay = ["bias", "LayerNorm.weight"]
if self.use_crf:
crf = "crf"
crf_lr = self.args.crf_learning_rate
logger.info(f"Learning rate for CRF: {crf_lr}")
optimizer_grouped_parameters = [
{
"params": [
p for n, p in self.model.named_parameters()
if (not any(nd in n for nd in no_decay)) and (crf not in n)
],
"weight_decay": self.args.weight_decay
},
{
"params": [p for p in self.model.crf.parameters()],
"weight_decay": self.args.weight_decay,
"lr": crf_lr
},
{
"params": [
p for n, p in self.model.named_parameters()
                        if any(nd in n for nd in no_decay) and (crf not in n)
],
"weight_decay": 0.0,
},
]
else:
optimizer_grouped_parameters = [
{
"params": [
p for n, p in self.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p for n, p in self.model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.args.learning_rate,
eps=self.args.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=num_training_steps
)
return optimizer, scheduler
def evaluate(self, eval_dataset: Optional[Dataset] = None) -> Dict:
eval_dataloader = self.get_eval_dataloader(eval_dataset)
output = self._prediction_loop(eval_dataloader, description="Evaluation")
self._log(output['metrics'])
return output
def predict(self, test_dataset: Dataset) -> Dict:
test_dataloader = self.get_test_dataloader(test_dataset)
return self._prediction_loop(test_dataloader, description="Prediction")
def _prediction_loop(
self,
dataloader: DataLoader,
description: str
) -> Dict:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`
Works both with or without labels.
"""
model = self.model
batch_size = dataloader.batch_size
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", self.num_examples(dataloader))
logger.info(" Batch size = %d", batch_size)
model.eval()
eval_losses: List[float] = []
preds_ids = []
label_ids = []
for inputs in tqdm(dataloader, desc=description):
has_labels = any(
inputs.get(k) is not None
for k in ["labels", "lm_labels", "masked_lm_labels"]
)
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
step_eval_loss, logits = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
mask = inputs["decoder_mask"].to(torch.bool)
preds = model.decode(logits, mask=mask)
preds_ids.extend(preds)
if inputs.get("labels") is not None:
labels = [inputs["labels"][i, mask[i]].tolist() \
for i in range(inputs["labels"].shape[0])]
label_ids.extend(labels)
assert len(preds) == len(labels)
assert len(preds[0]) == len(labels[0])
if self.compute_metrics is not None and \
len(preds_ids) > 0 and \
len(label_ids) > 0:
metrics = self.compute_metrics(preds_ids, label_ids)
else:
metrics = {}
if len(eval_losses) > 0:
metrics['eval_loss'] = np.mean(eval_losses)
# Prefix all keys with eval_
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
return {'predictions': preds_ids, 'label_ids': label_ids, 'metrics': metrics}
def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None:
if self.epoch is not None:
logs["epoch"] = self.epoch
if self.global_step is None:
# when logging evaluation metrics without training
self.global_step = 0
# if is_wandb_available():
# if self.is_world_master():
# wandb.log(logs, step=self.global_step)
output = {**logs, **{"step": self.global_step}}
if iterator is not None:
iterator.write(output)
else:
logger.info(
{k:round(v, 4) if isinstance(v, float) else v for k, v in output.items()}
)
``` |
{
"source": "jiangfeng1124/transfer",
"score": 3
} |
#### File: msda-src/model_utils/__init__.py
```python
from .lstm import LSTM
from .cnn import CNN
from .mlp import MLP
from .tagger import Tagger
MODEL_CLASS = {
'lstm' : LSTM,
'gru' : LSTM,
'cnn' : CNN,
'mlp' : MLP,
'tagger': Tagger,
}
from .domain_critic import ClassificationD, MMD, CoralD, WassersteinD
CRITIC_CLASS = {
'dann' : ClassificationD,
'mmd' : MMD,
'coral' : CoralD,
'wd' : WassersteinD
}
def get_model_class(model_name):
model_name = model_name.lower()
model = MODEL_CLASS.get(model_name, None)
if model is None:
raise Exception("Unknown model class: {}".format(
model_name
))
return model
def get_critic_class(critic_name):
critic_name = critic_name.lower()
critic = CRITIC_CLASS.get(critic_name, None)
if critic is None:
raise Exception("Unknown critic class: {}".format(
critic_name
))
return critic
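# Minimal usage sketch (assuming the names come from CLI flags):
#   encoder_cls = get_model_class('lstm')   # -> LSTM
#   critic_cls = get_critic_class('mmd')    # -> MMD
# Unknown names raise an Exception instead of silently falling back.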
def get_decoder_model_class():
model_name = "lstm_decoder"
model = MODEL_CLASS.get(model_name, None)
if model is None:
raise Exception("Unknown model class: {}".format(
model_name
))
return model
``` |
{
"source": "jiangfengbing/pic2alioss",
"score": 2
} |
#### File: pic2alioss/src/saveconfig.py
```python
import sys
import json
__author__ = "<NAME>"
__copyright__ = "Copyright 2020 <NAME>. All rights reserved."
def main():
try:
lines = sys.stdin.readlines()
data = []
for line in lines:
line = line.rstrip()
data.append(line)
config = {
'bucket': data[0],
'endpoint': data[1],
'accessKeyId': data[2],
'accessKeySecret': data[3],
'urlPrefix': data[4]
}
        with open('config.json', 'w') as f:
            f.write(json.dumps(config))
        print('Configuration saved successfully!')
        return
    except Exception:
        pass
    print('Configuration failed!')
if __name__ == '__main__':
main()
``` |
{
"source": "jiangfuqing/Chip-seq",
"score": 2
} |
#### File: Chip-seq/bin/check_design.py
```python
import os
import sys
import requests
import argparse
############################################
############################################
## PARSE ARGUMENTS
############################################
############################################
Description = 'Reformat nf-core/chipseq design file and check its contents.'
Epilog = """Example usage: python check_design.py <DESIGN_FILE> <READ_MAPPING_FILE> <CONTROL_MAPPING_FILE>"""
argParser = argparse.ArgumentParser(description=Description, epilog=Epilog)
## REQUIRED PARAMETERS
argParser.add_argument('DESIGN_FILE', help="Input design file.")
argParser.add_argument('READ_MAPPING_FILE', help="Output design file containing sample ids and reads.")
argParser.add_argument('CONTROL_MAPPING_FILE', help="Output design file containing ip vs control mappings.")
args = argParser.parse_args()
############################################
############################################
## MAIN FUNCTION
############################################
############################################
def reformat_design(DesignFile,ReadMappingFile,ControlMappingFile):
ERROR_STR = 'ERROR: Please check design file'
HEADER = ['group', 'replicate', 'fastq_1', 'fastq_2', 'antibody', 'control']
## CHECK HEADER
fin = open(DesignFile,'r')
header = fin.readline().strip().split(',')
if header != HEADER:
print "{} header: {} != {}".format(ERROR_STR,','.join(header),','.join(HEADER))
sys.exit(1)
numColList = []
sampleMappingDict = {}
antibodyDict = {}
while True:
line = fin.readline()
if line:
lspl = [x.strip() for x in line.strip().split(',')]
group,replicate,fastQFiles,antibody,control = lspl[0],lspl[1],[x for x in lspl[2:-2] if x],lspl[-2],lspl[-1]
## CHECK VALID NUMBER OF COLUMNS PER SAMPLE
numCols = len(lspl)
if numCols not in [6]:
print "{}: Invalid number of columns (should be 6)!\nLine: '{}'".format(ERROR_STR,line.strip())
sys.exit(1)
numColList.append(numCols)
## CHECK GROUP ID DOESNT CONTAIN SPACES
if group.find(' ') != -1:
print "{}: Group id contains spaces!\nLine: '{}'".format(ERROR_STR,line.strip())
sys.exit(1)
## CHECK REPLICATE COLUMN IS INTEGER
if not replicate.isdigit():
print "{}: Replicate id not an integer!\nLine: '{}'".format(ERROR_STR,line.strip())
sys.exit(1)
replicate = int(replicate)
for fastq in fastQFiles:
## CHECK FASTQ FILE EXTENSION
if fastq[-9:] != '.fastq.gz' and fastq[-6:] != '.fq.gz':
print "{}: FastQ file has incorrect extension (has to be '.fastq.gz' or 'fq.gz') - {}\nLine: '{}'".format(ERROR_STR,fastq,line.strip())
sys.exit(1)
## CREATE GROUP MAPPING DICT = {GROUP_ID: {REPLICATE_ID:[[FASTQ_FILES]]}
if not sampleMappingDict.has_key(group):
sampleMappingDict[group] = {}
if not sampleMappingDict[group].has_key(replicate):
sampleMappingDict[group][replicate] = []
sampleMappingDict[group][replicate].append(fastQFiles)
## CHECK BOTH ANTIBODY AND CONTROL COLUMNS HAVE VALID VALUES
if antibody:
if antibody.find(' ') != -1:
print "{}: Antibody id contains spaces!\nLine: '{}'".format(ERROR_STR,line.strip())
sys.exit(1)
if not control:
print "{}: both Antibody and Control must be specified!\nLine: '{}'".format(ERROR_STR,line.strip())
sys.exit(1)
if control:
if control.find(' ') != -1:
print "{}: Control id contains spaces!\nLine: '{}'".format(ERROR_STR,line.strip())
sys.exit(1)
if not antibody:
print "{}: both Antibody and Control must be specified!\nLine: '{}'".format(ERROR_STR,line.strip())
sys.exit(1)
## CREATE ANTIBODY MAPPING CONTROL DICT
if antibody and control:
antibodyDict[group] = (antibody,control)
else:
fin.close()
break
## CHECK IF DATA IS PAIRED-END OR SINGLE-END AND NOT A MIXTURE
if min(numColList) != max(numColList):
print "{}: Mixture of paired-end and single-end reads!".format(ERROR_STR)
sys.exit(1)
## CHECK IF ANTIBODY AND CONTROL COLUMNS HAVE BEEN SPECIFIED AT LEAST ONCE
if len(antibodyDict) == 0:
print "{}: Antibody and Control must be specified at least once!".format(ERROR_STR)
sys.exit(1)
## WRITE READ MAPPING FILE
antibodyGroupDict = {}
fout = open(ReadMappingFile,'w')
fout.write(','.join(['sample_id','fastq_1','fastq_2']) + '\n')
for group in sorted(sampleMappingDict.keys()):
## CHECK THAT REPLICATE IDS ARE IN FORMAT 1..<NUM_REPLICATES>
uniq_rep_ids = set(sampleMappingDict[group].keys())
if len(uniq_rep_ids) != max(uniq_rep_ids):
print "{}: Replicate IDs must start with 1..<num_replicates>\nGroup: {}, Replicate IDs: {}".format(ERROR_STR,group,list(uniq_rep_ids))
sys.exit(1)
## RECONSTRUCT LINE FOR SAMPLE IN DESIGN
for replicate in sorted(sampleMappingDict[group].keys()):
for idx in range(len(sampleMappingDict[group][replicate])):
fastQFiles = sampleMappingDict[group][replicate][idx]
## GET SAMPLE_ID,FASTQ_1,FASTQ_2 COLUMNS
sample_id = "{}_R{}_T{}".format(group,replicate,idx+1)
oList = [sample_id] + fastQFiles
if len(fastQFiles) == 1:
oList += ['']
fout.write(','.join(oList) + '\n')
## EXTRAPOLATE CONTROL COLUMN
if antibodyDict.has_key(group):
antibody,control = antibodyDict[group]
if control in sampleMappingDict.keys():
control_id = "{}_R1".format(control)
if sampleMappingDict[control].has_key(replicate):
control_id = "{}_R{}".format(control,replicate)
if not antibodyGroupDict.has_key(antibody):
antibodyGroupDict[antibody] = {}
if not antibodyGroupDict[antibody].has_key(group):
antibodyGroupDict[antibody][group] = []
antibodyList = [sample_id[:-3],control_id]
if not antibodyList in antibodyGroupDict[antibody][group]:
antibodyGroupDict[antibody][group].append(antibodyList)
else:
                        print "{}: Control id not a valid group\nControl id: {}, Valid Groups: {}".format(ERROR_STR,control,sorted(sampleMappingDict.keys()))
sys.exit(1)
fout.close()
## WRITE SAMPLE TO CONTROL MAPPING FILE
fout = open(ControlMappingFile,'w')
fout.write(','.join(['sample_id','control_id','antibody','replicatesExist','multipleGroups']) + '\n')
for antibody in sorted(antibodyGroupDict.keys()):
repsExist = '0'
if max([len(x) for x in antibodyGroupDict[antibody].values()]) > 1:
repsExist = '1'
multipleGroups = '0'
if len(antibodyGroupDict[antibody].keys()) > 1:
multipleGroups = '1'
for group in sorted(antibodyGroupDict[antibody].keys()):
for antibodyList in antibodyGroupDict[antibody][group]:
fout.write(','.join(antibodyList+[antibody,repsExist,multipleGroups]) + '\n')
fout.close()
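# Illustrative sketch of the transformation above (hypothetical single-end design):
#   group,replicate,fastq_1,fastq_2,antibody,control
#   WT_SPT5,1,spt5_R1.fastq.gz,,SPT5,WT_INPUT
#   WT_INPUT,1,input_R1.fastq.gz,,,
# produces read-mapping rows such as "WT_SPT5_R1_T1,spt5_R1.fastq.gz," and a
# control-mapping row "WT_SPT5_R1,WT_INPUT_R1,SPT5,0,0" (no replicates, one group).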
############################################
############################################
## RUN FUNCTION
############################################
############################################
reformat_design(DesignFile=args.DESIGN_FILE,ReadMappingFile=args.READ_MAPPING_FILE,ControlMappingFile=args.CONTROL_MAPPING_FILE)
############################################
############################################
############################################
############################################
``` |
{
"source": "jiangfuqing/SpatialDE",
"score": 2
} |
#### File: Analysis/Comparison/simulate_data.py
```python
import click
import numpy as np
from scipy import stats
import pandas as pd
from tqdm import tqdm
import SpatialDE
@click.command()
@click.argument('prefix')
@click.argument('n_samples', default=100)
@click.argument('n_genes', default=10000)
def make_data(prefix, n_samples, n_genes):
X = np.random.uniform(size=(n_samples, 1), low=0, high=100)
I = np.eye(n_samples)
exp_tab = pd.DataFrame(index=range(n_samples))
names = ['GP{}'.format(i) for i in range(n_genes)]
true_values = pd.DataFrame(index=names, columns=['l', 'mu', 's2_t', 's2_e'])
for g in tqdm(names):
while True:
l = np.exp(np.random.uniform(low=np.log(0.1), high=np.log(100)))
mu = np.random.uniform(low=0., high=5.)
s2_t = np.exp(np.random.uniform(low=-5., high=5.))
s2_e = np.exp(np.random.uniform(low=-5., high=5.))
K = SpatialDE.base.SE_kernel(X, l)
mu1 = mu * np.ones((n_samples,))
K_total = (s2_t * K + s2_e * I)
try:
y = np.random.multivariate_normal(mu1, K_total)
except np.linalg.linalg.LinAlgError:
continue
else:
break
exp_tab[g] = y
true_values.loc[g, 'l'] = l
true_values.loc[g, 'mu'] = mu
true_values.loc[g, 's2_t'] = s2_t
true_values.loc[g, 's2_e'] = s2_e
pd.DataFrame(X, index=exp_tab.index).to_csv(prefix + '_X.csv')
exp_tab.to_csv(prefix + '_expression.csv')
true_values.to_csv(prefix + '_truth.csv')
if __name__ == '__main__':
make_data()
```
#### File: Analysis/Frog/temporal_Frog_analysis.py
```python
import numpy as np
import pandas as pd
import NaiveDE
import SpatialDE
def main():
# Get time points for each sample
sample_info = pd.read_csv('Frog_sample_info.csv', index_col=0)
# Load expression
df = pd.read_csv('data/GSE65785_clutchApolyA_relative_TPM.csv', index_col=0)
df = df[sample_info.index]
df = df[df.sum(1) >= 3] # Filter practically unobserved genes
X = sample_info[['hpf']]
# Convert expression data to log scale, with genes in columns
dfm = NaiveDE.stabilize(df)
res = NaiveDE.regress_out(sample_info, dfm, 'np.log(ERCC) + np.log(num_genes)', rcond=1e-4).T
# Add technical factors as pseudogenes for reference
res['log_num_genes'] = np.log(sample_info['num_genes'])
res['log_ERCC'] = np.log(sample_info['ERCC'])
# Perform Spatial DE test with default settings
results = SpatialDE.run(X, res)
# Save results and annotation in files for interactive plotting and interpretation
results.to_csv('Frog_final_results.csv')
de_results = results[(results.qval < 0.05)].copy()
ms_results = SpatialDE.model_search(X, res, de_results)
ms_results.to_csv('Frog_MS_results.csv')
return results
if __name__ == '__main__':
results = main()
```
#### File: Analysis/MERFISH/spatial_MERFISH_analysis.py
```python
import numpy as np
import pandas as pd
import NaiveDE
import SpatialDE
def main():
df = pd.read_csv('data/rep6/middle_exp_mat.csv', index_col=0)
df = df.T[df.sum(0) >= 3].T # Filter practically unobserved genes
# Get coordinates for each sample
sample_info = pd.read_csv('data/rep6/middle_sample_info.csv', index_col=0)
df = df.loc[sample_info.index]
X = sample_info[['abs_X', 'abs_Y']]
# Convert data to log-scale, and account for depth
dfm = NaiveDE.stabilize(df.T).T
res = NaiveDE.regress_out(sample_info, dfm.T, 'np.log(total_count)').T
# Add total_count as pseudogene for reference
res['log_total_count'] = np.log(sample_info['total_count'])
# Perform Spatial DE test with default settings
results = SpatialDE.run(X, res)
# Assign pi_0 = 1 in multiple testing correction
results['qval'] = SpatialDE.util.qvalue(results['pval'], pi0=1.0)
# Save results and annotation in files for interactive plotting and interpretation
sample_info.to_csv('middle_sample_info.csv')
results.to_csv('middle_final_results.csv')
de_results = results[(results.qval < 0.05)].copy()
ms_results = SpatialDE.model_search(X, res, de_results)
ms_results.to_csv('middle_MS_results.csv')
return results
if __name__ == '__main__':
results = main()
```
#### File: Analysis/MouseOB/time_AEH.py
```python
from time import time
import numpy as np
import pandas as pd
import NaiveDE
from GPy import kern
import GPclust
def main():
sample_info = pd.read_csv('MOB_sample_info.csv', index_col=0)
df = pd.read_csv('data/Rep11_MOB_0.csv', index_col=0)
df = df.loc[sample_info.index]
df = df.T[df.sum(0) >= 3].T # Filter practically unobserved genes
dfm = NaiveDE.stabilize(df.T).T
res = NaiveDE.regress_out(sample_info, dfm.T, 'np.log(total_counts)').T
X = sample_info[['x', 'y']].values
times = pd.DataFrame(columns=['N', 'time'])
Ns = [50, 100, 200, 300, 500, 750, 1000, 2000]
j = 0
for N in Ns:
for i in range(5):
Y = res.sample(N, axis=1).values.T
t0 = time()
m = GPclust.MOHGP(
X=X,
Y=Y,
kernF=kern.RBF(2) + kern.Bias(2),
kernY=kern.RBF(1) + kern.White(1),
K=5,
prior_Z='DP'
)
m.hyperparam_opt_args['messages'] = False
m.optimize(step_length=0.1, verbose=False, maxiter=2000)
times.loc[j] = [N, time() - t0]
print(times.loc[j])
j += 1
times.to_csv('AEH_times.csv')
if __name__ == '__main__':
results = main()
```
#### File: Analysis/SeqFISH/spatial_SeqFISH_analysis.py
```python
import numpy as np
import pandas as pd
import NaiveDE
import SpatialDE
def main():
df = pd.read_csv('exp_mat_43.csv', index_col=0)
df.columns = df.columns.map(int)
# Get coordinates for each sample
sample_info = pd.read_csv('sample_info_43.csv', index_col=0)
df = df[sample_info.index]
X = sample_info[['x', 'y']]
# Convert data to log-scale, and account for depth
dfm = NaiveDE.stabilize(df.T).T
res = NaiveDE.regress_out(sample_info, dfm, 'np.log(total_count)').T
# Add total_count as pseudogene for reference
res['log_total_count'] = np.log(sample_info['total_count'])
# Perform Spatial DE test with default settings
results = SpatialDE.run(X, res)
# Save results and annotation in files for interactive plotting and interpretation
results.to_csv('final_results_43.csv')
de_results = results[(results.qval < 0.05)].copy()
ms_results = SpatialDE.model_search(X, res, de_results)
ms_results.to_csv('MS_results_43.csv')
return results
if __name__ == '__main__':
results = main()
```
#### File: Theory/Bootstraps/bootstraps_CB_analysis.py
```python
import click
import numpy as np
import pandas as pd
import NaiveDE
import SpatialDE
def get_coords(index):
coords = pd.DataFrame(index=index)
coords['x'] = index.str.split('x').str.get(0).map(float)
coords['y'] = index.str.split('x').str.get(1).map(float)
return coords
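# Spots in these count matrices are indexed by "XxY" strings; e.g. a hypothetical
# index '17.907x4.967' is split by get_coords into x=17.907 and y=4.967.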
@click.command()
@click.argument('out_file')
def main(out_file):
df = pd.read_table('../../BreastCancer/data/Layer2_BC_count_matrix-1.tsv', index_col=0)
df = df.T[df.sum(0) >= 3].T # Filter practically unobserved genes
sample_info = get_coords(df.index)
sample_info['total_counts'] = df.sum(1)
sample_info = sample_info.query('total_counts > 5') # Remove empty features
# Bootstrap sampling 80% of data
sample_info = sample_info.sample(frac=0.8)
df = df.loc[sample_info.index]
X = sample_info[['x', 'y']]
dfm = NaiveDE.stabilize(df.T).T
res = NaiveDE.regress_out(sample_info, dfm.T, 'np.log(total_counts)').T
results = SpatialDE.run(X, res)
results.to_csv(out_file)
return results
if __name__ == '__main__':
results = main()
```
#### File: SpatialDE/gp_fits/fit_gps.py
```python
import click
import numpy as np
import pandas as pd
import GPflow
from tqdm import tqdm
def get_coords(index):
coords = pd.DataFrame(index=index)
coords['x'] = index.str.split('x').str.get(0).map(float)
coords['y'] = index.str.split('x').str.get(1).map(float)
return coords
@click.command()
@click.argument('expression_csv')
@click.argument('results_csv')
def fit_gps(expression_csv, results_csv):
df = pd.read_csv(expression_csv, index_col=0)
coords = get_coords(df.index)
X = coords[['x', 'y']].as_matrix()
Y = np.log10(df.iloc[:, 0].map(float)[:, None] + 1)
k = GPflow.kernels.RBF(2, ARD=False) + GPflow.kernels.Constant(2)
m = GPflow.gpr.GPR(X, Y, k)
m_init = m.get_free_state() * 0 + 1.
k_flat = GPflow.kernels.Constant(2)
m_flat = GPflow.gpr.GPR(X, Y, k_flat)
m_flat_init = m_flat.get_free_state() * 0 + 1.
results = pd.DataFrame(index=df.columns)
results['lengthscale'] = np.nan
results['rbf_ll'] = np.nan
results['constant_ll'] = np.nan
for g in tqdm(df.columns):
m.Y = np.log10(df[g].map(float)[:, None] + 1)
m.set_state(m_init)
o = m.optimize()
m_flat.Y = m.Y.value
m_flat.set_state(m_flat_init)
o_flat = m_flat.optimize()
results.loc[g, 'lengthscale'] = m.get_parameter_dict()['model.kern.rbf.lengthscales'][0]
results.loc[g, 'rbf_ll'] = -o.fun
results.loc[g, 'constant_ll'] = -o_flat.fun
results.to_csv(results_csv)
if __name__ == '__main__':
fit_gps()
```
#### File: Python-module/SpatialDE/aeh.py
```python
import logging
import numpy as np
import pandas as pd
from scipy import optimize
from . import base
# Variational update functions
def Q_Z_expectation(mu, Y, s2e, N, C, G, pi=None):
if pi is None:
pi = np.ones(C) / C
log_rho = np.log(pi[None, :]) \
- 0.5 * N * np.log(s2e) \
- 0.5 * np.sum((mu.T[None, :, :] - Y[:, None, :]) ** 2, 2) / s2e \
- 0.5 * N * np.log(2 * np.pi)
# Subtract max per row for numerical stability, and add offset from 0 for same reason.
rho = np.exp(log_rho - log_rho.max(1)[:, None]) + 1e-12
# Then evaluate softmax
r = (rho.T / (rho.sum(1))).T
return r
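# The update above computes the usual mixture responsibilities
#   r_gk proportional to pi_k * N(y_g | mu_k, s2e * I)
# in log space for numerical stability; the small 1e-12 offset before the
# row-wise normalisation keeps the later log(r) terms of the ELBO finite.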
def Q_mu_k_expectation(Z_k, Y, K, s2e):
y_k_tilde = np.dot(Z_k, Y) / s2e
Sytk = np.dot(K, y_k_tilde)
IpSDk = np.eye(K.shape[0]) + K * Z_k.sum() / s2e
m_k = np.linalg.solve(IpSDk, Sytk)
return m_k
def Q_mu_expectation(Z, Y, K, s2e):
m = np.zeros((Y.shape[1], Z.shape[1]))
y_k_tilde = np.dot(Z.T, Y).T / s2e
for k in range(Z.shape[1]):
m[:, k] = Q_mu_k_expectation(Z[:, k], Y, K, s2e)
return m
# Log expectation functions for the ELBO
def ln_P_YZms(Y, Z, mu, s2e, pi=None):
    ''' Expectation of ln P(Y | Z, mu, s2e)
'''
G = Y.shape[0]
N = Y.shape[1]
C = Z.shape[1]
if pi is None:
pi = np.ones(C) / C
log_rho = np.log(pi[None, :]) \
- 0.5 * N * np.log(s2e) \
- 0.5 * np.sum((mu.T[None, :, :] - Y[:, None, :]) ** 2, 2) / s2e \
- 0.5 * N * np.log(2 * np.pi)
return (Z * log_rho).sum()
def ln_P_mu(mu, K):
''' Expectation of ln P(mu)
'''
N = K.shape[0]
C = mu.shape[1]
ll = 0
for k in range(C):
ll = ll + np.linalg.det(K)
ll = ll + mu[:, k].dot(np.linalg.solve(K, mu[:, k]))
ll = ll + N * np.log(2 * np.pi)
ll = -0.5 * ll
return ll
def ln_P_Z(Z, pi=None):
''' Expectation of ln P(Z)
'''
C = Z.shape[1]
if pi is None:
pi = np.ones(C) / C
return np.dot(Z, np.log(pi)).sum()
def ln_Q_mu(K, Z, s2e):
    ''' Expectation of ln Q(mu)
'''
N = K.shape[0]
C = Z.shape[1]
G_k = Z.sum(0)
ll = 0
U, S = base.factor(K)
for k in range(C):
ll = ll - (1. / S + G_k[k] / s2e).sum()
ll = ll + N * np.log(2 * np.pi)
ll = -0.5 * ll
return ll
def ln_Q_Z(Z, r):
''' Expectation of ln Q(Z)
'''
return np.sum(Z * np.log(r))
# ELBO and ELBO objective
def ELBO(Y, r, m, s2e, K, K_0, s2e_0, pi=None):
L = ln_P_YZms(Y, r, m, s2e, pi) + ln_P_Z(r, pi) + ln_P_mu(m, K) \
- ln_Q_Z(r, r) - ln_Q_mu(K_0, r, s2e_0)
return L
def make_elbojective(Y, r, m, X, K_0, s2e_0, pi=None):
def elbojective(log_s2e):
return -ELBO(Y, r, m, np.exp(log_s2e), K_0, K_0, s2e_0, pi)
return elbojective
# Model fitting
def fit_patterns(X, Y, C, l, s2e_0=1.0, verbosity=0, maxiter=100, printerval=1, opt_interval=1, delta_elbo_threshold=1e-2):
''' Fit spatial patterns using Automatic Expression Histology.
X : Spatial coordinates
Y : Gene expression values
C : The number of patterns
    l : The characteristic length scale of the clusters
Returns
final_elbo : The final ELBO value.
m : The posterior mean underlying expression values for each cluster.
r : The posterior pattern assignment probabilities for each gene and pattern.
s2e : The estimated noise parameter of the model
'''
# Set up constants
G = Y.shape[0]
N = Y.shape[1]
eps = 1e-8 * np.eye(N)
s2e = s2e_0
K = base.SE_kernel(X, l) + eps
# Randomly initialize
r = np.random.uniform(size=(G, C))
r = r / r.sum(0)
pi = r.sum(0) / G
m = np.random.normal(size=(N, C))
elbo_0 = ELBO(Y, r, m, s2e, K, K, s2e, pi)
elbo_1 = elbo_0
if verbosity > 0:
print('iter {}, ELBO: {:0.2e}'.format(0, elbo_1))
if verbosity > 1:
print()
for i in range(maxiter):
if (i % opt_interval == (opt_interval - 1)):
elbojective = make_elbojective(Y, r, m, X, K, s2e, pi)
o = optimize.minimize_scalar(elbojective)
s2e = np.exp(o.x)
r = Q_Z_expectation(m, Y, s2e, N, C, G, pi)
m = Q_mu_expectation(r, Y, K, s2e)
pi = r.sum(0) / G
elbo_0 = elbo_1
elbo_1 = ELBO(Y, r, m, s2e, K, K, s2e, pi)
delta_elbo = np.abs(elbo_1 - elbo_0)
if verbosity > 0 and (i % printerval == 0):
print('iter {}, ELBO: {:0.2e}, delta_ELBO: {:0.2e}'.format(i + 1, elbo_1, delta_elbo))
if verbosity > 1:
print('ln(l): {:0.2f}, ln(s2e): {:.2f}'.format(np.log(l), np.log(s2e)))
if verbosity > 2:
line1 = 'P(Y | Z, mu, s2e): {:0.2e}, P(Z): {:0.2e}, P(mu): {:0.2e}' \
.format(ln_P_YZms(Y, r, m, s2e, pi), ln_P_Z(r, pi), ln_P_mu(m, K))
line2 = 'Q(Z): {:0.2e}, Q(mu): {:0.2e}'.format(ln_Q_Z(r, r), ln_Q_mu(K, r, s2e))
print(line1 + '\n' + line2)
if verbosity > 1:
print()
if delta_elbo < delta_elbo_threshold:
if verbosity > 0:
print('Converged on iter {}'.format(i + 1))
break
else:
        print('Warning! ELBO did not converge after {} iters!'.format(i + 1))
final_elbo = ELBO(Y, r, m, s2e, K, K, s2e, pi)
return final_elbo, m, r, s2e
def spatial_patterns(X, exp_mat, DE_mll_results, C, l, **kwargs):
''' Group spatially variable genes into spatial patterns using
automatic expression histology (AEH).
X : Spatial coordinates
exp_mat : Expression matrix, appropriately normalised.
DE_mll_results : Results table from SpatialDE, after filtering
for significance level.
C : The number of spatial patterns
    **kwargs are passed on to the function fit_patterns()
Returns
pattern_results : A DataFrame with pattern membership information
for each gene
patterns : The posterior mean underlying expression for genes in
given spatial patterns.
'''
Y = exp_mat[DE_mll_results['g']].values.T
    # This is important: we only care about co-expression, not absolute levels.
Y = (Y.T - Y.mean(1)).T
Y = (Y.T / Y.std(1)).T
_, m, r, _ = fit_patterns(X, Y, C, l, **kwargs)
cres = pd.DataFrame({'g': DE_mll_results['g'],
'pattern': r.argmax(1),
'membership': r.max(1)})
m = pd.DataFrame.from_records(m)
m.index = exp_mat.index
return cres, m
```
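A minimal usage sketch of the AEH interface above, assuming the module's own `base` and `optimize` imports are available; the coordinates, expression values, number of patterns and lengthscale below are purely synthetic and illustrative:
```python
import numpy as np
import pandas as pd

# Synthetic inputs: 100 spots in 2D, 20 "significant" genes.
X = np.random.uniform(0, 30, size=(100, 2))            # spatial coordinates
exp_mat = pd.DataFrame(np.random.randn(100, 20),
                       columns=['gene_{}'.format(i) for i in range(20)])
DE_mll_results = pd.DataFrame({'g': exp_mat.columns})  # stands in for a filtered SpatialDE results table

# Group the genes into C = 3 spatial patterns with lengthscale l = 10.
histology_results, patterns = spatial_patterns(X, exp_mat, DE_mll_results, C=3, l=10.0, verbosity=1)
print(histology_results.head())  # per-gene pattern assignment and membership probability
```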
#### File: Python-module/SpatialDE/anndata.py
```python
import logging
import pandas as pd
import NaiveDE
from .aeh import spatial_patterns
from .base import run
from .util import qvalue
def spatialde_test(adata, coord_columns=['x', 'y'], regress_formula='np.log(total_counts)'):
''' Run the SpatialDE test on an AnnData object
Parameters
----------
adata: An AnnData object with counts in the .X field.
coord_columns: A list with the columns of adata.obs which represent spatial
coordinates. Default ['x', 'y'].
regress_formula: A patsy formula for linearly regressing out fixed effects
from columns in adata.obs before fitting the SpatialDE models.
Default is 'np.log(total_counts)'.
Returns
-------
results: A table of spatial statistics for each gene.
'''
logging.info('Performing VST for NB counts')
adata.layers['stabilized'] = NaiveDE.stabilize(adata.X.T).T
logging.info('Regressing out fixed effects')
adata.layers['residual'] = NaiveDE.regress_out(adata.obs,
adata.layers['stabilized'].T,
regress_formula).T
X = adata.obs[coord_columns].values
expr_mat = pd.DataFrame.from_records(adata.layers['residual'],
columns=adata.var.index,
index=adata.obs.index)
results = run(X, expr_mat)
# Clip 0 pvalues
min_pval = results.query('pval > 0')['pval'].min() / 2
results['pval'] = results['pval'].clip_lower(min_pval)
# Correct for multiple testing
results['qval'] = qvalue(results['pval'], pi0=1.)
return results
def automatic_expression_histology(adata, filtered_results, C, l,
coord_columns=['x', 'y'], layer='residual', **kwargs):
''' Fit the Automatic Expression Histology (AEH) model to
expression in an AnnData object.
Parameters
----------
adata: An AnnData object with a layer of stabilized expression values
    filtered_results: A DataFrame with the significant subset of results
from the SpatialDE significance test.
C: integer, the number of hidden spatial patterns.
l: float, the common lengthscale for the hidden spatial patterns.
coord_columns: A list with the columns of adata.obs which represent spatial
coordinates. Default ['x', 'y'].
layer: A string indicating the layer of adata to fit the AEH model to.
        By default uses the 'residual' layer.
Remaining arguments are passed to SpatialDE.aeh.spatial_patterns()
Returns
-------
(histology_results, patterns)
histology_results: DataFrame with pattern membership information for each gene.
patterns: DataFrame with the inferred hidden spatial functions the genes belong to
evaluated at all points in the data.
'''
X = adata.obs[coord_columns].values
expr_mat = pd.DataFrame.from_records(adata.layers[layer],
columns=adata.var.index,
index=adata.obs.index)
logging.info('Performing Automatic Expression Histology')
histology_results, patterns = spatial_patterns(X, expr_mat, filtered_results,
C, l, **kwargs)
return histology_results, patterns
```
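A hedged end-to-end sketch of driving this AnnData wrapper. The file name is hypothetical, the AnnData object is assumed to carry x/y coordinates in .obs, the total_counts column is added only because the default regress_formula expects it, and C and l are illustrative values:
```python
import numpy as np
import anndata as ad

adata = ad.read_h5ad('spatial_counts.h5ad')                           # hypothetical input file
adata.obs['total_counts'] = np.asarray(adata.X.sum(axis=1)).ravel()   # used by the default regress_formula

results = spatialde_test(adata, coord_columns=['x', 'y'])
significant = results.query('qval < 0.05')

histology_results, patterns = automatic_expression_histology(adata, significant, C=5, l=100.0)
```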
#### File: jiangfuqing/SpatialDE/test-FaST-GP.py
```python
import logging
logging.basicConfig(level=logging.DEBUG)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
fgp = __import__('FaST-GP')
ds = __import__('data_simulation')
def get_coords(index):
coords = pd.DataFrame(index=index)
coords['x'] = index.str.split('x').str.get(0).map(float)
coords['y'] = index.str.split('x').str.get(1).map(float)
return coords
def main():
df = pd.read_csv('data/Rep12_MOB_1.csv', index_col=0)
sample_info = get_coords(df.index)
# Run workflow
X = sample_info[['x', 'y']]
dfm = np.log10(df + 1)
l = 10
results = fgp.dyn_de(X, dfm, lengthscale=l, num=32)
plt.scatter(results['max_delta'], results['max_ll'], c='k')
plt.xscale('log')
plt.xlim(np.exp(-11), np.exp(11))
plt.xlabel('$\delta$')
plt.ylabel('Maximum Log Likelihood')
plt.title('lengthscale: {}'.format(l))
plt.savefig('fastgp-fits.png', bbox_inches='tight')
print(results.sort_values('max_delta').head(20))
def plot_LL_curves():
# df = pd.read_csv('data/Rep12_MOB_3.csv', index_col=0)
# sample_info = get_coords(df.index)
# X = sample_info[['x', 'y']]
# dfm = np.log10(df + 1).sample(10, axis=1)
l = 10
X, dfm, true_vals = ds.make_ls_data(l, 250, 10)
true_vals['delta'] = true_vals['s2_e'] / true_vals['s2_t']
K = fgp.SE_kernel(X, l)
U, S = fgp.factor(K)
UT1 = fgp.get_UT1(U)
n, G = dfm.shape
for g in range(G):
y = dfm.iloc[:, g]
UTy = fgp.get_UTy(U, y)
LLs = []
delta_range = np.logspace(base=np.e, start=-10, stop=10, num=32)
max_ll = -np.inf
max_delta = np.nan
for delta in delta_range:
cur_ll = fgp.LL(delta, UTy, UT1, S, n)
LLs.append(cur_ll)
if cur_ll > max_ll:
max_ll = cur_ll
max_delta = delta
plt.subplot(np.ceil(G / 2.), 2, g + 1)
plt.plot(delta_range, LLs, marker='o', markeredgecolor='w', markersize=2, markeredgewidth=1, c='k')
plt.scatter([max_delta], [max_ll], marker='v', c='r', edgecolor='none', zorder=5)
plt.title(dfm.columns[g])
plt.axvline(true_vals.iloc[g, -1], color='r')
plt.xscale('log')
plt.xlim(np.exp(-11), np.exp(11))
plt.savefig('example_grids.png')
def opt_simulation():
l = 10
logging.info('Sampling ground truth data...')
X, dfm, true_vals = ds.make_ls_data(10, 500, 500)
logging.info('Done')
results = fgp.dyn_de(X, dfm, lengthscale=l, num=32)
true_vals['delta'] = true_vals['s2_e'] / true_vals['s2_t']
plt.subplot(3, 1, 1)
plt.scatter(results['max_delta'], true_vals['delta'], c='k', label=None)
plt.xscale('log')
plt.xlim(np.exp(-11.), np.exp(11.))
plt.yscale('log')
plt.ylim(np.exp(-11.), np.exp(11.))
plt.plot([1e-4, 1e4], [1e-4, 1e4], c='r', label='$ x = y $ line')
plt.legend(loc='upper left')
plt.ylabel('Ground truth $ \delta $')
plt.subplot(3, 1, 2)
plt.scatter(results['max_s2_t_hat'], true_vals['s2_t'], c='k')
plt.xscale('log')
plt.xlim(np.exp(-6.), np.exp(6.))
plt.yscale('log')
plt.ylim(np.exp(-6.), np.exp(6.))
plt.plot([1e-2, 1e2], [1e-2, 1e2], c='r')
plt.ylabel('Ground truth $ \sigma_t^2 $')
plt.subplot(3, 1, 3)
plt.scatter(results['max_mu_hat'], true_vals['mu'], c='k')
plt.xlim(-1, 6)
plt.ylim(-1, 6)
plt.plot([0, 5], [0, 5], c='r')
plt.ylabel('Ground truth $ \mu $')
plt.xlabel('Inferred Value')
plt.savefig('simulation_accuracy.png')
if __name__ == '__main__':
opt_simulation()
# plot_LL_curves()
# main()
``` |
{
"source": "jiangfuqing/ST-pipeline",
"score": 3
} |
#### File: stpipeline/common/gff_reader.py
```python
from collections import defaultdict
import gzip
import re
# Code snipped from:
# https://gist.github.com/slowkow/8101481
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame']
R_SEMICOLON = re.compile(r'\s*;\s*')
R_COMMA = re.compile(r'\s*,\s*')
R_KEYVALUE = re.compile(r'(\s+|\s*=\s*)')
def gff_lines(filename):
"""Open an optionally gzipped GTF file and generate a dict for each line.
"""
fn_open = gzip.open if filename.endswith('.gz') else open
with fn_open(filename) as fh:
for line in fh:
if line.startswith('#'):
continue
else:
yield gff_parse(line)
def gff_parse(line):
"""Parse a single GTF line and return a dict.
"""
result = {}
fields = line.rstrip().split('\t')
for i, col in enumerate(GTF_HEADER):
result[col] = _get_value(fields[i])
# INFO field consists of "key1=value;key2=value;...".
infos = [x for x in re.split(R_SEMICOLON, fields[8]) if x.strip()]
for i, info in enumerate(infos, 1):
# It should be key="value".
try:
key, _, value = re.split(R_KEYVALUE, info, 1)
# But sometimes it is just "value".
except ValueError:
key = 'INFO{}'.format(i)
value = info
# Ignore the field if there is no value.
if value:
result[key] = _get_value(value)
return result
def _get_value(value):
if not value:
return None
# Strip double and single quotes.
value = value.strip('"\'')
# Return a list if the value has a comma.
if ',' in value:
value = re.split(R_COMMA, value)
# These values are equivalent to None.
elif value in ['', '.', 'NA']:
return None
return value
```
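A small consumer of the generator above, shown as a sketch; the file name is hypothetical, and note that under Python 3 a .gz input would additionally need text mode (for example gzip.open(filename, 'rt')), since gzip.open yields bytes by default:
```python
from collections import Counter

# Count how many records of each feature type an annotation file contains.
feature_counts = Counter()
for record in gff_lines('annotation.gtf'):
    feature_counts[record['feature']] += 1

print(feature_counts.most_common(5))
```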
#### File: stpipeline/common/utils.py
```python
import threading
from datetime import datetime
import os
import subprocess
import stat
def which_program(program):
"""
Checks that a program exists and is executable
:param program: the program name
:type program: str
:return: The program name if the program is in the system and is executable
"""
def is_exe(fpath):
return fpath is not None and os.path.exists(fpath) and os.access(fpath, os.X_OK)
def ext_candidates(fpath):
yield fpath
for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
yield fpath + ext
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
for candidate in ext_candidates(exe_file):
if is_exe(candidate):
return candidate
return None
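# Illustrative use of which_program (a hedged example; "STAR" is simply one of the
# external binaries this pipeline calls elsewhere):
#
#     star_path = which_program("STAR")
#     if star_path is None:
#         raise RuntimeError("STAR aligner not found on the PATH")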
class TimeStamper(object):
"""
Thread safe time stamper
"""
def __init__(self):
self.lock = threading.Lock()
self.prev = None
self.count = 0
def getTimestamp(self):
with self.lock:
ts = datetime.now()
            if ts == self.prev:
                # A datetime cannot be concatenated with a str, so format the timestamp
                # and append a counter to keep repeated timestamps within the same instant unique
                ts = '{}.{:04d}'.format(ts, self.count)
                self.count += 1
else:
self.prev = ts
self.count = 1
return ts
def safeRemove(filename):
"""
Safely remove a file
:param filename: the path of the file
:type filename: str
"""
try:
if filename is not None and os.path.isfile(filename):
os.remove(filename)
except UnboundLocalError:
pass
def safeOpenFile(filename, atrib):
"""
Safely opens a file
    For writing mode it removes the previous file if it exists
    For reading mode it checks that the file exists
:param filename: the path of the file
:param atrib: the file open/write attribute
:type filename: str
:type atrib: str
:return: the file descriptor
:raises: IOError
"""
if filename is None:
raise IOError("Error, no valid file name given\n")
if atrib.find("w") != -1:
safeRemove(filename)
elif atrib.find("r") != -1:
if not (os.path.isfile(filename) or is_fifo(filename)):
raise IOError("Error, the file does not exist {}\n".format(filename))
else:
raise IOError("Error, incorrect attribute {}\n".format(atrib))
return open(filename, atrib)
def fileOk(_file):
"""
Checks file exists and is not zero size
:param file: a file name
:return: True if the file is correct
"""
return _file is not None and os.path.isfile(_file) and not os.path.getsize(_file) == 0
def getSTARVersion():
"""
Tries to find the STAR binary
and makes a system call to get its
version and return it
"""
version = ""
try:
proc = subprocess.Popen(["STAR", "--version"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False, close_fds=True)
(stdout, errmsg) = proc.communicate()
version = stdout
except Exception:
version = "Not available"
return version.rstrip()
def getTaggdCountVersion():
"""
Tries to find the Taggd binary
and makes a system call to get its
version and return it
"""
version = ""
try:
proc = subprocess.Popen(["pip", "show", "taggd"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False, close_fds=True)
(stdout, errmsg) = proc.communicate()
for line in stdout.split("\n"):
if line.find("Version:") != -1:
version = str(line.split()[-1])
except Exception:
version = "Not available"
return version.rstrip()
def getHTSeqCountVersion():
"""
Tries to find the HTSeqCount binary
and makes a system call to get its
version and return it
"""
version = ""
try:
proc = subprocess.Popen(["pip", "show", "htseq"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False, close_fds=True)
(stdout, errmsg) = proc.communicate()
for line in stdout.split("\n"):
if line.find("Version:") != -1:
version = str(line.split()[-1])
except Exception:
version = "Not available"
return version.rstrip()
def is_fifo(file_name):
"""
Checks if the file name is a FIFO
:param file_name: a file name
:return: True if the file is a FIFO
"""
return (os.path.exists(file_name) and stat.S_ISFIFO(os.stat(file_name).st_mode))
``` |
{
"source": "JiangGua/alphacoders-wallpaper-downloader",
"score": 3
} |
#### File: JiangGua/alphacoders-wallpaper-downloader/main.py
```python
import os
import time
import json
import requests
from pyquery import PyQuery as pq
from threading import Thread
def download_pic(url, name):
pic = requests.get(url)
with open(name, 'wb') as file_obj:
file_obj.write(pic.content)
def fetch(api_key, tag_id, min_width, i):
info = requests.get('https://wall.alphacoders.com/api2.0/get.php?auth=%s&method=tag&id=%s&page=%d&sort=views' %
(api_key, tag_id, i))
info = json.loads(info.text)
if info['success']:
if len(info['wallpapers']) == 0:
            print('Starting download…')
return False
result = []
for j in range(len(info['wallpapers'])):
pic_info = info['wallpapers'][j]
            # If the current image is narrower than the configured min_width, skip to the next one
if int(pic_info['width']) < min_width:
continue
file_name = pic_info['id'] + '.' + pic_info['file_type']
if os.path.exists(file_name):
continue
result.append({
"url": pic_info['url_image'],
"name": file_name,
})
        print('Added page %d to the download queue' % i)
return result
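# Illustrative shape of fetch()'s return value (keys as built above; values illustrative):
#   [{"url": "...url_image from the API...", "name": "12345.jpg"}, ...]
# It returns False once a page comes back empty, which ends the paging loop below.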
if __name__ == "__main__":
# Read Config
with open('config.json', 'r') as file_obj:
config = file_obj.read()
config = json.loads(config)
tag_id = str(config['tag_id'])
api_key = str(config['api_key'])
try:
max_page = int(config['max_page'])
except:
max_page = 0
try:
min_width = int(config['min_width'])
except:
min_width = 0
# Main
url = 'https://wall.alphacoders.com/tags.php?tid=' + str(tag_id)
html = pq(url=url)
dir_name = html('span.breadcrumb-element').text()
dir_name = "".join(i for i in dir_name if i.isalnum())
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
os.chdir(dir_name)
    queue = []  # full list of download URLs and their file names (the download queue)
threads = []
if max_page != 0:
for i in range(1, max_page + 1):
result = fetch(api_key, tag_id, min_width, i)
if result == False:
break
queue += result
else:
i = 1
while True:
result = fetch(api_key, tag_id, min_width, i)
if result == False:
break
queue += result
i += 1
for i in queue:
t = Thread(target = download_pic, args = [i["url"], i["name"]])
t.start()
threads.append(t)
for t in threads:
t.join()
    print("Download complete")
``` |
{
"source": "JiangGua/chatroom-name-history-weixin",
"score": 2
} |
#### File: JiangGua/chatroom-name-history-weixin/main.py
```python
import os
import re
import time
import json
import random
import platform
from shutil import copy2 as copyfile
from shutil import copytree as copytree
import yaml
import itchat
from git import Repo
from jinja2 import Environment, FileSystemLoader
def initialize():
    # Create the output folder
if not os.path.exists('output'):
os.makedirs('output')
    # Create the global output folder
if not os.path.exists('output/global'):
os.makedirs('output/global')
    # Create an empty data.json file
if not os.path.exists('output/global/data.json'):
with open('output/global/data.json', 'w', encoding='utf-8') as f:
f.write('[]')
    # Copy the global config file into the global output folder
if not os.path.exists('output/global/config.yml'):
copyfile('config.yml.example', 'output/global/config.yml')
class ChatroomDataOperator():
def __init__(self, chatroom_id):
self.id = chatroom_id
self.path = 'output/global/{}.json'.format(chatroom_id)
def generate_chatroom_item(self):
obj = {
'roomName': [],
'deploy': {
'enable': False,
'repo': '',
'branch': 'gh-pages',
'siteTitle': '站点标题',
'theme': 'default',
},
}
return obj
def dump(self, obj):
with open(self.path, 'w', encoding='utf-8') as f:
json.dump(obj, f, ensure_ascii=False)
def fullData(self):
with open(self.path, 'r', encoding='utf-8') as f:
obj = json.load(f)
return obj
def deployConfig(self):
return self.fullData()['deploy']
def enable(self):
return self.deployConfig()['enable']
def theme(self):
return self.deployConfig()['theme']
def siteTitle(self):
return self.deployConfig()['siteTitle']
def repo(self):
return self.deployConfig()['repo']
def branch(self):
return self.deployConfig()['branch']
def roomName(self):
return self.fullData()['roomName']
def append_name(self, name_item: dict):
obj = self.fullData()
obj['roomName'].append(name_item)
self.dump(obj)
class DataJsonOperator():
def __init__(self, path = 'output/global/data.json'):
self.path = path
def _ranstr(self, num):
"""
        Generate a random string of length num
"""
seed = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
salt = ''
for i in range(num):
salt += random.choice(seed)
return salt
def chatrooms(self):
path = self.path
with open(path, 'r', encoding='utf-8') as f:
chatrooms = json.load(f)
return chatrooms
def dump(self, chatrooms):
path = self.path
with open(path, 'w', encoding='utf-8') as f:
json.dump(chatrooms, f, ensure_ascii=False)
def _generate_chatroom_item(self):
"""
        Return a dict for a new chatroom item
"""
chatrooms = self.chatrooms()
        # Generate a unique ID
while (uuid:=self._ranstr(8)) in [i['id'] for i in chatrooms]:
pass
obj = {
'id': uuid,
'members': [],
}
return obj
def _recompose_memberlist(self, memberlist):
"""
        Reads the memberlist.
        Returns a list of dicts, one per member.
"""
result = []
for member in memberlist:
obj = {
'wxName': member['NickName'],
'nickInGroup': member['DisplayName'] or member['NickName'],
}
result.append(obj)
return result
def find_chatroom_by_member(self, chatrooms, memberlist):
"""
        Find the chatroom id from its member list.
        If no match is found, a new chatroom item is automatically created in data.json and its id is returned.
        The recorded member list is overwritten with the newly fetched one.
        Returns: id
"""
current_members = self._recompose_memberlist(memberlist)
current_member_set = set([str(item['wxName']) for item in current_members])
        flag = False  # set to True when a match is found
for chatroom in chatrooms:
            recorded_member_set = set([item['wxName'] for item in chatroom['members']])  # member list on record
            intersection = recorded_member_set & current_member_set  # set intersection
            if len(intersection) > (max(len(current_member_set)//2, len(current_member_set)-3)):  # allow 1/2 of members or 3 members (whichever is larger) to have renamed
flag = True
chatroom['members'] = current_members
self.dump(chatrooms)
return chatroom['id']
        # No match found: create a new chatroom item
if not flag:
chatroom = self._generate_chatroom_item()
chatroom['members'] = current_members
chatrooms.append(chatroom)
self.dump(chatrooms)
i = ChatroomDataOperator(chatroom['id'])
i.dump(i.generate_chatroom_item())
return chatroom['id']
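# Worked example of the matching rule above (illustrative numbers): with 10 current
# members, the intersection with a recorded room must exceed max(10 // 2, 10 - 3) = 7,
# i.e. at least 8 members must match, so at most 2 may have changed their WeChat names.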
class WebsiteGenerator():
def __init__(self, chatroom_id):
self.id = chatroom_id
self.path = 'output/{}/'.format(chatroom_id)
o = ChatroomDataOperator(chatroom_id)
self.config = o.deployConfig()
self.title = o.siteTitle()
self.roomName = o.roomName()
self.theme_path = 'themes/{}/'.format(o.theme())
def generate(self):
path = self.path
theme_path = self.theme_path
if not os.path.exists(path):
            # Copy the theme's static files into the repository folder
copytree(theme_path, path)
env = Environment(loader = FileSystemLoader(path))
template = env.get_template('index.html')
html = template.render(
title = self.title,
items = self.roomName,
)
with open(path+'index.html', 'w', encoding='utf-8') as f:
f.write(html)
class DeployerGit():
def __init__(self, chatroom_id, repo=None, branch="backup"):
self.id = chatroom_id
self.path = 'output/{}/'.format(chatroom_id)
if not repo:
c = ChatroomDataOperator(chatroom_id)
self.repo = c.repo()
self.branch = c.branch()
else:
self.repo = repo
self.branch = branch
def deploy(self):
try:
repo = Repo.init(self.path)
remote = repo.create_remote(name='kotonoha', url=self.repo)
except:
repo = Repo(self.path)
remote = repo.remotes.kotonoha
try:
repo.index.add('*')
repo.index.commit(message='Update')
except:
print("<Deployer> Git Repo: Failed to commit")
try:
remote.push(refspec='master:{}'.format(self.branch), force=True)
except:
print("<Deployer> Git Repo: Failed to push")
class GlobalConfig():
def __init__(self):
self.path = 'output/global/config.yml'
def allData(self):
with open(self.path, encoding='utf-8') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
return data
def deployConfig(self):
return self.allData()['deploy']
def theme(self):
return self.allData()['deploy']['theme']
def backupConfig(self):
return self.allData()['backup']
def backupEnable(self):
return self.allData()['backup']['enable']
def backupRepo(self):
return self.allData()['backup']['repo']
def backupBranch(self):
return self.allData()['backup']['branch']
def backup(self):
d = DeployerGit("global", self.backupRepo(), self.backupBranch())
d.deploy()
def msg_handler(msg):
    # Only handle system messages related to chatroom renames
if str(msg['Text']).find("群名") != -1:
current_memberlist = msg['User']['MemberList']
o = DataJsonOperator()
chatrooms = o.chatrooms()
        # Find the chatroom id from its member list; if not found, a new chatroom item is created in data.json and its id is returned
chatroom_id = o.find_chatroom_by_member(chatrooms, current_memberlist)
chatroom = ChatroomDataOperator(chatroom_id)
name_re = re.compile(r'(?<=修改群名为“)[\s\S]*(?=”)')
name = re.search(name_re, str(msg['Text'])).group()
print('Modification Detected: {}'.format(name))
item = {
'timestamp': time.time(),
'date': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())),
'name': name,
}
chatroom.append_name(item)
if chatroom.enable():
            # Generate the website
w = WebsiteGenerator(chatroom_id)
w.generate()
            # Git operations
print('Started: Deploying to remote repo...')
g = DeployerGit(chatroom_id)
g.deploy()
print('Deploy Success: {}'.format(chatroom_id))
        # Back up global data and config files
gl = GlobalConfig()
if gl.backupEnable():
print('Started: Global Data Backup')
gl.backup()
print('Success: Global Data Backup')
@itchat.msg_register(itchat.content.NOTE, isGroupChat=True)
def received_msg(msg):
msg_handler(msg)
if __name__ == "__main__":
    # Initialization
initialize()
    # On non-Windows systems, print the QR code in the terminal; on Windows, open it with an image viewer
if platform.platform().lower().find('windows') == -1:
itchat.auto_login(hotReload=True, enableCmdQR=2)
else:
itchat.auto_login(hotReload=True)
    # Run itchat
itchat.run()
``` |
{
"source": "JiangguoZhang/ELEC576project",
"score": 3
} |
#### File: dataloaders/iou_function/mean_iou.py
```python
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def rle_decode(mask_rle, shape, color=True):
s = mask_rle.split()
starts = list(map(lambda x: int(x) - 1, s[0::2]))
lengths = list(map(int, s[1::2]))
ends = [x + y for x, y in zip(starts, lengths)]
img = np.zeros((shape[0] * shape[1]), dtype=bool)
for start, end in zip(starts, ends):
img[start: end] = color
return img.reshape(shape)
def iou(predict, label, threshold=0.5):
predict = predict >= threshold
TP = predict & label
P_all = predict | label
return np.sum(TP) / np.sum(P_all)
def mean_iou(pred_dir, label_csv, save_dir="/mnt/data/elec576/project/1120/compare"):
labels = pd.read_csv(label_csv)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
acc_list = []
for item in os.listdir(pred_dir):
image_id = item.split(".")[0]
pred_img = np.load(os.path.join(pred_dir, item))
        pred_img = (pred_img - np.min(pred_img)) / (np.max(pred_img) - np.min(pred_img))  # min-max normalize the prediction to [0, 1]
rle_codes = labels[labels["id"] == image_id]["annotation"].tolist()
img_shape = pred_img.shape
label_img = np.zeros_like(pred_img, dtype=bool)
for rle_code in rle_codes:
label_img = np.bitwise_or(label_img, rle_decode(rle_code, img_shape))
iou_list = [iou(pred_img, label_img, threshold=threshold) for threshold in np.arange(0.5, 1, 0.05)]
iou_mean = np.mean(iou_list)
#fig, axs = plt.subplots(1, 2, figsize=[12, 6])
#axs[0].imshow(pred_img, cmap="gray")
#axs[1].imshow(label_img, cmap="gray")
#axs[1].set_title("%.2g, %.2g" % (iou_list[0], iou_list[-1]))
#fig.tight_layout()
#plt.savefig(os.path.join(save_dir, image_id))
#plt.close()
#print(iou_mean)
acc_list.append(iou_mean)
acc = np.mean(acc_list)
print(acc)
mean_iou("/mnt/data/elec576/project/1207-semi1/IMGS/test_stat", "/mnt/data/elec576/project/kaggle_cell_segmentation/sartorius-cell-instance-segmentation/train.csv")
```
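To make the RLE and IoU conventions above concrete, a tiny self-contained check using the functions defined in this file (mask contents chosen only for illustration):
```python
import numpy as np

# "1 3 10 2" marks 1-based pixels 1-3 and 10-11 of a flattened 4x4 mask.
mask = rle_decode("1 3 10 2", shape=(4, 4))
assert mask.sum() == 5

# A prediction overlapping on 4 of those 5 pixels, plus one extra pixel:
pred = np.zeros((4, 4))
pred.flat[[0, 1, 2, 9, 12]] = 1.0
print(iou(pred, mask, threshold=0.5))   # intersection 4, union 6, so about 0.667
```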
#### File: ELEC576project/dataloaders/segmentation.py
```python
from PairedNeurons import PairedNeurons
from matplotlib import pyplot as plt
import os
import numpy as np
import cv2
from xlwt import Workbook
from skimage.segmentation import clear_border
SMOOTH = 1e-6
def iou_numpy(outputs: np.array, labels: np.array):
# outputs = outputs.squeeze(2)
intersection = (outputs & labels).sum((0, 1))
union = (outputs | labels).sum((0, 1))
iou = (intersection + SMOOTH) / (union + SMOOTH)
# thresholded = np.ceil(np.clip(20 * (iou - 0.5), 0, 10)) / 10
return iou # Or thresholded.mean()
img_dir = "/Users/mac/Desktop/Rice-COMP576/sartorius-cell-instance-segmentation/train"
csv_dir = "/Users/mac/Desktop/Rice-COMP576/sartorius-cell-instance-segmentation/train.csv"
pn = PairedNeurons(img_dir, csv_dir, 256, is_train=False)
sum1,sum2,sum3,sum4,sum5,sum6=0,0,0,0,0,0
# Workbook is created
wb = Workbook()
# add_sheet is used to create sheet.
sheet1 = wb.add_sheet('Sheet 1')
sheet1.write(0,0,"Image Name")
sheet1.write(0,1,"IOU for Binary+OTSU")
sheet1.write(0,2,"segmented further using watershed")
sheet1.write(0,3,"Using distance transform and thresholding")
sheet1.write(0,4,"threshold the dist transform at 1/2 its max value.")
for i in range(len(pn)):
x, y, l = pn.__getitem__(i)
sheet1.write(i+1, 0, l)
fig, axs = plt.subplots(2, 3, figsize=(16, 8))
# # print(fig.shape)
# # print(y.shape)
# # fig.colorbar(im)
# plt.savefig(os.path.join("./save", l))
# plt.close()
# plt.subplot(2, 3, i + 1)
###1
x=np.uint8(x*255)
axs[0,0].imshow(y, cmap="gray")
axs[0,0].axis("off")
    axs[0,0].title.set_text("Ground truth seg")
ret, th1 = cv2.threshold(x, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(th1,cv2.MORPH_OPEN,kernel, iterations = 1)
opening = clear_border(opening) #Remove edge touching grains
# print(iou_numpy(x.astype(int),np.uint8(opening).astype(int)))
sum1+=iou_numpy(x,opening.astype(int))
sheet1.write(i+1,1,sum1)
axs[0,1].imshow(opening, cmap="gray")
axs[0,1].axis("off")
axs[0,1].title.set_text("Threshold image to binary using OTSU")
###2
sure_bg = cv2.dilate(opening,kernel,iterations=1)
axs[0,2].imshow(sure_bg, cmap="gray")
axs[0,2].axis("off")
axs[0,2].title.set_text("segmented further using watershed")
sum2+=iou_numpy(np.uint8(y*255),sure_bg.astype(int))
sheet1.write(i+1,2,sum2)
###
###3
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,3)
axs[1,0].imshow(dist_transform, cmap="gray")
axs[1,0].axis("off")
axs[1,0].title.set_text("Using distance transform and thresholding")
sum3+=iou_numpy(np.uint8(y*255),dist_transform.astype(int))
sheet1.write(i+1,3,sum3)
###4
ret2, sure_fg = cv2.threshold(dist_transform,0.5*dist_transform.max(),255,0)
axs[1,1].imshow(sure_bg, cmap="gray")
axs[1,1].axis("off")
axs[1,1].title.set_text("threshold the dist transform at 1/2 its max value.")
sum4+=iou_numpy(np.uint8(y*255),sure_bg.astype(int))
sheet1.write(i+1,4,sum4)
####
    ###5 Unknown ambiguous region is nothing but background - foreground
sure_fg = np.uint8(sure_fg) #Convert to uint8 from float
unknown = cv2.subtract(sure_bg,sure_fg)
    sum5 += iou_numpy(np.uint8(y*255), unknown.astype(int))
axs[1,2].imshow(unknown, cmap="gray")
axs[1,2].axis("off")
    axs[1,2].title.set_text("Unknown ambiguous region is nothing but background")
sheet1.write(i+1,5,sum5)
fig.tight_layout()
# print(iou_numpy((x*255).astype(int),(th1*255).astype(int)))
plt.savefig(os.path.join("./save", l))
plt.close()
# plt.title(l)
# plt.subplot(2, 3, i + 1)
# plt.imshow(opening, 'gray')
# plt.show()
wb.save('result.xls')
# print(sum/len(pn))
```
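The script above stops at the sure-background / sure-foreground / unknown decomposition; for reference, a hedged sketch of the remaining marker-based watershed step using only standard OpenCV calls (the input image here is a random placeholder, not data from this project):
```python
import cv2
import numpy as np

img = (np.random.rand(64, 64) * 255).astype(np.uint8)        # placeholder grayscale image
_, binary = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel, iterations=1)
sure_bg = cv2.dilate(opening, kernel, iterations=1)
dist = cv2.distanceTransform(opening, cv2.DIST_L2, 3)
_, sure_fg = cv2.threshold(dist, 0.5 * dist.max(), 255, cv2.THRESH_BINARY)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)

# Label sure-foreground regions, reserve 0 for the unknown band, and run watershed.
n_markers, markers = cv2.connectedComponents(sure_fg)
markers = markers + 1
markers[unknown == 255] = 0
markers = cv2.watershed(cv2.cvtColor(img, cv2.COLOR_GRAY2BGR), markers)
# Pixels labeled -1 are watershed boundaries; labels of 2 and above are segmented objects.
```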
#### File: ELEC576project/dataloaders/testCv2.py
```python
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
SMOOTH = 1e-6
def iou_numpy(outputs: np.array, labels: np.array):
# outputs = outputs.squeeze(2)
intersection = (outputs & labels).sum((0, 1))
union = (outputs | labels).sum((0, 1))
iou = (intersection + SMOOTH) / (union + SMOOTH)
thresholded = np.ceil(np.clip(20 * (iou - 0.5), 0, 10)) / 10
return thresholded.mean() # Or thresholded.mean()
# 1. Read the image
img = cv.imread('/Users/mac/Desktop/Rice-COMP576/sartorius-cell-instance-segmentation/train/0030fd0e6378/0030fd0e6378.png',0)
# 2. Threshold segmentation
thresholdValue=135
ret, th1 = cv.threshold(img, thresholdValue, 255, cv.THRESH_BINARY)
ret,th2 = cv.threshold(img,200,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
ret, th4 = cv.threshold(img, thresholdValue, 255, cv.THRESH_TOZERO)
# 3. Display the images
titles = ['original', 'th1', 'th2', 'th3', 'th4', 'th5']
images = [img, th1,th2, th4]
plt.figure(figsize=(10,6))
# Display with Matplotlib
for i in range(4):
plt.subplot(2, 3, i + 1)
plt.imshow(images[i], 'gray')
    plt.xticks([]), plt.yticks([])  # hide the axis ticks
plt.show()
```
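As a quick check on the thresholded score returned by iou_numpy above, a worked example on two synthetic masks (shapes and values are illustrative):
```python
import numpy as np

a = np.zeros((4, 4), dtype=int); a[:2, :] = 1    # 8 foreground pixels
b = np.zeros((4, 4), dtype=int); b[:3, :] = 1    # 12 foreground pixels, covering all of a

# intersection = 8, union = 12, so IoU is about 0.667; the score clears 4 of the
# 10 thresholds 0.50, 0.55, ..., 0.95, giving ceil(20 * (0.667 - 0.5)) / 10 = 0.4.
print(iou_numpy(a, b))
```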
#### File: JiangguoZhang/ELEC576project/test_pix2pixHD.py
```python
import argparse
import io
import json
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from matplotlib_scalebar.scalebar import ScaleBar
from torch.utils.data import DataLoader
import dataloaders
from nets import GAN2D
from pytorch_utils import util, batcher
EPS = 1e-8
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=6, help='input batch size')
parser.add_argument('--no-cuda', action='store_false', help='disables cuda')
parser.add_argument('--net-struct', default='./structure/pix2pixHD.json',
help='The net structure.')
parser.add_argument('--multiGPU', action='store_true',
help='''Enable training on multiple GPUs, uses all that are available.''')
parser.add_argument('--dataset-loc',
default="/mnt/data/elec576/project/kaggle_cell_segmentation/sartorius-cell-instance-segmentation/test",
help='Folder containing training dataset')
common_dir = "IMGS"
scratch_dir = "1213"
parser.add_argument('--load',
default=scratch_dir + '/ckpts/CHECKPOINT-400',
help='''Load pre-trained networks''')
parser.add_argument('--img-loc',
default='%s/%s/test/img' % (scratch_dir, common_dir),
help='The directory to save output images.')
parser.add_argument('--stat-loc',
default='%s/%s/test/stat' % (scratch_dir, common_dir),
help='The analysis results are saved in this directory.')
parser.add_argument('--visualize', action='store_true', help='Visualize result.')
parser.add_argument('--movie', action='store_true', help='Create movie result.')
parser.add_argument('--norm-min', type=int, default=-1, help="The normalized minimum")
parser.add_argument('--norm-max', type=int, default=1, help="The normalized maximum")
parser.add_argument('--threshold', type=float, default=.5, help="The threshold of IoU.")
parser.add_argument('--num-workers', type=int, default=3, help="The number of cores to load images.")
parser.add_argument('--crop-x', type=int, default=720, help='The height of input image.')
parser.add_argument('--crop-y', type=int, default=720, help='The width of input image.')
parser.add_argument('--crop-size', type=int, default=512, help='The input size.')
parser.add_argument('--g1', default="g1_out", help='The name of the final layer in generator 1.')
parser.add_argument('--g2', default="g2_out", help='The name of the final layer in generator 2.')
parser.add_argument('--d-layer', default="feat", help='The name of the final layer in discriminator.')
parser.add_argument('--n-layers', type=int, default=3, help='The levels of discriminator.')
opt = parser.parse_args()
opt.visualize = False
print(opt)
if not os.path.exists(opt.img_loc):
os.makedirs(opt.img_loc)
if not os.path.exists(opt.stat_loc):
os.makedirs(opt.stat_loc)
s = json.load(open(opt.net_struct, "rb"))
G = GAN2D.ConvNet(s["G"], [opt.g1, opt.g2]).eval()
dataset = dataloaders.PairedNeurons(opt.dataset_loc, None, crop_x=opt.crop_x, crop_y=opt.crop_y,
norm_min=opt.norm_min, norm_max=opt.norm_max, is_train=False, is_supervised=False)
train_loader = DataLoader(
dataset,
num_workers=opt.num_workers, # Use this to replace data_prefetcher
batch_size=opt.batch_size,
shuffle=False,
pin_memory=opt.no_cuda
)
if opt.multiGPU:
G = nn.DataParallel(G)
'''Load net from directory'''
if opt.load is not None:
util.load_nets(opt.load, {
'G': G
}, on_gpu=opt.no_cuda)
if opt.no_cuda:
G = G.cuda()
batch = batcher()
def get_img_from_fig(fig, dpi=180):
buf = io.BytesIO()
fig.savefig(buf, format="png", dpi=dpi)
buf.seek(0)
img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
buf.close()
img = cv2.imdecode(img_arr, 1)
#cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def test(norm_rage=None):
imgList = []
id_list = []
rle_list = []
with torch.no_grad():
for batch_idx, tl in enumerate(train_loader):
img0, img1, target = tl
print("\r Processing the %d th batch." % batch_idx, end='')
batch.batch()
if opt.no_cuda:
img0 = img0.cuda()
_, x_fake = G(img0)
# pkl.dump(img0.cpu().data.numpy(), open("0810/tmp.pkl", 'wb'))
im0 = img0.cpu().data.numpy()
im1 = img1.cpu().data.numpy()
im_gen = x_fake.cpu().data.numpy()
for i in range(np.size(im1, axis=0)):
ground_truth = dataset.remove_padding(im1[i, 0, :, :])
gen = dataset.remove_padding(im_gen[i, 0, :, :])
neuron = dataset.remove_padding(im0[i, 0, :, :])
id_list.append(target[i])
rle_list.append(dataset.rle_encode(gen >= opt.threshold))
if opt.visualize:
save_dir = os.path.join(opt.stat_loc, target[i])
np.save(save_dir, gen)
fig, ax = plt.subplots(1, 3, figsize=[12, 4])
ax[0].imshow(neuron, cmap='gray')
ax[0].axis('off')
ax[0].set_title("Neuron")
#ax[0].text(10, 50, "A", fontsize=36, color='white')
ax[1].imshow(ground_truth, cmap='gray')
ax[1].axis('off')
ax[1].set_title("Ground Truth")
#ax[1].text(10, 50, "B", fontsize=36, color='white')
ax[2].imshow(gen, cmap='gray')
ax[2].axis('off')
ax[2].set_title("Synthesized")
#ax[2].text(10, 50, "C", fontsize=36, color='white')
fig.tight_layout()
scalebar = ScaleBar(1.3e-6, location="upper right") # 1 pixel = 1.3 microns
plt.gca().add_artist(scalebar)
img_dir = opt.img_loc
if not os.path.exists(img_dir):
os.makedirs(img_dir)
plt.savefig(os.path.join(img_dir, "%s.png" % target[i]))
if opt.movie:
img = get_img_from_fig(fig)
imgList.append(img)
plt.close(fig)
csv_df = pd.DataFrame({"id": id_list, "annotation": rle_list})
csv_df.to_csv(os.path.join(opt.stat_loc, "submission.csv"), sep=',', encoding='utf-8', index=None)
if opt.movie:
movieSize = imgList[0].shape[:2]
movieSize = (movieSize[1], movieSize[0])
out = cv2.VideoWriter(os.path.join(opt.img_loc, "movie.avi"), cv2.VideoWriter_fourcc(*'DIVX'), 4, movieSize)
for img in imgList:
out.write(img)
out.release()
test()
```
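The dataset.rle_encode used above lives in the PairedNeurons loader and is not shown here; as a point of reference, a minimal encoder consistent with the 1-based "start length" decoder shown earlier in mean_iou.py (a sketch, not the project's implementation):
```python
import numpy as np

def rle_encode_sketch(mask):
    """Encode a boolean mask as 1-based 'start length' pairs (row-major)."""
    flat = np.asarray(mask, dtype=bool).ravel()
    padded = np.concatenate([[False], flat, [False]])
    edges = np.flatnonzero(padded[1:] != padded[:-1])
    starts, ends = edges[0::2], edges[1::2]
    return ' '.join('{} {}'.format(s + 1, e - s) for s, e in zip(starts, ends))

# Round-trip check against the decoder from mean_iou.py (illustrative):
#   rle_encode_sketch(rle_decode("1 3 10 2", (4, 4))) == "1 3 10 2"
```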
#### File: ELEC576project/UNET/fcn.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models
from torchvision.models.vgg import VGG
import torch
import torch.nn as nn
#from .utils import load_state_dict_from_url
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def vgg11(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
class FCN32s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(64, n_class, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
#print(x.shape)
output = self.pretrained_net(x)
# print(output['x1'].shape)
# print(output['x2'].shape)
# print(output['x3'].shape)
# print(output['x4'].shape)
        x4 = output['x4']  # size=(N, 512, x.H/16, x.W/16)
        #print(x4.shape)
        score = self.bn1(self.relu(self.deconv1(x4)))  # size=(N, 512, x.H/8, x.W/8)
        #print(score.shape)
        score = self.bn2(self.relu(self.deconv2(score)))  # size=(N, 256, x.H/4, x.W/4)
        #print(score.shape)
        score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/2, x.W/2)
        #print(score.shape)
        score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H, x.W)
#print(score.shape)
#score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
score = self.sigmoid(score)
# print(score.shape)
return score # size=(N, n_class, x.H/1, x.W/1)
class FCN16s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)
score = self.relu(self.deconv1(x5)) # size=(N, 512, x.H/16, x.W/16)
score = self.bn1(score + x4) # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.bn2(self.relu(self.deconv2(score))) # size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
return score # size=(N, n_class, x.H/1, x.W/1)
class FCN8s(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)
x3 = output['x3'] # size=(N, 256, x.H/8, x.W/8)
score = self.relu(self.deconv1(x5)) # size=(N, 512, x.H/16, x.W/16)
score = self.bn1(score + x4) # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.relu(self.deconv2(score)) # size=(N, 256, x.H/8, x.W/8)
score = self.bn2(score + x3) # element-wise add, size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
score = nn.Sigmoid()(score)
return score # size=(N, n_class, x.H/1, x.W/1)
class FCNs(nn.Module):
def __init__(self, pretrained_net, n_class):
super().__init__()
self.n_class = n_class
self.pretrained_net = pretrained_net
self.relu = nn.ReLU(inplace=True)
self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn1 = nn.BatchNorm2d(512)
self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn2 = nn.BatchNorm2d(256)
self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn4 = nn.BatchNorm2d(64)
self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
self.bn5 = nn.BatchNorm2d(32)
self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
def forward(self, x):
output = self.pretrained_net(x)
x5 = output['x5'] # size=(N, 512, x.H/32, x.W/32)
x4 = output['x4'] # size=(N, 512, x.H/16, x.W/16)
x3 = output['x3'] # size=(N, 256, x.H/8, x.W/8)
x2 = output['x2'] # size=(N, 128, x.H/4, x.W/4)
x1 = output['x1'] # size=(N, 64, x.H/2, x.W/2)
score = self.bn1(self.relu(self.deconv1(x5))) # size=(N, 512, x.H/16, x.W/16)
score = score + x4 # element-wise add, size=(N, 512, x.H/16, x.W/16)
score = self.bn2(self.relu(self.deconv2(score))) # size=(N, 256, x.H/8, x.W/8)
score = score + x3 # element-wise add, size=(N, 256, x.H/8, x.W/8)
score = self.bn3(self.relu(self.deconv3(score))) # size=(N, 128, x.H/4, x.W/4)
score = score + x2 # element-wise add, size=(N, 128, x.H/4, x.W/4)
score = self.bn4(self.relu(self.deconv4(score))) # size=(N, 64, x.H/2, x.W/2)
score = score + x1 # element-wise add, size=(N, 64, x.H/2, x.W/2)
score = self.bn5(self.relu(self.deconv5(score))) # size=(N, 32, x.H, x.W)
score = self.classifier(score) # size=(N, n_class, x.H/1, x.W/1)
return score # size=(N, n_class, x.H/1, x.W/1)
class VGGNet(VGG):
def __init__(self, pretrained=True, model='vgg16', requires_grad=True, remove_fc=True, show_params=False):
super().__init__(make_layers(cfg[model]))
self.ranges = ranges[model]
if pretrained:
exec("self.load_state_dict(models.%s(pretrained=True).state_dict())" % model)
if not requires_grad:
for param in super().parameters():
param.requires_grad = False
if remove_fc: # delete redundant fully-connected layer params, can save memory
del self.classifier
if show_params:
for name, param in self.named_parameters():
print(name, param.size())
def forward(self, x):
output = {}
# get the output of each maxpooling layer (5 maxpool in VGG net)
for idx in range(len(self.ranges)):
for layer in range(self.ranges[idx][0], self.ranges[idx][1]):
x = self.features[layer](x)
output["x%d"%(idx+1)] = x
return output
ranges = {
'vgg11': ((0, 3), (3, 6), (6, 11), (11, 16), (16, 21)),
'vgg13': ((0, 5), (5, 10), (10, 15), (15, 20), (20, 25)),
'vgg16': ((0, 5), (5, 10), (10, 17), (17, 24), (24, 31)),
'vgg19': ((0, 5), (5, 10), (10, 19), (19, 28), (28, 37))
}
# cropped version from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
cfg = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def get_fcn32s(n_class=1):
vgg_model = VGGNet(requires_grad=True)
return FCN32s(pretrained_net=vgg_model, n_class=n_class)
def get_fcn8s(n_class=1):
vgg_model = VGGNet(requires_grad=True)
return FCN8s(pretrained_net=vgg_model, n_class=n_class)
```
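A quick shape check for the networks defined above (a sketch; pretrained=False here only avoids downloading VGG weights, and any input size divisible by 32 would do):
```python
import torch

vgg = VGGNet(pretrained=False, requires_grad=True)
model = FCN8s(pretrained_net=vgg, n_class=1)

x = torch.randn(2, 3, 256, 256)      # (batch, channels, H, W)
with torch.no_grad():
    y = model(x)
print(y.shape)                       # expected: torch.Size([2, 1, 256, 256])
```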
#### File: ELEC576project/UNET/main.py
```python
import argparse
import logging
import torch
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torch import autograd, optim
from UNet import Unet,resnet34_unet
from attention_unet import AttU_Net
from channel_unet import myChannelUnet
from r2unet import R2U_Net
from segnet import SegNet
from unetpp import NestedUNet
from fcn import get_fcn8s
from dataset import *
from metrics import *
from torchvision.transforms import transforms
from plot import loss_plot
from plot import metrics_plot
from torchvision.models import vgg16
# Yhead dataset
import sys
sys.path.append('..')
from dataloaders.PairedNeurons import PairedNeurons
def getArgs():
parse = argparse.ArgumentParser()
parse.add_argument('--deepsupervision', default=0)
parse.add_argument("--action", type=str, help="train/test/train&test", default="train&test")
parse.add_argument("--epoch", type=int, default=21)
parse.add_argument('--arch', '-a', metavar='ARCH', default='resnet34_unet',
help='UNet/resnet34_unet/unet++/myChannelUnet/Attention_UNet/segnet/r2unet/fcn32s/fcn8s')
parse.add_argument("--batch_size", type=int, default=1)
parse.add_argument('--dataset', default='dsb2018Cell', # dsb2018_256
help='dataset name:liver/esophagus/dsb2018Cell/corneal/driveEye/isbiCell/kaggleLung/yhead')
# parse.add_argument("--ckp", type=str, help="the path of model weight file")
parse.add_argument("--log_dir", default='result/log', help="log dir")
parse.add_argument("--threshold",type=float,default=None)
args = parse.parse_args()
return args
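# Typical invocation of this training script (flags as defined above; values illustrative):
#   python main.py --arch UNet --dataset yhead --epoch 21 --batch_size 2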
def getLog(args):
dirname = os.path.join(args.log_dir,args.arch,str(args.batch_size),str(args.dataset),str(args.epoch))
filename = dirname +'/log.log'
if not os.path.exists(dirname):
os.makedirs(dirname)
logging.basicConfig(
filename=filename,
level=logging.DEBUG,
format='%(asctime)s:%(levelname)s:%(message)s'
)
return logging
def getModel(args):
if args.arch == 'UNet':
model = Unet(3, 1).to(device)
if args.arch == 'resnet34_unet':
model = resnet34_unet(1,pretrained=False).to(device)
if args.arch == 'unet++':
args.deepsupervision = True
model = NestedUNet(args,3,1).to(device)
if args.arch =='Attention_UNet':
model = AttU_Net(3,1).to(device)
if args.arch == 'segnet':
model = SegNet(3,1).to(device)
if args.arch == 'r2unet':
model = R2U_Net(3,1).to(device)
# if args.arch == 'fcn32s':
# model = get_fcn32s(1).to(device)
if args.arch == 'myChannelUnet':
model = myChannelUnet(3,1).to(device)
if args.arch == 'fcn8s':
        assert args.dataset != 'esophagus', "The fcn8s model cannot be used on the esophagus dataset: its images are 80x80, and after five 2x downsamplings the feature map would be 2.5x2.5, which is not an integer resolution. Resize the dataset to a higher resolution before using FCN."
model = get_fcn8s(1).to(device)
if args.arch == 'cenet':
from cenet import CE_Net_
model = CE_Net_().to(device)
return model
def getDataset(args):
train_dataloaders, val_dataloaders ,test_dataloaders= None,None,None
if args.dataset == 'yhead':
train_dataset = PairedNeurons(
'/mnt/data/elec576/project/kaggle_cell_segmentation/sartorius-cell-instance-segmentation/train',
'/mnt/data/elec576/project/kaggle_cell_segmentation/sartorius-cell-instance-segmentation/train.csv',
crop_x=256, crop_y=256, norm_min=-1, norm_max=1, is_train=True, is_supervised=True
)
train_dataloaders = DataLoader(train_dataset, batch_size=args.batch_size)
val_dataset = PairedNeurons(
'/mnt/data/elec576/project/kaggle_cell_segmentation/sartorius-cell-instance-segmentation/train',
'/mnt/data/elec576/project/kaggle_cell_segmentation/sartorius-cell-instance-segmentation/train.csv',
crop_x=720, crop_y=720, norm_min=-1, norm_max=1, is_train=False, is_supervised=True
)
val_dataloaders = DataLoader(val_dataset, batch_size=1)
test_dataloaders = val_dataloaders
if args.dataset =='liver': #E:\代码\new\u_net_liver-master\data\liver\val
train_dataset = LiverDataset(r"train", transform=x_transforms, target_transform=y_transforms)
train_dataloaders = DataLoader(train_dataset, batch_size=args.batch_size)
val_dataset = LiverDataset(r"val", transform=x_transforms, target_transform=y_transforms)
val_dataloaders = DataLoader(val_dataset, batch_size=1)
test_dataloaders = val_dataloaders
if args.dataset =="esophagus":
train_dataset = esophagusDataset(r"train", transform=x_transforms,target_transform=y_transforms)
train_dataloaders = DataLoader(train_dataset, batch_size=args.batch_size)
val_dataset = esophagusDataset(r"val", transform=x_transforms,target_transform=y_transforms)
val_dataloaders = DataLoader(val_dataset, batch_size=1)
test_dataloaders = val_dataloaders
if args.dataset == "dsb2018Cell":
train_dataset = dsb2018CellDataset(r"train", transform=x_transforms, target_transform=y_transforms)
train_dataloaders = DataLoader(train_dataset, batch_size=args.batch_size)
val_dataset = dsb2018CellDataset(r"val", transform=x_transforms, target_transform=y_transforms)
val_dataloaders = DataLoader(val_dataset, batch_size=1)
test_dataloaders = val_dataloaders
if args.dataset == 'corneal':
train_dataset = CornealDataset(r'train',transform=x_transforms, target_transform=y_transforms)
train_dataloaders = DataLoader(train_dataset, batch_size=args.batch_size)
val_dataset = CornealDataset(r"val", transform=x_transforms, target_transform=y_transforms)
val_dataloaders = DataLoader(val_dataset, batch_size=1)
test_dataset = CornealDataset(r"test", transform=x_transforms, target_transform=y_transforms)
test_dataloaders = DataLoader(test_dataset, batch_size=1)
if args.dataset == 'driveEye':
train_dataset = DriveEyeDataset(r'train', transform=x_transforms, target_transform=y_transforms)
train_dataloaders = DataLoader(train_dataset, batch_size=args.batch_size)
val_dataset = DriveEyeDataset(r"val", transform=x_transforms, target_transform=y_transforms)
val_dataloaders = DataLoader(val_dataset, batch_size=1)
test_dataset = DriveEyeDataset(r"test", transform=x_transforms, target_transform=y_transforms)
test_dataloaders = DataLoader(test_dataset, batch_size=1)
if args.dataset == 'isbiCell':
train_dataset = IsbiCellDataset(r'train', transform=x_transforms, target_transform=y_transforms)
train_dataloaders = DataLoader(train_dataset, batch_size=args.batch_size)
val_dataset = IsbiCellDataset(r"val", transform=x_transforms, target_transform=y_transforms)
val_dataloaders = DataLoader(val_dataset, batch_size=1)
test_dataset = IsbiCellDataset(r"test", transform=x_transforms, target_transform=y_transforms)
test_dataloaders = DataLoader(test_dataset, batch_size=1)
if args.dataset == 'kaggleLung':
train_dataset = LungKaggleDataset(r'train', transform=x_transforms, target_transform=y_transforms)
train_dataloaders = DataLoader(train_dataset, batch_size=args.batch_size)
val_dataset = LungKaggleDataset(r"val", transform=x_transforms, target_transform=y_transforms)
val_dataloaders = DataLoader(val_dataset, batch_size=1)
test_dataset = LungKaggleDataset(r"test", transform=x_transforms, target_transform=y_transforms)
test_dataloaders = DataLoader(test_dataset, batch_size=1)
return train_dataloaders,val_dataloaders,test_dataloaders
def val(model,best_iou,val_dataloaders):
model= model.eval()
with torch.no_grad():
        i = 0  # index of the current validation image
        miou_total = 0
        hd_total = 0
        dice_total = 0
        num = len(val_dataloaders)  # total number of validation images
#print(num)
# for x, _,pic,mask in val_dataloaders:
for x,mask,_ in val_dataloaders:
x = x.to(device)
y = model(x)
if args.deepsupervision:
img_y = torch.squeeze(y[-1]).cpu().numpy()
mask = mask.squeeze().cpu().numpy()
else:
                img_y = torch.squeeze(y).cpu().numpy()  # convert the prediction to numpy before computing metrics
mask = mask.squeeze().cpu().numpy()
hd_total += get_hd(mask, img_y)
            miou_total += get_iou(mask,img_y)  # add the mIoU of the current prediction to the running total
dice_total += get_dice(mask,img_y)
            if i < num: i += 1  # move on to the next validation image
aver_iou = miou_total / num
aver_hd = hd_total / num
aver_dice = dice_total/num
print('Miou=%f,aver_hd=%f,aver_dice=%f' % (aver_iou,aver_hd,aver_dice))
logging.info('Miou=%f,aver_hd=%f,aver_dice=%f' % (aver_iou,aver_hd,aver_dice))
if aver_iou > best_iou:
print('aver_iou:{} > best_iou:{}'.format(aver_iou,best_iou))
logging.info('aver_iou:{} > best_iou:{}'.format(aver_iou,best_iou))
logging.info('===========>save best model!')
best_iou = aver_iou
print('===========>save best model!')
torch.save(model.state_dict(), r'./saved_model/'+str(args.arch)+'_'+str(args.batch_size)+'_'+str(args.dataset)+'_'+str(args.epoch)+'.pth')
return best_iou,aver_iou,aver_dice,aver_hd
def train(model, criterion, optimizer, train_dataloader,val_dataloader, args):
best_iou,aver_iou,aver_dice,aver_hd = 0,0,0,0
num_epochs = args.epoch
threshold = args.threshold
loss_list = []
iou_list = []
dice_list = []
hd_list = []
for epoch in range(num_epochs):
model = model.train()
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
logging.info('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
dt_size = len(train_dataloader.dataset)
epoch_loss = 0
step = 0
for x, y,_ in train_dataloader:
# for x, y,_,mask in train_dataloader:
step += 1
inputs = x.to(device)
labels = y.to(device)
# zero the parameter gradients
optimizer.zero_grad()
if args.deepsupervision:
outputs = model(inputs)
loss = 0
for output in outputs:
loss += criterion(output, labels)
loss /= len(outputs)
else:
output = model(inputs)
loss = criterion(output, labels)
            if threshold is not None:
if loss > threshold:
loss.backward()
optimizer.step()
epoch_loss += loss.item()
else:
loss.backward()
optimizer.step()
epoch_loss += loss.item()
print("%d/%d,train_loss:%0.3f" % (step, (dt_size - 1) // train_dataloader.batch_size + 1, loss.item()))
logging.info("%d/%d,train_loss:%0.3f" % (step, (dt_size - 1) // train_dataloader.batch_size + 1, loss.item()))
loss_list.append(epoch_loss)
best_iou,aver_iou,aver_dice,aver_hd = val(model,best_iou,val_dataloader)
iou_list.append(aver_iou)
dice_list.append(aver_dice)
hd_list.append(aver_hd)
print("epoch %d loss:%0.3f" % (epoch, epoch_loss))
logging.info("epoch %d loss:%0.3f" % (epoch, epoch_loss))
loss_plot(args, loss_list)
metrics_plot(args, 'iou&dice',iou_list, dice_list)
metrics_plot(args,'hd',hd_list)
return model
def test(val_dataloaders,save_predict=False):
logging.info('final test........')
if save_predict ==True:
dir = os.path.join(r'./saved_predict',str(args.arch),str(args.batch_size),str(args.epoch),str(args.dataset))
if not os.path.exists(dir):
os.makedirs(dir)
else:
print('dir already exist!')
    model.load_state_dict(torch.load(r'./saved_model/'+str(args.arch)+'_'+str(args.batch_size)+'_'+str(args.dataset)+'_'+str(args.epoch)+'.pth', map_location='cpu'))  # load the trained model
model.eval()
    # plt.ion()  # enable interactive plotting mode
pic_file_num = 0
with torch.no_grad():
        i = 0  # index of the current validation image
        miou_total = 0
        hd_total = 0
        dice_total = 0
        num = len(val_dataloaders)  # total number of validation images
# for pic,_,pic_path,mask_path in val_dataloaders:
for pic,mask,_ in val_dataloaders:
pic = pic.to(device)
predict = model(pic)
if args.deepsupervision:
predict = torch.squeeze(predict[-1]).cpu().numpy()
mask = mask.squeeze().cpu().numpy()
else:
                predict = torch.squeeze(predict).cpu().numpy()  # convert the prediction to numpy before computing metrics
mask = mask.squeeze().cpu().numpy()
            # img_y = torch.squeeze(y).cpu().numpy()  # (same numpy conversion, kept for reference)
iou = get_iou(mask,predict)
            miou_total += iou  # add the IoU of the current prediction to the running total
hd_total += get_hd(mask, predict)
dice = get_dice(mask,predict)
dice_total += dice
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
ax1.set_title('input')
pic = pic[0,0,:,:]
plt.imshow(pic.squeeze().detach().cpu().numpy())
# plt.imshow(Image.open(pic_path[0]))
#print(pic_path[0])
ax2 = fig.add_subplot(1, 3, 2)
ax2.set_title('predict')
plt.imshow(predict,cmap='Greys_r')
ax3 = fig.add_subplot(1, 3, 3)
ax3.set_title('mask')
# plt.imshow(Image.open(mask_path[0]), cmap='Greys_r')
plt.imshow(mask, cmap='Greys_r')
#print(mask_path[0])
if save_predict == True:
if args.dataset == 'driveEye':
pass
# saved_predict = dir + '/' + mask_path[0].split('/')[-1]
# saved_predict = '.'+saved_predict.split('.')[1] + '.tif'
# plt.savefig(saved_predict)
else:
# plt.savefig(dir +'/'+ mask_path[0].split('/')[-1])
plt.savefig(dir + f'/{pic_file_num}.png')
pic_file_num += 1
#plt.pause(0.01)
print('iou={},dice={}'.format(iou,dice))
            if i < num: i += 1  # move on to the next validation image
#plt.show()
        print('Miou=%f,aver_hd=%f,aver_dice=%f' % (miou_total/num,hd_total/num,dice_total/num))
        logging.info('Miou=%f,aver_hd=%f,aver_dice=%f' % (miou_total/num,hd_total/num,dice_total/num))
#print('M_dice=%f' % (dice_total / num))
if __name__ =="__main__":
x_transforms = transforms.Compose([
transforms.ToTensor(), # -> [0,1]
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) # ->[-1,1]
])
    # the mask only needs to be converted to a tensor
y_transforms = transforms.ToTensor()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
args = getArgs()
logging = getLog(args)
print('**************************')
print('models:%s,\nepoch:%s,\nbatch size:%s\ndataset:%s' % \
(args.arch, args.epoch, args.batch_size,args.dataset))
logging.info('\n=======\nmodels:%s,\nepoch:%s,\nbatch size:%s\ndataset:%s\n========' % \
(args.arch, args.epoch, args.batch_size,args.dataset))
print('**************************')
model = getModel(args)
train_dataloaders,val_dataloaders,test_dataloaders = getDataset(args)
criterion = torch.nn.BCELoss()
optimizer = optim.Adam(model.parameters())
if 'train' in args.action:
train(model, criterion, optimizer, train_dataloaders,val_dataloaders, args)
if 'test' in args.action:
test(test_dataloaders, save_predict=True)
```
#### File: ELEC576project/UNET/plot.py
```python
import matplotlib.pyplot as plt
import os
def loss_plot(args,loss):
num = args.epoch
x = [i for i in range(num)]
plot_save_path = r'result/plot/'
if not os.path.exists(plot_save_path):
os.makedirs(plot_save_path)
save_loss = plot_save_path+str(args.arch)+'_'+str(args.batch_size)+'_'+str(args.dataset)+'_'+str(args.epoch)+'_loss.jpg'
plt.figure()
plt.plot(x,loss,label='loss')
plt.legend()
plt.savefig(save_loss)
def metrics_plot(arg,name,*args):
num = arg.epoch
names = name.split('&')
metrics_value = args
i=0
x = [i for i in range(num)]
plot_save_path = r'result/plot/'
if not os.path.exists(plot_save_path):
os.makedirs(plot_save_path)
save_metrics = plot_save_path + str(arg.arch) + '_' + str(arg.batch_size) + '_' + str(arg.dataset) + '_' + str(arg.epoch) + '_'+name+'.jpg'
plt.figure()
for l in metrics_value:
plt.plot(x,l,label=str(names[i]))
#plt.scatter(x,l,label=str(l))
i+=1
plt.legend()
plt.savefig(save_metrics)
``` |
{
"source": "jianghai33/hapjs",
"score": 2
} |
#### File: external/public/ndk_build.py
```python
import os
import sys
import re
import linecache
V8_REL_PATH = 'external/v8'
MK_DIRS = []
LOCAL_PROPERTIES_DIRS = []
CONFIGS = {}
DIR_KEYS = [
'root-path',
'v8-tools-path'
]
def parse_args(argv):
global CONFIGS
count = len(argv)
if (count < len(DIR_KEYS) * 2):
print('need parameters --root-path, --v8-tools-path')
sys.exit(1)
i = 0
while i < count:
arg = argv[i]
if arg[:2] == '--':
key = arg[2:]
if (key in DIR_KEYS):
CONFIGS[key] = argv[i + 1]
i = i + 2
else:
print('arg error', arg)
sys.exit(1)
def check_v8_dir():
v8_tools_path = CONFIGS['v8-tools-path']
if os.path.exists(v8_tools_path):
print('v8 directory already exists')
return
else:
download_v8()
def download_v8():
username = get_user_name()
print('username is :', username)
git_ssh_url = 'ssh://' + username + "@gerrit.sodajs.org:29418/hap/external/v8"
cmds = ['git', 'clone', git_ssh_url]
command_line = ' '.join(cmds)
print(command_line)
v8_dir_pos = CONFIGS['v8-tools-path'].rfind('v8')
v8_dir = CONFIGS['v8-tools-path'][:v8_dir_pos]
os.chdir(v8_dir)
os.system(command_line)
def get_user_name():
cmds = ['git', 'config', 'user.name']
command_line = ' '.join(cmds)
print(command_line)
username = os.popen(command_line).read()
if (username == ''):
print('get user name error')
sys.exit(1)
username = username.strip('\n')
return username
def write_local_properties(local_properties_dir):
SPECIAL_TAG = '# add by ndk_build.py'
has_special_tag = False
ndk_dir_pattern = re.compile('[ ]*ndk.dir[ ]*=')
lp_path = os.path.join(local_properties_dir, 'local.properties')
comment_line = SPECIAL_TAG + '\n'
ndk_line = 'ndk.dir='+os.path.join(CONFIGS['v8-tools-path'], 'ndk') + '\n'
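    # For illustration (the ndk path below is hypothetical), the two lines this
    # function appends when the special tag is not yet present look like:
    #   # add by ndk_build.py
    #   ndk.dir=/home/user/hap/external/v8/ndk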
file_content = linecache.getlines(lp_path)
i = 0
while i < len(file_content):
if (file_content[i].find(SPECIAL_TAG) >= 0 and i < len(file_content) - 1):
file_content[i + 1] = ndk_line
i = i + 2
has_special_tag = True
continue
if (re.match(ndk_dir_pattern, file_content[i])):
file_content[i] = '#' + file_content[i]
i = i + 1
continue
i = i + 1
with open(lp_path, 'w') as f:
if len(file_content) > 0:
f.writelines(file_content)
if (has_special_tag == False):
f.write(comment_line)
f.write(ndk_line)
return
def add_local_properties():
for lp_dir in LOCAL_PROPERTIES_DIRS:
write_local_properties(lp_dir)
print('add local.properties in', lp_dir)
def filter_local_properties(lp_dir):
# remove repeat dir
for lp in LOCAL_PROPERTIES_DIRS:
if (lp_dir != lp and lp_dir.find(lp) >=0):
return False
return True
def get_local_properties_dirs_and_mk_dirs():
global MK_DIRS, LOCAL_PROPERTIES_DIRS
for relpath, dirs, files in os.walk(CONFIGS['root-path']):
if ('build.gradle' in files and relpath.find(V8_REL_PATH) < 0):
LOCAL_PROPERTIES_DIRS.append(os.path.join(CONFIGS['root-path'],\
relpath));
if ('Application.mk' in files and relpath.find(V8_REL_PATH) < 0):
MK_DIRS.append(os.path.join(CONFIGS['root-path'], relpath))
# remove unnecessary build.gradle dir
LOCAL_PROPERTIES_DIRS = \
filter(filter_local_properties, LOCAL_PROPERTIES_DIRS)
def check_application_mk():
SPECIAL_TAG = '#SPECIAL'
special_app_stl_pattern = re.compile('APP_STL[ ]*' + SPECIAL_TAG)
default_app_stl_pattern = re.compile('APP_STL[ ]*[:]?=[ ]*c\+\+_shared')
for mk_dir in MK_DIRS:
mk_path = os.path.join(mk_dir, 'Application.mk')
with open(mk_path, 'r') as f:
for line in f:
line = line.strip()
if (line.startswith('#')):
continue
if (re.match(special_app_stl_pattern, line)):
continue
if (line.startswith('APP_STL')):
                    if (not re.match(default_app_stl_pattern, line)):
print('APP_STL is not c++_shared in',line, mk_path)
sys.exit(1)
    print('Application.mk is correct')
return
def main():
parse_args(sys.argv[1:])
get_local_properties_dirs_and_mk_dirs()
check_v8_dir()
check_application_mk()
add_local_properties()
if __name__ == '__main__':
main()
``` |
{
"source": "jianghaibo12138/mall4micro",
"score": 2
} |
#### File: jianghaibo12138/mall4micro/load_conf_to_consul.py
```python
import os
import requests
YAML_CONF_PATH = os.path.join(os.path.dirname(__file__), "yaml_conf")
CONSUL_URL = "http://127.0.0.1:8500/v1/kv"
TOKEN = os.getenv("CONSUL_TOKEN")
HEADER = {"X-Consul-Token": TOKEN, "Content-Type": "application.json"}
def upload_config_2_consul(file_name):
with open(os.path.join(YAML_CONF_PATH, file_name)) as f:
conf_lines = f.readlines()
response = requests.put(
"{}/{}".format(CONSUL_URL, file_name), headers=HEADER, data="".join(conf_lines))
return response.status_code == 200
def run():
file_list = os.listdir(YAML_CONF_PATH)
for file_name in file_list:
success = upload_config_2_consul(file_name)
if success:
print("[Load Config] file: {} success!".format(file_name))
else:
print("[Load Config] file: {} failed!".format(file_name))
if __name__ == '__main__':
run()
``` |
{
"source": "jianghaiming0707/python1806homework",
"score": 3
} |
#### File: home_work/App/models.py
```python
from django.db import models
# Create your models here.
class Myclass(models.Model):
    name = models.CharField(max_length=30, verbose_name="class name", db_index=True)
    ctime = models.DateTimeField(auto_now_add=True, verbose_name="creation time")
    isactive = models.BooleanField(verbose_name="is active")
    def __str__(self):
        return self.name
class Student(models.Model):
    name = models.CharField(max_length=30, verbose_name="name", db_index=True)
    stime = models.DateTimeField(auto_now_add=True, verbose_name="enrollment time")
    age = models.IntegerField(verbose_name="age")
    grade = models.FloatField(verbose_name="grade")
    cid = models.ForeignKey(Myclass, verbose_name="class ID", db_index=True)
def __str__(self):
return self.name
```
#### File: home_work/App/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from App.models import *
# Create your views here.
def search(req):
    myclass = Myclass.objects.all()
    return render(req, 'test.html', context={'myclass': myclass})
def students(req):
    students_id = req.GET.get('classid')
    students = Student.objects.all()
    students = students.filter(cid_id=students_id)
    return render(req, 'student.html', context={'students': students})
``` |
{
"source": "jianghaiming0707/whoami",
"score": 2
} |
#### File: whoami/teach04/models.py
```python
from django.db import models
# Create your models here.
class ComLanguage(models.Model):
name = models.CharField(
max_length=20,
        verbose_name='language name'
)
desc = models.CharField(
max_length=200,
        verbose_name='description',
null = True
)
def get_desc(self):
return self.desc
class Engineer(models.Model):
name = models.CharField(
max_length=20,
        verbose_name='name'
)
sex = models.CharField(
max_length=4,
        default='male'
)
comlanguage = models.ForeignKey(
ComLanguage,
        verbose_name='programming language'
)
start = models.DateTimeField(
auto_now_add=True,
        verbose_name='employment start time'
)
``` |
{
"source": "jianghan0213/SMOKE",
"score": 2
} |
#### File: tools/pykitti_eval/kitti_eval.py
```python
import os
import numpy as np
from tools.pykitti_eval import kitti_common as kitti
from tools.pykitti_eval.eval import get_coco_eval_result, get_official_eval_result
def evaluate_kitti_mAP(gt_label_path, pred_label_path, class_name = ["Car", "Pedestrian", "Cyclist"]):
if not os.path.exists(gt_label_path):
print("gt_label_path not found")
if not os.path.exists(pred_label_path):
print("pred_label_path not found")
pred_annos, image_ids = kitti.get_label_annos(pred_label_path, return_ids=True)
gt_annos = kitti.get_label_annos(gt_label_path, image_ids=image_ids)
print("pred_annos: ", len(pred_annos))
print("gt_annos: ", len(gt_annos))
result_dict = dict()
if len(pred_annos) > 0:
result, mAPbbox, mAPbev, mAP3d, mAPaos = get_official_eval_result(gt_annos, pred_annos, class_name, return_data=True)
result_dict["mAPbbox"] = mAPbbox[0, :, 0] if mAPbbox is not None else np.zeros((3))
result_dict["mAPbev"] = mAPbev[0, :, 0] if mAPbev is not None else np.zeros((3))
result_dict["mAP3d"] = mAP3d[0, :, 0] if mAP3d is not None else np.zeros((3))
result_dict["mAPaos"] = mAPaos[0, :, 0] if mAPaos is not None else np.zeros((3))
result_dict["result"] = result if result is not None else ""
return result_dict
``` |
{
"source": "jianghan2013/NMR_clustering",
"score": 3
} |
#### File: NMR_clustering/utilis/evaluation.py
```python
import numpy as np
from sklearn.linear_model import LinearRegression
class Evaluate(object):
    def __init__(self, model_names, X_train, y_preds, config, verbose=0):
        self.distance_min = config['distance_min']  # e.g. 0.05
        self.point_min = config['point_min']  # e.g. 50
        self.model_names = model_names
        self.X_train = X_train
        self.y_preds = y_preds
        self.verbose = verbose
        self.metrics = {'ratios': {}, 'slopes': {}, 'inters': {}, 'slopes_raw': {}}
        self.boundary_points = {}
def fit(self):
for model_name in self.model_names:
ratios = get_ratio_range(self.X_train, self.y_preds[model_name])
slopes, inters, slopes_raw, boundaries = get_boundary_and_slope(self.X_train, self.y_preds[model_name], self.distance_min, self.point_min)
self.metrics['ratios'][model_name] =ratios
self.metrics['slopes'][model_name] = slopes
self.metrics['slopes_raw'][model_name] = slopes_raw
self.metrics['inters'][model_name] = inters
self.boundary_points[model_name] = boundaries
if self.verbose:
print('model_name {}, metrics ratios {}, slopes {}, inters{}'.format(model_name,
self.metrics['ratios'][model_name], self.metrics['slopes'][model_name],
self.metrics['inters'][model_name]))
return self
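# Minimal usage sketch (model name and inputs are illustrative):
#   config = {'distance_min': 0.05, 'point_min': 50}
#   ev = Evaluate(['kmeans'], X_train, {'kmeans': y_pred}, config, verbose=1).fit()
#   ev.metrics['slopes']['kmeans']  # per-boundary deviations from a 45-degree slope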
def get_ratio_range(X_train, y_pred):
"""
Compute range ratio index
"""
range_ratios=[]
n_components = max(y_pred)+1
for i in range(n_components):
X_train_i = X_train[y_pred==i]
T2_v = 10**(X_train_i[:,0])
T1_v = 10**(X_train_i[:,1])
range_ratio = (np.max(T1_v/T2_v)/np.min(T1_v/T2_v))
range_ratios.append(range_ratio)
return range_ratios
def get_boundary_from_two_clusters_(cluster_a, cluster_b, distance_min = 0.05):
    # cluster_a: array of shape (n, 2)
    # cluster_b: array of shape (m, 2)
    id_a = set()  # row indices of near-boundary points in cluster_a
    id_b = set()  # row indices of near-boundary points in cluster_b
    for i in range(cluster_a.shape[0]):
        cluster_a_i = cluster_a[i,:]
        distance_list = np.sqrt( (cluster_a_i[0]-cluster_b[:,0])**2 + (cluster_a_i[1]-cluster_b[:,1])**2)
        distance_ = np.amin(distance_list)  # minimum distance from this point to cluster_b
        if distance_ < distance_min:
            j = np.argmin(distance_list)
            id_a.add(i)
            id_b.add(j)
    if len(id_a) == 0 and len(id_b) == 0:
return []
else:
id_a = list(id_a)
id_b = list(id_b)
id_a.sort()
id_b.sort()
boundary_points = np.vstack( (cluster_a[id_a,:],cluster_b[id_b,:] ) )
return boundary_points
def get_boundary_and_slope(X_train, y_pred, distance_min=0.05, point_min=50):
    # point_min: minimum number of points required to keep a boundary
    # compute the decision boundaries between clusters and their slopes
    boundary_list = []  # boundary points for each pair of adjacent clusters
    slope_raw_list = []
    angle_diff_list = []  # absolute deviation of each boundary slope from 45 degrees
    inter_list = []
n_components = max(y_pred)+1
data_all = [X_train[y_pred==i] for i in range(n_components)] # get each cluster points
for i in range(n_components-1):
for j in range(i+1, n_components):
cluster_a = data_all[i]
cluster_b = data_all[j]
boundary_points = get_boundary_from_two_clusters_(cluster_a, cluster_b,distance_min = distance_min)
if len(boundary_points) > point_min:
boundary_list.append(boundary_points)
# linear regression
lr_ = LinearRegression()
X_ = boundary_points[:,0].reshape(-1,1)
y_ = boundary_points[:,1]
lr_.fit(X_,y_)
slope = lr_.coef_[0]/np.pi*180
inter = lr_.intercept_
slope_raw_list.append(slope)
inter_list.append(inter)
diff_slope = abs(slope-45)
                angle_diff_list.append(diff_slope)  # deviation from the 45-degree reference line
return angle_diff_list, inter_list, slope_raw_list, boundary_list
```
#### File: NMR_clustering/utilis/visualization.py
```python
from itertools import cycle, islice
import matplotlib.pyplot as plt
import numpy as np
def plot_Xtrain(X_train):
plt.figure()
plt.plot(X_train[:,0], X_train[:,1], 'o', markersize=2, alpha=0.1)
plt.xlim([-2,3])
plt.ylim([-2,3])
plt.xlabel('log(T2) (ms)')
plt.ylabel('log(T1) (ms)')
def plot_clustering(X_train, y_pred, model_name='model', plot_save=False,
                    plot_line=True, plot_center=False, plot_title=True, centers=None):
colors = np.array(list(islice(cycle(['b', 'y','r', 'g','purple','k','maroon','olive']),int(max(y_pred) + 1))))
plt.figure(figsize=(5,5))
plt.scatter(X_train[:, 0], X_train[:, 1], s=3, color=colors[y_pred], alpha=0.2)
plt.xlim([-2,3])
plt.ylim([-2,3])
plt.xticks([-2,-1,0,1,2,3],fontsize=15)
plt.yticks([-2,-1,0,1,2,3],fontsize=15)
if plot_line:
plt.plot([-2,3],[-2,3],'k')
plt.plot([-2,3],[-1,4],'k')
plt.plot([-2,3],[-0,5],'k')
    if plot_center and centers is not None:
for (x,y,label) in centers:
plt.text(x,y,int(label), fontsize=30)
plt.plot(x,y,'k*')
#plt.xlabel(r'$\log{T_2}$')
#plt.ylabel(r'$\log{T_1}$')
if plot_title:
plt.title(model_name,fontsize=18)
if plot_save:
plt.savefig(model_name)
# plot for bd
def plot_boundary(boundary_list,model_name='sample',plot_title = True,plot_save = False):
plt.figure(figsize=(5,5))
for bd_points in boundary_list:
plt.plot(bd_points[:,0],bd_points[:,1],'.')
plt.plot([-2,3],[-2,3],'k')
plt.plot([-2,3],[-1,4],'k')
plt.plot([-2,3],[0,5],'k')
plt.xlim(-2., 3.)
plt.ylim(-2., 3.)
plt.xlabel('log T2 (ms)')
plt.ylabel('log T1 (ms)')
if plot_title:
plt.title(model_name,fontsize=18)
if plot_save:
plt.savefig(model_name)
``` |
{
"source": "jianghaochu/econlib",
"score": 4
} |
#### File: econlib/econlib/example_module.py
```python
class ExampleClass(object):
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
in an ``Attributes`` section and follow the same formatting as a
function's ``Args`` section. Alternatively, attributes may be documented
inline with the attribute's declaration (see __init__ method below).
Properties created with the ``@property`` decorator should be documented
in the property's getter method.
Parameters
----------
param1 : str
Description of `param1`.
param2 : :obj:`list` of :obj:`str`
Description of `param2`. Multiple
lines are supported.
param3 : :obj:`int`, optional
Description of `param3`.
Attributes
----------
attr1 : str
Description of `attr1`.
attr2 : :obj:`int`, optional
Description of `attr2`.
"""
def __init__(self, param1, param2, param3):
self.attr1 = param1
self.attr2 = param2
self.attr3 = param3 #: Doc comment *inline* with attribute
#: list of str: Doc comment *before* attribute, with type specified
self.attr4 = ["attr4"]
self.attr5 = None
"""str: Docstring *after* attribute, with type specified."""
@property
def readonly_property(self):
"""str: Properties should be documented in their getter method."""
return "readonly_property"
@property
def readwrite_property(self):
""":obj:`list` of :obj:`str`: Properties with both a getter and setter
should only be documented in their getter method.
If the setter method contains notable behavior, it should be
mentioned here.
"""
return ["readwrite_property"]
@readwrite_property.setter
def readwrite_property(self, value):
value
def example_method(self, param1, param2):
"""Class methods are similar to regular functions.
Note
----
Do not include the `self` parameter in the ``Parameters`` section.
Parameters
----------
param1
The first parameter.
param2
The second parameter.
Returns
-------
bool
True if successful, False otherwise.
"""
return True
def __special__(self):
"""By default special members with docstrings are not included.
Special members are any methods or attributes that start with and
end with a double underscore. Any special member with a docstring
will be included in the output, if
``napoleon_include_special_with_doc`` is set to True.
This behavior can be enabled by changing the following setting in
Sphinx's conf.py::
napoleon_include_special_with_doc = True
"""
pass
def __special_without_docstring__(self):
pass
def _private(self):
"""By default private members are not included.
Private members are any methods or attributes that start with an
underscore and are *not* special. By default they are not included
in the output.
This behavior can be changed such that private members *are* included
by changing the following setting in Sphinx's conf.py::
napoleon_include_private_with_doc = True
"""
pass
def _private_without_docstring(self):
pass
``` |
{
"source": "JiangHaoJoinGitHubj/dcrameri",
"score": 2
} |
#### File: dcrameri/sentry/events.py
```python
import re
import sys
from sentry import app
from sentry.utils import transform
__all__ = ('BaseEvent', 'Exception', 'Message', 'Query')
class BaseEvent(object):
def to_string(self, data):
raise NotImplementedError
def get_data(self, **kwargs):
return {}
def get_tags(self, **kwargs):
return []
def capture(self, **kwargs):
# tags and culprit are special cased and not stored with the
# default metadata
return {
'culprit': None,
'tags': self.get_tags(**kwargs),
self.interface: self.get_data(**kwargs),
}
class Exception(BaseEvent):
"""
Exceptions store the following metadata:
- value: 'My exception value'
- type: 'module.ClassName'
- frames: a list of serialized frames (see _get_traceback_frames)
- template: 'template/name.html'
"""
interface = 'sentry.interfaces.Exception'
def to_string(self, data):
if data['value']:
return '%s: %s' % (data['type'], data['value'])
return data['type']
def get_event_hash(self, type, value, **kwargs):
# TODO: Need to add in the frames without line numbers
return [type, value]
def capture(self, exc_info=None, **kwargs):
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exc_value, exc_traceback = exc_info
tags = [('level', 'error')]
culprit = self._get_culprit(exc_info[2])
if hasattr(exc_type, '__class__'):
exc_module = exc_type.__class__.__module__
if exc_module == '__builtin__':
exc_type = exc_type.__name__
else:
exc_type = '%s.%s' % (exc_module, exc_type.__name__)
else:
exc_module = None
exc_type = exc_type.__name__
# if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, 'source'):
# origin, (start, end) = exc_value.source
# result['template'] = (origin.reload(), start, end, origin.name)
# result['tags'].append(('template', origin.loadname))
return {
'culprit': culprit,
'tags': tags,
'sentry.interfaces.Exception': {
'value': transform(exc_value),
'type': exc_type,
},
'sentry.interfaces.Stacktrace': {
'frames': self._get_traceback_frames(exc_traceback)
},
}
def _iter_tb(self, tb):
while tb:
# support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
            if tb.tb_frame.f_locals.get('__traceback_hide__'):
                tb = tb.tb_next
                continue
yield tb
tb = tb.tb_next
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
try:
f = open(filename)
try:
source = f.readlines()
finally:
f.close()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(r'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1)
break
source = [unicode(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
def _get_culprit(self, traceback):
# We iterate through each frame looking for a deterministic culprit
# When one is found, we mark it as last "best guess" (best_guess) and then
        # check it against SENTRY_EXCLUDE_PATHS. If it isn't listed, then we
# use this option. If nothing is found, we use the "best guess".
def contains(iterator, value):
for k in iterator:
if value.startswith(k):
return True
return False
if app.config['INCLUDE_PATHS']:
modules = app.config['INCLUDE_PATHS']
else:
modules = []
best_guess = None
for tb in self._iter_tb(traceback):
frame = tb.tb_frame
try:
culprit = '.'.join([frame.f_globals['__name__'], frame.f_code.co_name])
except:
continue
if contains(modules, culprit):
if not (contains(app.config['EXCLUDE_PATHS'], culprit) and best_guess):
best_guess = culprit
elif best_guess:
break
return best_guess
def _get_traceback_frames(self, tb):
frames = []
for tb in self._iter_tb(tb):
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__')
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is not None:
frames.append({
'id': id(tb),
'filename': filename,
'module': module_name,
'function': function,
'lineno': lineno + 1,
# TODO: vars need to be references
'vars': tb.tb_frame.f_locals,
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
return frames
class Message(BaseEvent):
"""
Messages store the following metadata:
- message: 'My message from %s about %s'
- params: ('foo', 'bar')
"""
interface = 'sentry.interfaces.Message'
def to_string(self, data):
return data['message'] % tuple(data.get('params', ()))
def get_event_hash(self, message, params=(), **kwargs):
return [message] + list(params)
def get_data(self, message, params=(), **kwargs):
return {
'message': message,
'params': params,
}
class Query(BaseEvent):
"""
Messages store the following metadata:
- query: 'SELECT * FROM table'
- engine: 'postgesql_psycopg2'
"""
interface = 'sentry.interfaces.Query'
def to_string(self, data):
return data['query']
def get_event_hash(self, query, engine, **kwargs):
return [query, engine]
def get_data(self, query, engine, **kwargs):
return {
'query': query,
'engine': engine,
}
```
#### File: dcrameri/sentry/models.py
```python
from __future__ import absolute_import
import datetime
import hashlib
from sentry.interfaces import unserialize
from sentry.db import models
from sentry.utils.compat import math
class Group(models.Model):
"""
    Stores an aggregate (summary) of Events for a combination of tags
given a slice.
"""
# key is (type, hash)
# this is the combination of md5(' '.join(tags)) + md5(event)
type = models.String() # length 32
hash = models.String() # length 32
# one line summary used for rendering
message = models.Text()
state = models.Integer(default=0)
count = models.Integer(default=0)
score = models.Float(default=0.0)
time_spent = models.Integer(default=0)
first_seen = models.DateTime(default=datetime.datetime.now)
last_seen = models.DateTime(default=datetime.datetime.now)
# This is a meta element which needs magically created or something
# score = models.Float(default=0.0)
tags = models.List()
class Meta:
ordering = 'last_seen'
sortables = ('time_spent', 'first_seen', 'last_seen', 'score')
indexes = (('type', 'hash'),)
def save(self, *args, **kwargs):
created = not self.pk
self.score = self.get_score()
super(Group, self).save(*args, **kwargs)
if created:
EventType.add_group(self)
Tag.add_group(self)
def delete(self, *args, **kwargs):
super(Group, self).delete(*args, **kwargs)
EventType.remove_group(self)
Tag.remove_group(self)
def get_score(self):
return float(abs(math.log(self.count) * 600 + float(self.last_seen.strftime('%s.%m'))))
class Event(models.Model):
"""
An individual event. It's processor (type) handles input and output, as well as
group summarization.
"""
# the hash of this event is defined by its processor (type)
hash = models.String()
type = models.String()
date = models.DateTime(default=datetime.datetime.now)
time_spent = models.Integer(default=0) # in ms
tags = models.List()
class Meta:
ordering = 'date'
def get_version(self):
if not self.data:
return
if 'version' not in self.data:
return
return self.data['version']
def get_processor(self):
# TODO: should use general import cache
mod_name, class_name = self.type.rsplit('.', 1)
processor = getattr(__import__(mod_name, {}, {}, [class_name]), class_name)()
return processor
def get_interfaces(self):
# TODO: should use general import cache
interfaces = []
for k, v in self.data.iteritems():
if '.' not in k:
continue
mod_name, class_name = k.rsplit('.', 1)
interface = getattr(__import__(mod_name, {}, {}, [class_name]), class_name)
interfaces.append(unserialize(interface, v))
return interfaces
class EventType(models.Model):
"""
Stores a list of all event types seen, as well as
a tally of the number of events recorded.
"""
# full module path to Event class, e.g. sentry.events.Exception
path = models.String()
# number of unique groups seen for this event
count = models.Integer(default=0)
class Meta:
ordering = 'count'
indexes = (('path',),)
def __unicode__(self):
return self.path
@classmethod
def add_group(cls, group):
et, created = cls.objects.get_or_create(
path=group.type,
defaults={
'count': 1,
}
)
if not created:
et.incr('count', 1)
@classmethod
def remove_group(cls, group):
try:
et = cls.objects.get(path=group.type)
except EventType.DoesNotExist:
return
et.decr('count', 1)
if et.count <= 0:
et.delete()
class Tag(models.Model):
"""
Stores a unique value of a tag.
"""
key = models.String() # length 16?
# hash is md5('key=value')
hash = models.String() # length 32
value = models.String()
count = models.Integer(default=0)
class Meta:
ordering = 'count'
indexes = (('hash',), ('key',))
def __unicode__(self):
return u"%s=%s; count=%s" % (self.key, self.value, self.count)
@classmethod
def add_group(cls, group):
for key, value in group.tags:
hash = hashlib.md5(u'%s=%s' % (key, value)).hexdigest()
tag, created = cls.objects.get_or_create(
hash=hash,
defaults={
'key': key,
'value': value,
'count': 1,
}
)
if not created:
tag.incr('count', 1)
@classmethod
def remove_group(cls, group):
for key, value in group.tags:
try:
tag = cls.objects.get(hash=hash)
except cls.DoesNotExist:
continue
tag.decr('count', 1)
if tag.count <= 0:
tag.delete()
```
#### File: sentry/utils/__init__.py
```python
import datetime
import hashlib
import logging
import sys
import uuid
import warnings
from pprint import pformat
from types import ClassType, TypeType
import sentry
from sentry.utils.encoding import force_unicode
def construct_checksum(level=logging.ERROR, class_name='', traceback='', message='', **kwargs):
checksum = hashlib.md5(str(level))
checksum.update(class_name or '')
if traceback:
traceback = '\n'.join(traceback.split('\n')[:-3])
message = traceback or message
if isinstance(message, unicode):
message = message.encode('utf-8', 'replace')
checksum.update(message)
return checksum.hexdigest()
def varmap(func, var, context=None):
if context is None:
context = {}
objid = id(var)
if objid in context:
return func('<...>')
context[objid] = 1
if isinstance(var, dict):
ret = dict((k, varmap(func, v, context)) for k, v in var.iteritems())
elif isinstance(var, (list, tuple)):
ret = [varmap(func, f, context) for f in var]
else:
ret = func(var)
del context[objid]
return ret
def has_sentry_metadata(value):
try:
return callable(getattr(value, '__sentry__', None))
except:
return False
def transform(value, stack=[], context=None):
# TODO: make this extendable
# TODO: include some sane defaults, like UUID
# TODO: dont coerce strings to unicode, leave them as strings
if context is None:
context = {}
objid = id(value)
if objid in context:
return '<...>'
context[objid] = 1
    transform_rec = lambda o: transform(o, stack + [value], context)
    if any(value is s for s in stack):
        ret = 'cycle'
    elif isinstance(value, (tuple, list, set, frozenset)):
try:
ret = type(value)(transform_rec(o) for o in value[:])
except:
ret = tuple(transform_rec(o) for o in value)
elif isinstance(value, uuid.UUID):
ret = repr(value)
elif isinstance(value, datetime.datetime):
ret = value.strftime('%Y-%m-%dT%H:%M:%S.%f')
elif isinstance(value, datetime.date):
ret = value.strftime('%Y-%m-%d')
elif isinstance(value, dict):
ret = dict((k, transform_rec(v)) for k, v in value.iteritems())
elif isinstance(value, unicode):
ret = to_unicode(value)
elif isinstance(value, str):
try:
ret = str(value)
except:
ret = to_unicode(value)
elif not isinstance(value, (ClassType, TypeType)) and \
has_sentry_metadata(value):
ret = transform_rec(value.__sentry__())
elif not isinstance(value, (int, bool)) and value is not None:
# XXX: we could do transform(repr(value)) here
ret = to_unicode(value)
else:
ret = value
del context[objid]
return ret
def to_unicode(value):
try:
value = unicode(force_unicode(value))
except (UnicodeEncodeError, UnicodeDecodeError):
value = '(Error decoding value)'
except Exception: # in some cases we get a different exception
try:
value = str(repr(type(value)))
except Exception:
value = '(Error decoding value)'
return value
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_missing = _Missing()
class cached_property(object):
# This is borrowed from werkzeug : http://bytebucket.org/mitsuhiko/werkzeug-main
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
.. versionchanged:: 0.6
the `writeable` attribute and parameter was deprecated. If a
cached property is writeable or not has to be documented now.
For performance reasons the implementation does not honor the
writeable setting and will always make the property writeable.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
# overhead. If one choses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None, writeable=False):
if writeable:
warnings.warn(DeprecationWarning('the writeable argument to the '
'cached property is a noop since 0.6 '
'because the property is writeable '
'by default for performance reasons'))
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
def get_versions(module_list=[]):
# TODO:
ext_module_list = set()
for m in module_list:
parts = m.split('.')
ext_module_list.update('.'.join(parts[:idx]) for idx in xrange(1, len(parts)+1))
versions = {}
for module_name in ext_module_list:
__import__(module_name)
app = sys.modules[module_name]
if hasattr(app, 'get_version'):
get_version = app.get_version
if callable(get_version):
version = get_version()
else:
version = get_version
elif hasattr(app, 'VERSION'):
version = app.VERSION
elif hasattr(app, '__version__'):
version = app.__version__
else:
continue
if isinstance(version, (list, tuple)):
version = '.'.join(str(o) for o in version)
versions[module_name] = version
return versions
def shorten(var):
var = transform(var)
if isinstance(var, basestring) and len(var) > sentry.app.config['MAX_LENGTH_STRING']:
var = var[:sentry.app.config['MAX_LENGTH_STRING']] + '...'
elif isinstance(var, (list, tuple, set, frozenset)) and len(var) > sentry.app.config['MAX_LENGTH_LIST']:
# TODO: we should write a real API for storing some metadata with vars when
# we get around to doing ref storage
# TODO: when we finish the above, we should also implement this for dicts
var = list(var)[:sentry.app.config['MAX_LENGTH_LIST']] + ['...', '(%d more elements)' % (len(var) - sentry.app.config['MAX_LENGTH_LIST'],)]
return var
def is_float(var):
try:
float(var)
except ValueError:
return False
return True
class MockRequest(object):
GET = {}
POST = {}
META = {}
COOKIES = {}
FILES = {}
raw_post_data = ''
url = ''
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = pformat(self.GET)
except:
get = '<could not parse>'
try:
post = pformat(self.POST)
except:
post = '<could not parse>'
try:
cookies = pformat(self.COOKIES)
except:
cookies = '<could not parse>'
try:
meta = pformat(self.META)
except:
meta = '<could not parse>'
return '<Request\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
(get, post, cookies, meta)
def build_absolute_uri(self): return self.url
```
#### File: sentry/utils/shortcuts.py
```python
from flask import abort
def get_object_or_404(Model, **kwargs):
try:
return Model.objects.get(**kwargs)
except Model.DoesNotExist:
abort(404)
```
#### File: tests/test_backends/test_redis.py
```python
from .. import BaseTest
from sentry.db.backends.redis import RedisBackend
class MockModel(object):
__name__ = 'test'
class RedisBackendTest(BaseTest):
def setUp(self):
self.backend = RedisBackend(db=9)
self.schema = MockModel()
self.redis = self.backend.conn
def test_add(self):
pk1 = self.backend.add(self.schema, **{'foo': 'bar'})
self.assertTrue(pk1)
key = self.backend._get_data_key(self.schema, pk1)
self.assertEquals(len(self.redis.hgetall(key)), 1)
self.assertEquals(self.redis.hget(key, 'foo'), 'bar')
pk2 = self.backend.add(self.schema)
self.assertTrue(pk2)
self.assertNotEquals(pk1, pk2)
key = self.backend._get_data_key(self.schema, pk2)
self.assertFalse(self.redis.hgetall(key))
def test_delete(self):
pk = 'foo'
key = self.backend._get_data_key(self.schema, pk)
metakey = self.backend._get_metadata_key(self.schema, pk)
self.redis.hset(key, pk, {'foo': 'bar'})
self.redis.hset(metakey, pk, {'foo': 'bar'})
self.backend.delete(self.schema, pk)
self.assertFalse(self.redis.hgetall(key))
self.assertFalse(self.redis.hgetall(metakey))
def test_set(self):
pk = 'foo'
key = self.backend._get_data_key(self.schema, pk)
self.backend.set(self.schema, pk, **{'foo': 'bar'})
self.assertEquals(len(self.redis.hgetall(key)), 1)
self.assertEquals(self.redis.hget(key, 'foo'), 'bar')
```
#### File: tests/test_clients/test_logging.py
```python
from .. import BaseTest
import logging
from sentry.client import get_client
from sentry.models import Event
class LoggingTest(BaseTest):
def test_simple(self):
client = get_client('sentry.client.logging.LoggingSentryClient')
_foo = {'': None}
class handler(logging.Handler):
def emit(self, record):
_foo[''] = record
logger = client.logger
logger.addHandler(handler())
event_id = client.capture('Message', message='hello world')
self.assertRaises(Event.DoesNotExist, Event.objects.get, event_id)
self.assertEquals(_foo[''].getMessage(), 'hello world')
self.assertEquals(_foo[''].levelno, client.default_level)
```
#### File: dcrameri/tests/test_orm.py
```python
from . import BaseTest
from sentry.db import models
class TestModel(models.Model):
str_ = models.String()
int_ = models.Integer()
float_ = models.Float()
list_ = models.List()
class Meta:
sortables = ('int_', 'float_')
indexes = (('str_',),)
class ORMTest(BaseTest):
def test_create(self):
inst = TestModel.objects.create(
str_='foo',
int_=0,
float_=0.1,
list_=[1, 2, 3],
)
self.assertEquals(TestModel.objects.count(), 1)
self.assertTrue(inst.pk)
self.assertEquals(inst.str_, 'foo')
self.assertEquals(inst.int_, 0)
self.assertEquals(inst.float_, 0.1)
self.assertEquals(len(inst.list_), 3)
self.assertTrue(1 in inst.list_)
self.assertTrue(2 in inst.list_)
self.assertTrue(3 in inst.list_)
def test_get_or_create(self):
inst, created = TestModel.objects.get_or_create(str_='foo', defaults={
'int_': 0,
'float_': 0.1,
'list_': [1, 2, 3],
})
self.assertTrue(created)
self.assertEquals(TestModel.objects.count(), 1)
self.assertTrue(inst.pk)
self.assertEquals(inst.str_, 'foo')
self.assertEquals(inst.int_, 0)
self.assertEquals(inst.float_, 0.1)
self.assertEquals(len(inst.list_), 3)
self.assertTrue(1 in inst.list_)
self.assertTrue(2 in inst.list_)
self.assertTrue(3 in inst.list_)
inst, created = TestModel.objects.get_or_create(str_='foo', defaults={
'int_': 1,
'float_': 1.1,
'list_': [1],
})
self.assertFalse(created)
self.assertEquals(TestModel.objects.count(), 1)
self.assertTrue(inst.pk)
self.assertEquals(inst.str_, 'foo')
self.assertEquals(inst.int_, 0)
self.assertEquals(inst.float_, 0.1)
self.assertTrue(len(inst.list_), 3)
self.assertTrue(1 in inst.list_)
self.assertTrue(2 in inst.list_)
self.assertTrue(3 in inst.list_)
def test_get(self):
self.assertEquals(TestModel.objects.count(), 0)
self.assertRaises(TestModel.DoesNotExist, TestModel.objects.get, 'foo')
inst = TestModel.objects.create(str_='foo')
self.assertEquals(TestModel.objects.count(), 1)
self.assertEquals(TestModel.objects.get(inst.pk), inst)
def test_delete(self):
self.assertEquals(TestModel.objects.count(), 0)
inst = TestModel.objects.create(str_='foo')
self.assertEquals(TestModel.objects.count(), 1)
inst.delete()
self.assertEquals(TestModel.objects.count(), 0)
self.assertRaises(TestModel.DoesNotExist, TestModel.objects.get, 'foo')
def test_saving_behavior(self):
self.assertEquals(TestModel.objects.count(), 0)
inst = TestModel()
self.assertFalse(inst.pk)
self.assertEquals(TestModel.objects.count(), 0)
inst.save()
self.assertTrue(inst.pk)
self.assertEquals(TestModel.objects.count(), 1)
self.assertEquals(TestModel.objects.get(inst.pk), inst)
self.assertEquals(inst.str_, '')
self.assertEquals(inst.int_, 0)
self.assertEquals(inst.float_, 0.0)
self.assertEquals(len(inst.list_), 0)
inst.update(str_='foo')
self.assertEquals(TestModel.objects.count(), 1)
self.assertEquals(inst.str_, 'foo')
self.assertEquals(inst.int_, 0)
self.assertEquals(inst.float_, 0.0)
self.assertEquals(len(inst.list_), 0)
inst = TestModel.objects.get(pk=inst.pk)
self.assertEquals(inst.str_, 'foo')
self.assertEquals(inst.int_, 0)
self.assertEquals(inst.float_, 0.0)
self.assertEquals(len(inst.list_), 0)
inst = TestModel(float_=1.0)
self.assertFalse(inst.pk)
inst.save()
self.assertEquals(TestModel.objects.count(), 2)
self.assertEquals(inst.str_, '')
self.assertEquals(inst.int_, 0)
self.assertEquals(inst.float_, 1.0)
self.assertEquals(len(inst.list_), 0)
``` |
{
"source": "JiangHaoyang/Tensor-based-face-recognition-algorithm-and-modeling",
"score": 2
} |
#### File: JiangHaoyang/Tensor-based-face-recognition-algorithm-and-modeling/TensorfaceAndRandOneMethod.py
```python
import numpy as np
import os
import cv2 as cv
import matplotlib.pyplot as plt
import scipy.io as sio
import FaceDataIO as fdio
import tensortoolbox as ttl
from tensorly.decomposition import parafac
################################### Improved Method ###################################
def run():
database=np.load('TensorfaceParas.npy')[0]
if database=='yaleB':
        ##################################################### Training set
[subs,poses,illums]=np.load('trainSet_paras.npy')
len_of_c_ref=len(subs)
# read the corresponding image
[FaceTensor,_,_,_]=fdio.readFaceTensor('yaleB','select',[subs,poses,illums])
# calculate the base
U=[]
for i in range(5):
mat=ttl.tenmat(FaceTensor,i)
U.append(np.linalg.eig(np.dot(mat,mat.T))[1])
#Z=ttl.ttm(FaceTensor,[U[0].T,U[1].T,U[2].T,U[3].T,U[4].T],list(range(5)))
B=ttl.ttm(FaceTensor,[U[0].T,U[1].T,U[2].T],list(range(3)))
if (os.path.exists('inv_B_T.npy')==False):
# calculate the transfer matrix
B_=np.zeros([np.prod(B.shape[0:3]),np.prod(B.shape[3:5])])
for i in range(len(subs)):
for j in range(len(poses)):
B_[(i*len(poses)+j)*len(illums):(i*len(poses)+j+1)*len(illums),:]=\
ttl.tenmat(B[i,j,:,:,:],0)
            # calculate the pseudo-inverse of the basis matrix; this takes some time
            print('calculating the inverse matrix')
            inv_B_T=np.linalg.pinv(B_.T)
            print('finished calculation')
# np.save('inv_B_T.npy',[inv_B_T])
np.save('B_shape.npy',[B.shape])
else:
[inv_B_T]=np.load('inv_B_T.npy')
            print('loading the old inverse matrix. Delete the file if you want a new one')
# calculate the Cp_ref
Cp_ref=np.zeros([len(subs),len(subs)])
I=ttl.tenmat(FaceTensor[:,1,1,:,:],0)
Cp=np.dot(inv_B_T,I.T)
for i in range(I.shape[0]):
Cp_tmp=Cp[:,i]
Cp_tmp=np.reshape(Cp_tmp,[len(subs),len(poses),len(illums)])
Cp_ref[:,i]=parafac(Cp_tmp,rank=1)[0].reshape(Cp_ref[:,i].shape)
Cp_ref[:,i]=Cp_ref[:,i]*np.sign(Cp_ref[0,i])
#print(Cp_ref)
##################################################### Testing set
# load the testing set parameters
[test_subs,test_poses,test_illums]=np.load('testSet_paras.npy')
# read the corresponding image
[FaceTensor,_,_,_]=fdio.readFaceTensor('yaleB','select',[test_subs,test_poses,test_illums])
        # create an empty matrix to store Cp for the testing set
Cp=np.zeros([len(test_subs),len(test_poses),len(test_illums),len_of_c_ref])
# process each image one by one
for i in range(len(test_subs)):
print(i)
for j in range(len(test_poses)):
                # load the image set for the same subject and pose
images_set=ttl.tenmat(FaceTensor[i,j,:,:,:],0)
for k in range(len(test_illums)):
# process one image each time
one_image=images_set[k,:]
# calculate the Cp for this image
Cp_this_image=np.dot(inv_B_T,one_image.T)
Cp_this_image=np.reshape(Cp_this_image,[len(subs),len(poses),len(illums)])
Cp[i,j,k,:]=parafac(Cp_this_image,rank=1)[0].reshape(Cp[i,j,k,:].shape)
Cp[i,j,k,:]=Cp[i,j,k,:]*np.sign(Cp[i,j,k,0])
# store the result
np.save('improvedMethod_Cp.npy',Cp)
predictMap=np.zeros([len(test_subs),len(test_poses),len(test_illums)])
for i in range(len(test_subs)):
for j in range(len(test_poses)):
for k in range(len(test_illums)):
Cp_tmp=Cp[i,j,k,:].reshape([Cp.shape[3],1])*np.ones([1,Cp_ref.shape[1]])
tmp=np.sqrt(np.sum(np.square(Cp_tmp-Cp_ref),0))
#print(tmp)
predictMap[i,j,k]=np.argmin(tmp)
Correct_map=np.arange(len(test_subs)).reshape([len(test_subs),1,1])\
*np.ones([1,len(test_poses),1])*np.ones([1,1,len(test_illums)])
acc=(Correct_map==predictMap).sum()/predictMap.size
print(acc)
'''
Cp=Cp.reshape([np.prod(Cp.shape[0:3]),Cp.shape[3]])
Correct_map=Correct_map.reshape([np.prod(Correct_map.shape[0:3]),1])
np.save('SVMtest_Cp.npy',Cp)
np.save('SVMtest_y.npy',Correct_map)
'''
if database=='AR' or database=='PIE':
        ##################################################### Training set
[subs,nums]=np.load('trainSet_paras.npy')
len_of_c_ref=len(subs)
# read the corresponding image
[FaceTensor,_,_]=fdio.readFaceTensor(database,'select',[subs,nums])
# calculate the base
U=[]
for i in range(4):
mat=ttl.tenmat(FaceTensor,i)
U.append(np.linalg.eig(np.dot(mat,mat.T))[1])
#Z=ttl.ttm(FaceTensor,[U[0].T,U[1].T,U[2].T,U[3].T,U[4].T],list(range(5)))
B=ttl.ttm(FaceTensor,[U[0].T,U[1].T],list(range(2)))
if (os.path.exists('inv_B_T.npy')==False):
# calculate the transfer matrix
B_=np.zeros([np.prod(B.shape[0:2]),np.prod(B.shape[2:4])])
for i in range(len(subs)):
B_[i*len(nums):(i+1)*len(nums),:]=ttl.tenmat(B[i,:,:,:],0)
            # calculate the pseudo-inverse of the basis matrix; this takes some time
            print('calculating the inverse matrix')
            inv_B_T=np.linalg.pinv(B_.T)
            print('finished calculation')
# np.save('inv_B_T.npy',[inv_B_T])
np.save('B_shape.npy',[B.shape])
else:
[inv_B_T]=np.load('inv_B_T.npy')
            print('loading the old inverse matrix. Delete the file if you want a new one')
# calculate the Cp_ref
Cp_ref=np.zeros([len(subs),len(subs)])
I=ttl.tenmat(FaceTensor[:,1,:,:],0)
Cp=np.dot(inv_B_T,I.T)
for i in range(I.shape[0]):
Cp_tmp=Cp[:,i]
Cp_tmp=np.reshape(Cp_tmp,[len(subs),len(nums)])
Cp_ref[:,i]=parafac(Cp_tmp,rank=1)[0].reshape(Cp_ref[:,i].shape)
Cp_ref[:,i]=Cp_ref[:,i]*np.sign(Cp_ref[0,i])
#print(Cp_ref)
##################################################### Testing set
# load the testing set parameters
[test_subs,test_nums]=np.load('testSet_paras.npy')
# read the corresponding image
[FaceTensor,_,_]=fdio.readFaceTensor(database,'select',[test_subs,test_nums])
        # create an empty matrix to store Cp for the testing set
Cp=np.zeros([len(test_subs),len(test_nums),len_of_c_ref])
# process each image one by one
for i in range(len(test_subs)):
print(i)
            # load the image set for the same subject
images_set=ttl.tenmat(FaceTensor[i,:,:,:],0)
for j in range(len(test_nums)):
# process one image each time
one_image=images_set[j,:]
# calculate the Cp for this image
Cp_this_image=np.dot(inv_B_T,one_image.T)
Cp_this_image=np.reshape(Cp_this_image,[len(subs),len(nums)])
Cp[i,j,:]=parafac(Cp_this_image,rank=1)[0].reshape(Cp[i,j,:].shape)
Cp[i,j,:]=Cp[i,j,:]*np.sign(Cp[i,j,0])
# store the result
np.save('improvedMethod_Cp.npy',Cp)
predictMap=np.zeros([len(test_subs),len(test_nums)])
for i in range(len(test_subs)):
for j in range(len(test_nums)):
Cp_tmp=Cp[i,j,:].reshape([Cp.shape[2],1])*np.ones([1,Cp_ref.shape[1]])
tmp=np.sqrt(np.sum(np.square(Cp_tmp-Cp_ref),0))
predictMap[i,j]=np.argmin(tmp)
Correct_map=np.arange(len(test_subs)).reshape([len(test_subs),1])\
*np.ones([1,len(test_nums)])
acc=(Correct_map==predictMap).sum()/predictMap.size
print(acc)
return acc
``` |
{
"source": "jianghaoyuan2007/LeetCodeSupport",
"score": 4
} |
#### File: leetcodesupport/support/binarytreecreation.py
```python
from ..datastructure import TreeNode
# Using the property of a complete binary tree: the left and right children of node i are at indices 2i and 2i + 1.
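# For example (values are illustrative), after the [None] padding in create_binary_tree,
# nodes = [None, 'a', 'b', 'c'] puts the root 'a' at index 1 with children 'b' (index 2)
# and 'c' (index 3).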
def create_subtree_nodes(node, index, nodes):
left_subtree_index = index * 2
right_subtree_index = index * 2 + 1
if left_subtree_index < len(nodes):
value = nodes[left_subtree_index]
if value is not None:
node.left = TreeNode(value)
create_subtree_nodes(node.left, left_subtree_index, nodes)
if right_subtree_index < len(nodes):
value = nodes[right_subtree_index]
if value is not None:
node.right = TreeNode(value)
create_subtree_nodes(node.right, right_subtree_index, nodes)
def create_binary_tree(nodes):
if len(nodes) == 0:
return None
else:
nodes = [None] + nodes
root = TreeNode(nodes[1])
create_subtree_nodes(root, 1, nodes)
return root
def string_to_tree_node(string):
string = string.strip()
string = string[1:-1]
if not string:
return None
input_values = [s.strip() for s in string.split(',')]
root = TreeNode(int(input_values[0]))
node_queue = [root]
front = 0
index = 1
while index < len(input_values):
node = node_queue[front]
front = front + 1
item = input_values[index]
index = index + 1
if item != "null":
left_number = int(item)
node.left = TreeNode(left_number)
node_queue.append(node.left)
if index >= len(input_values):
break
item = input_values[index]
index = index + 1
if item != "null":
right_number = int(item)
node.right = TreeNode(right_number)
node_queue.append(node.right)
return root
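# Usage sketch (input string is illustrative): string_to_tree_node("[1,2,3,null,4]")
# returns a root 1 with children 2 and 3, where node 2 has only a right child 4.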
``` |
{
"source": "Jiang-HB/AC_CDQ",
"score": 2
} |
#### File: minatar/environments/asterix.py
```python
import numpy as np
#####################################################################################################################
# Constants
#
#####################################################################################################################
ramp_interval = 100
init_spawn_speed = 10
init_move_interval = 5
shot_cool_down = 5
#####################################################################################################################
# Env
#
# The player can move freely along the 4 cardinal directions. Enemies and treasure spawn from the sides. A reward of
# +1 is given for picking up treasure. Termination occurs if the player makes contact with an enemy. Enemy and
# treasure direction are indicated by a trail channel. Difficulty is periodically increased by increasing the speed
# and spawn rate of enemies and treasure.
#
#####################################################################################################################
class Env:
def __init__(self, ramping = True, seed = None):
self.channels ={
'player':0,
'enemy':1,
'trail':2,
'gold':3
}
self.action_map = ['n','l','u','r','d','f']
self.ramping = ramping
self.random = np.random.RandomState(seed)
self.reset()
# Update environment according to agent action
def act(self, a):
r = 0
if(self.terminal):
return r, self.terminal
a = self.action_map[a]
# Spawn enemy if timer is up
if(self.spawn_timer==0):
self._spawn_entity()
self.spawn_timer = self.spawn_speed
# Resolve player action
if(a=='l'):
self.player_x = max(0, self.player_x-1)
elif(a=='r'):
self.player_x = min(9, self.player_x+1)
elif(a=='u'):
self.player_y = max(1, self.player_y-1)
elif(a=='d'):
self.player_y = min(8, self.player_y+1)
# Update entities
for i in range(len(self.entities)):
x = self.entities[i]
if(x is not None):
if(x[0:2]==[self.player_x,self.player_y]):
if(self.entities[i][3]):
self.entities[i] = None
r+=1
else:
self.terminal = True
if(self.move_timer==0):
self.move_timer = self.move_speed
for i in range(len(self.entities)):
x = self.entities[i]
if(x is not None):
x[0]+=1 if x[2] else -1
if(x[0]<0 or x[0]>9):
self.entities[i] = None
if(x[0:2]==[self.player_x,self.player_y]):
if(self.entities[i][3]):
self.entities[i] = None
r+=1
else:
self.terminal = True
# Update various timers
self.spawn_timer -= 1
self.move_timer -= 1
#Ramp difficulty if interval has elapsed
        if(self.ramping and (self.spawn_speed>1 or self.move_speed>1)):
if(self.ramp_timer>=0):
self.ramp_timer-=1
else:
if(self.move_speed>1 and self.ramp_index%2):
self.move_speed-=1
if(self.spawn_speed>1):
self.spawn_speed-=1
self.ramp_index+=1
self.ramp_timer=ramp_interval
return r, self.terminal
# Spawn a new enemy or treasure at a random location with random direction (if all rows are filled do nothing)
def _spawn_entity(self):
lr = self.random.choice([True,False])
is_gold = self.random.choice([True,False], p=[1/3,2/3])
x = 0 if lr else 9
slot_options = [i for i in range(len(self.entities)) if self.entities[i]==None]
if(not slot_options):
return
slot = self.random.choice(slot_options)
self.entities[slot] = [x,slot+1,lr,is_gold]
# Query the current level of the difficulty ramp, could be used as additional input to agent for example
def difficulty_ramp(self):
return self.ramp_index
# Process the game-state into the 10x10xn state provided to the agent and return
def state(self):
state = np.zeros((10,10,len(self.channels)))
state[self.player_y,self.player_x,self.channels['player']] = 1
for x in self.entities:
if(x is not None):
c = self.channels['gold'] if x[3] else self.channels['enemy']
state[x[1], x[0],c] = 1
back_x = x[0]-1 if x[2] else x[0]+1
if(back_x>=0 and back_x<=9):
state[x[1], back_x, self.channels['trail']] = 1
return state
# Reset to start state for new episode
def reset(self):
self.player_x = 5
self.player_y = 5
self.entities = [None]*8
self.shot_timer = 0
self.spawn_speed = init_spawn_speed
self.spawn_timer = self.spawn_speed
self.move_speed = init_move_interval
self.move_timer = self.move_speed
self.ramp_timer = ramp_interval
self.ramp_index = 0
self.terminal = False
# Dimensionality of the game-state (10x10xn)
def state_shape(self):
return [10,10,len(self.channels)]
# Subset of actions that actually have a unique impact in this environment
def minimal_action_set(self):
minimal_actions = ['n','l','u','r','d']
return [self.action_map.index(x) for x in minimal_actions]
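# Editor's usage sketch (illustrative; not part of the original MinAtar file): run a short
# random-policy rollout to exercise the act()/state() API described in the header comment.
if __name__ == '__main__':
    env = Env(ramping=True, seed=0)
    episode_return = 0.0
    for _ in range(500):  # cap the rollout length
        a = env.random.choice(env.minimal_action_set())
        r, terminal = env.act(a)
        episode_return += r
        if terminal:
            break
    print('return:', episode_return, 'state shape:', env.state().shape)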
```
#### File: AC_CDDQN_code/utils/commons.py
```python
import torch, random, numpy as np, pickle
def get_state(s):
return (torch.tensor(s).permute(2, 0, 1)).unsqueeze(0).float().cuda()
def to_numpy(t):
return t.cpu().detach().numpy()
def world_dynamics(opts, t, s, env, policy_net1, policy_net2):
# A uniform random policy is run before the learning starts
if t < opts.replay_start_size:
action = torch.tensor([[random.randrange(opts.num_actions)]]).cuda()
else:
epsilon = opts.end_epsilon if t - opts.replay_start_size >= opts.first_n_frames \
else ((opts.end_epsilon - opts.epsilon) / opts.first_n_frames) * (t - opts.replay_start_size) + opts.epsilon
if np.random.binomial(1, epsilon) == 1:
action = torch.tensor([[random.randrange(opts.num_actions)]]).cuda()
else:
with torch.no_grad():
action = (policy_net1(s) + policy_net2(s)).max(1)[1].view(1, 1)
reward, terminated = env.act(action)
s_prime = get_state(env.state())
# return s_prime.cpu(), action.cpu(), torch.tensor([[reward]]).float(), torch.tensor([[terminated]])
return s_prime, action, torch.tensor([[reward]]).float().cuda(), torch.tensor([[terminated]]).cuda()
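# Editor's sketch (illustrative; not part of the original file): the linear epsilon-annealing
# schedule used inside world_dynamics(), written as a pure function so it can be inspected
# without an environment or a GPU.
def linear_epsilon(t, replay_start_size, first_n_frames, start_epsilon, end_epsilon):
    steps_after_start = t - replay_start_size
    if steps_after_start >= first_n_frames:
        return end_epsilon
    return ((end_epsilon - start_epsilon) / first_n_frames) * steps_after_start + start_epsilon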
def load_data(path):
file = open(path, "rb")
data = pickle.load(file)
file.close()
return data
def save_data(path, data):
file = open(path, "wb")
pickle.dump(data, file)
file.close()
```
#### File: AC_CDQ/AC_CDE_code/main.py
```python
import numpy as np, multiprocessing
from multiprocessing import Pipe
from utils import ME, DE, CDE, AC_CDE, Env, G1, G2, G3, save_data
r = 0.15
n_runs = 2
id = None
alg_id = None
alg_ops = [ME(), DE(), CDE(), AC_CDE()]
p_ops = [G1(), G2(), G3()]
def f(n_action, p_op_idx, n_sample, n_repeat, alg_idx, _i, _j, K=None, pipe=None, seed=None):
print("alg: %d, %d start."%(_i, _j))
np.random.seed(seed)
results = np.zeros(n_repeat)
p_op = p_ops[p_op_idx[0]] if isinstance(p_op_idx, list) else p_ops[p_op_idx]
for i in range(n_repeat):
p = p_op(p_op_idx[1], n_action) if isinstance(p_op_idx, list) else p_op(n_action)
env = Env(n_action, p)
samples = np.zeros((n_action, n_sample))
for j in range(n_action):
samples[j] = env.sample(j, n_sample)
real = np.max(p)
if alg_idx in [3]:
results[i] = alg_ops[alg_idx](samples, n_action, K, r) - real
else:
results[i] = alg_ops[alg_idx](samples) - real
bias = np.mean(results)
bias2 = bias ** 2
variance = np.var(results)
mse = bias2 + variance
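    # Editor's note (illustrative): the three statistics computed here follow the usual
    # decomposition MSE = bias^2 + variance of the estimation error
    # (estimated max expected reward - true max expected reward).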
print("alg: %d, %d ok."%(_i, _j))
pipe.send([ _i, _j , np.array([bias2, variance, mse, bias])])
def mp(args, n_core, results):
n = len(args)
pipes = [Pipe() for _ in range(n_core)]
pool = multiprocessing.Pool(processes=n_core)
for i in range(0, n, n_core):
print("start %d"%(i))
ps = []
seeds = np.random.randint(1, 10000, len(args[i: i + n_core]))
for j, arg in enumerate(args[i: i+n_core]):
ps.append(pool.apply_async(f, args=arg + [pipes[j][1]] + [seeds[j]]))
[p.get() for p in ps]
for pipe in pipes[:len(args[i: i + n_core])]:
m, n, data = pipe[0].recv()
results[m][n] = data
return results
def setting1():
n_action = 30
n_samples = 1000 * (np.arange(10) + 1)
n_repeat = 2000
p_op_idx = 0
alg_idxs = [alg_id]
n_core = 10
results = np.zeros([len(alg_ops), n_samples.size, 4])
args = [[n_action, p_op_idx, n_sample, n_repeat, alg_idx, i, j, None] for j, n_sample in enumerate(n_samples) for i, alg_idx in enumerate(alg_idxs)]
results = mp(args, n_core, results)
save_data(results, "./results/setting%d_alg%d_id%d.pth"%(1, alg_idxs[0], id))
def setting2():
n_actions = 10 * (np.arange(10) + 1)
n_sample = 10000
n_repeat = 2000
p_op_idx = 0
alg_idxs = [alg_id]
n_core = 10
results = np.zeros([len(alg_ops), n_actions.size, 4])
args = [[n_action, p_op_idx, n_sample, n_repeat, alg_idx, i, j, None] for j, n_action in enumerate(n_actions) for i, alg_idx in enumerate(alg_idxs)]
results = mp(args, n_core, results)
save_data(results, "./results/setting%d_alg%d_id%d.pth"%(2, alg_idxs[0], id))
def setting3():
n_action = 30
n_sample = 10000
n_repeat = 2000
p_op_idx = 1
p_num = 9
n_core = 10
alg_idxs = [alg_id]
results = np.zeros([len(alg_ops), p_num, 4])
args = [[n_action, [p_op_idx, p_idx], n_sample, n_repeat, alg_idx, i, j, None] for j, p_idx in enumerate(range(p_num)) for i, alg_idx in enumerate(alg_idxs)]
results = mp(args, n_core, results)
save_data(results, "./results/setting%d_alg%d_id%d.pth"%(3, alg_idxs[0], id))
def main(setting_idx):
ops = [setting1, setting2, setting3]
ops[setting_idx]()
if __name__ == "__main__":
for idx in range(n_runs):
id = idx + 1
for setting_idx in [0, 1, 2]:
for _alg_id in [0, 1, 2, 3]:
np.random.seed(idx * 10)
alg_id = _alg_id
main(setting_idx)
```
#### File: AC_CDQ/AC_CDQ_code/main.py
```python
import numpy as np
from utils import opts, q, dq, cdq, ac_cdq
def main():
assert opts.setting in [1, 2, 3], "Unknown Reward Setting Type."
# reward setting
reward_array = np.zeros((opts.n_repeat, opts.n_step))
random_array = np.random.rand(opts.n_repeat, opts.n_step)
reward_array[np.where(random_array >= 0.5)] = -6
reward_array[np.where(random_array < 0.5)] = 4
# run algorithms
q(opts, reward_array) # q-learning
dq(opts, reward_array) # double q-learning
cdq(opts, reward_array) # clipped double q-learning
ac_cdq(opts, reward_array) # action candidate based clipped double q-learning
if __name__ == '__main__':
main()
```
#### File: AC_CDQ_code/utils/ac_cdq.py
```python
from utils.step import step
from utils.commons import save_data
from utils.play_mp import play_mp
import multiprocessing as mp, numpy as np, pdb
def _ac_cdq(opts, reward_array, repeat_n, seed, which_Q, pipe_send):
np.random.seed(seed)
Q = np.zeros((opts.n_state, opts.n_action, 2))
Q_q = np.zeros((opts.n_state, opts.n_action))
current_state = opts.start
n_eps = np.zeros(opts.n_state)
n_alpha = np.zeros((opts.n_state, opts.n_action, 2))
n_alpha_q = np.zeros((opts.n_state, opts.n_action))
rewards = np.zeros(opts.n_step)
max_Q0 = np.zeros(opts.n_step)
for i in range(opts.n_step):
idx_Q = which_Q[i]
n_eps[current_state] += 1
action, reward, next_state = step(opts, Q, current_state, n_eps, reward_array, i)
n_alpha[current_state][action][idx_Q] += 1
n_alpha_q[current_state][action] += 1
if current_state == opts.goal:
delta = reward - Q[current_state][action][idx_Q]
else:
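            # Editor's note (illustrative): action-candidate selection -- keep the top
            # max(n_action/2, K) actions ranked by the *other* estimator Q_{1-idx}, pick the
            # greedy one among them under Q_idx, and bootstrap with
            # min(Q_{1-idx}(s', a*), max_a Q_idx(s', a)) as in clipped double Q-learning.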
idxs = Q[next_state, :, 1 - idx_Q].argsort()[-max(int(opts.n_action / 2), opts.K): ]
max_action = idxs[np.argmax(Q[next_state, idxs, idx_Q])]
delta = reward + opts.gamma * np.minimum(np.max(Q[next_state, max_action, 1 - idx_Q]), np.max(Q[next_state, :, idx_Q])) - Q[current_state][action][idx_Q]
Q[current_state][action][idx_Q] = Q[current_state][action][idx_Q] + (1 / np.power(n_alpha[current_state][action][idx_Q], opts.exp)) * delta
rewards[i] = reward
max_Q0[i] = np.max(np.mean(Q[opts.start], 1), 0)
current_state = next_state
pipe_send.send([repeat_n, rewards, max_Q0])
def ac_cdq(opts, reward_array):
pool = mp.Pool(processes=opts.n_core)
which_Q = (np.random.rand(opts.n_repeat, opts.n_step) >= 0.5).astype(np.uint8)
args = [[reward_array[i], i, np.random.choice(1000000), which_Q[i]] for i in range(opts.n_repeat)]
rewards, max_Q0 = play_mp(opts, pool, args, _ac_cdq)
save_data([np.mean(rewards, 0), np.mean(max_Q0, 0)], opts.save_path % "ac_cdq")
pool.close()
```
#### File: AC_CDQ_code/utils/step.py
```python
import numpy as np
def step(opts, Q, current_state, n_eps, reward_array, step_n, probs=None):
if probs is not None:
cums = np.cumsum(probs)
probs = cums / np.max(cums)
action = np.random.choice(np.where(np.random.rand() <= probs)[0])
elif opts.policy == "eps":
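        # Count-based epsilon-greedy: explore with probability 1/sqrt(n(s)), so exploration
        # decays as state s is visited more often.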
if np.random.rand() > 1 / np.sqrt(n_eps[current_state]):
if len(Q.shape) == 3:
action = np.argmax(np.mean(Q, 2)[current_state])
if len(Q.shape) == 2:
action = np.argmax(Q[current_state])
else:
action = np.random.randint(0, 4, 1)[0]
if current_state != opts.goal:
if action == 0:
next_state = current_state - opts.n_col
if next_state < 0:
next_state = current_state
elif action == 1:
next_state = current_state + opts.n_col
if next_state >= opts.n_state:
next_state = current_state
elif action == 2:
next_state = current_state - 1
if (next_state + 1) % opts.n_col == 0:
next_state = current_state
elif action == 3:
next_state = current_state + 1
if next_state % opts.n_col == 0:
next_state = current_state
reward = reward_array[step_n]
else:
reward = np.random.choice([-30, 40])
next_state = opts.start
return action, reward, next_state
```
#### File: AC_TD3/modules/actor.py
```python
import torch, torch.nn as nn, torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, opts):
super(Actor, self).__init__()
# A1
self.l1 = nn.Linear(opts.state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, opts.action_dim)
# A2
self.l4 = nn.Linear(opts.state_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, opts.action_dim)
self.max_action = opts.max_action
def A1(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
def A2(self, state):
a = F.relu(self.l4(state))
a = F.relu(self.l5(a))
return self.max_action * torch.tanh(self.l6(a))
def forward(self, state):
return self.A1(state)
```
#### File: AC_TD3_code/utils/commons.py
```python
import pickle
def load_data(path):
file = open(path, "rb")
data = pickle.load(file)
file.close()
return data
def save_data(path, data):
file = open(path, "wb")
pickle.dump(data, file)
file.close()
def chunker_list(seq, size):
return [seq[pos: pos + size] for pos in range(0, len(seq), size)]
def chunker_num(num, size):
return [list(range(num))[pos: pos + size] for pos in range(0, num, size)]
```
#### File: AC_TD3_code/utils/run.py
```python
import gym, numpy as np, pdb, torch, time
from utils import ReplayBuffer, eval_policy, Recoder
from AC_TD3.ac_td3 import AC_TD3 as Method
def run(opts, seed):
opts.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.manual_seed(seed)
np.random.seed(seed)
# recoder
recoder = Recoder(opts.results_dir, seed)
# env
env = gym.make(opts.env_nm)
env.seed(seed)
opts.state_dim = env.observation_space.shape[0]
opts.action_dim = env.action_space.shape[0]
opts.max_action = float(env.action_space.high[0])
opts.policy_noise = opts.policy_noise * opts.max_action
opts.noise_clip = opts.noise_clip * opts.max_action
# policy setting
policy = Method(opts, seed)
# replay buffer
replay_buffer = ReplayBuffer(opts)
state, done = env.reset(), False
episode_reward, episode_timesteps, episode_num, eval_idx = 0, 0, 0, 0
t1 = time.time()
for t in range(int(opts.max_timesteps)):
episode_timesteps += 1
# Select action randomly or according to policy
if t < opts.start_timesteps:
action = env.action_space.sample()
else:
action = (policy.select_action(np.array(state)) + np.random.normal(0, opts.max_action * opts.expl_noise, size=opts.action_dim)
).clip(-opts.max_action, opts.max_action)
# Perform action
next_state, reward, done, _ = env.step(action)
done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0
# Store data in replay buffer
replay_buffer.add(state, action, next_state, reward, done_bool)
state = next_state
episode_reward += reward
# Train agent after collecting sufficient data
if t >= opts.start_timesteps:
policy.train(replay_buffer)
if done:
            # +1 to account for 0 indexing (t, episode_num and episode_timesteps are all printed with +1)
print("- Seed: %d, Total T: %d, Episode Num: %d, Episode T: %d, Reward: %.3f, Time: %.2f, %s -" % (
seed, t + 1, episode_num + 1, episode_timesteps + 1, episode_reward, time.time() - t1, opts.tag))
recoder.add_result(episode_reward, "train_return")
# recoder.add_result(episode_reward, "train_return")
recoder.save_result()
# Reset environment
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num += 1
# Evaluate episode
if opts.is_eval and (t + 1) % opts.eval_freq == 0:
eval_scores, max_qs, real_qs = eval_policy(opts, policy, eval_idx + 1, t + 1)
print("eval_scores", eval_scores, "max_qs", max_qs, "real_qs", real_qs)
recoder.add_result({"eval_scores": eval_scores, "max_qs": max_qs, "real_qs": real_qs}, "test_return")
recoder.save_result()
if opts.save_model:
policy.save()
recoder.save_result()
``` |
{
"source": "jianghong/lcsranking",
"score": 2
} |
#### File: jianghong/lcsranking/runserver.py
```python
import json
from flask import Flask, render_template
import localsettings
app = Flask(__name__)
app.debug = localsettings.DEBUG
WEEKS = 999999999
f = open('api_payload.json')
API_DATA = json.load(f)
@app.route('/')
def index():
return render_template('index.html', api_data=API_DATA)
def add_points(obj, mode='actualValue'):
total = 0
point_factors = {
'assists': 1.5,
'deaths': -0.5,
'doubleKills': 0.0,
'killOrAssistBonus': 2.0,
'kills': 2.0,
'minionKills': .01,
'pentaKills': 5.0,
'quadraKills': 3.0,
'tripleKills': 2.0
}
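    # Editor's note (illustrative): with these weights a week with 3 kills, 4 assists and
    # 2 deaths contributes 3*2.0 + 4*1.5 + 2*(-0.5) = 11.0 points before the other categories.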
for i in xrange(WEEKS):
for k, v in obj[str(i+1)].iteritems():
if k != 'week':
total += v[mode] * point_factors[k]
return total
@app.context_processor
def utility_processor():
return dict(add_points=add_points)
@app.template_filter('get_team')
def get_team(player_obj):
return API_DATA['proTeams'][str(player_obj['proTeamId'])]['shortName']
@app.template_filter('get_expr')
def get_expr(player_obj):
player_team = API_DATA['proTeams'][str(player_obj['proTeamId'])]
total_wins = 0
total_losses = 0
total_kills = 0
total_assists = 0
total_deaths = 0
for i in xrange(WEEKS):
total_wins += player_team['statsByWeek'][str(i+1)]['matchVictory']['actualValue']
total_losses += player_team['statsByWeek'][str(i+1)]['matchDefeat']['actualValue']
total_kills += player_obj['statsByWeek'][str(i+1)]['kills']['actualValue']
total_assists += player_obj['statsByWeek'][str(i+1)]['assists']['actualValue']
total_deaths += player_obj['statsByWeek'][str(i+1)]['deaths']['actualValue']
KDA = (float(total_deaths) / (total_kills + total_assists)) + 1.0
return round(((1.0 - float(total_wins) / (total_losses + total_wins)) * add_points(player_obj['statsByWeek'])) / KDA, 2)
@app.template_filter('get_winloss')
def get_winloss(player_obj):
player_team = API_DATA['proTeams'][str(player_obj['proTeamId'])]
total_wins = 0
total_losses = 0
for i in xrange(WEEKS):
total_wins += player_team['statsByWeek'][str(i+1)]['matchVictory']['actualValue']
total_losses += player_team['statsByWeek'][str(i+1)]['matchDefeat']['actualValue']
return "{0}W - {1}L".format(total_wins, total_losses)
@app.context_processor
def inject_debug():
return dict(debug=localsettings.DEBUG)
if __name__ == '__main__':
app.run()
``` |
{
"source": "JiangHongSh/TestGit",
"score": 3
} |
#### File: TestGit/2018-10-29/spider_main.py
```python
import url_manager, html_downloader, html_parser, html_outputer
import urllib.parse
class SpiderMain(object):
def __init__(self):
self.urls=url_manager.UrlManager()
self.downloader=html_downloader.HtmlDownloader()
self.parser=html_parser.HtmlParser()
self.outputer=html_outputer.HtmlOutputer()
def craw(self, root_url):
count=1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url=self.urls.get_new_url()
print("craw %d:%s" %(count,(urllib.parse.unquote(new_url))))
html_cont=self.downloader.download(new_url)
new_urls,new_data=self.parser.parse(new_url,html_cont)
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
if count == 100:
break
count=count+1
except Exception as e:
print(e)
print("craw failed")
self.outputer.output_html()
if __name__=="__main__":
root_url="https://baike.baidu.com/item/Scarborough%20Fair/9424588?fr=aladdin"
obj_spider = SpiderMain()
obj_spider.craw(root_url)
```
#### File: TestGit/2018-11-05/spider_main.py
```python
import url_manager, html_downloader, html_parser, html_outputer
import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup
class SpiderMain(object):
def __init__(self):
self.downloader=html_downloader.HtmlDownloader()
self.parser=html_parser.HtmlParser()
self.outputer=html_outputer.HtmlOutputer()
def craw(self, root_url):
count=1
#self.urls.add_new_url(root_url)
response = urllib.request.urlopen(root_url)
html_doc2 = response.read()
soup = BeautifulSoup(html_doc2, 'html.parser', from_encoding='utf-8')
new_urls = set()
links = soup.findAll('a',href=re.compile(r"^/news/"))
        i = 0
for link in links:
new_url = link['href']
new_full_url = urllib.parse.urljoin(root_url, new_url)
            new_urls.add(new_full_url)
print(new_full_url)
html_cont = self.downloader.download(new_full_url)
new_data=self.parser.parse(new_full_url,html_cont)
self.outputer.collect_data(new_data)
i=i+1
if i>10 :
break
self.outputer.output_html()
print("over")
if __name__=="__main__":
root_url="https://www.yahoo.com/news/"
obj_spider = SpiderMain()
obj_spider.craw(root_url)
```
#### File: TestGit/2018-11-12/html_parser.py
```python
import re
import urllib.parse
from urllib import parse
from bs4 import BeautifulSoup
class HtmlParser(object):
def parse(self, page_url, html_cont):
if page_url is None or html_cont is None:
return
soup = BeautifulSoup(html_cont,"html.parser")
new_data = self._get_new_data(page_url,soup)
return new_data
def _get_new_data(self, page_url, soup):
res_data={}
res_data['url']=page_url
title_node=soup.find('header').find("h1")
res_data['title']=title_node.get_text()
summary_node = soup.findAll('p')
res_data['summary']=summary_node
img_node = soup.findAll('img')
res_data['imgs']=img_node
return res_data
``` |
{
"source": "Jiang-Hong/testGethPBFT",
"score": 3
} |
#### File: testGethPBFT/chain/conf.py
```python
import json
import time
from math import ceil
def generate_test_config(level: int = 3, terminal_count: int = 1, config_file: str = 'conf0.txt') -> None:
"""
Generate a HIBEChain config file.
Only one terminal for one leaf chain.
"""
if level > 9: # level starts from 0
raise ValueError("level number should not exceeds 9")
chain_count = 2 ** (level + 1) - 1
id_list = [None] * chain_count
id_list[0] = ''
thresh_list = [(19, 13)]
for i in range(1, chain_count):
if i % 2:
id_list[i] = id_list[ceil(i/2)-1] + '01'
else:
id_list[i] = id_list[ceil(i/2)-1] + '02'
thresh_list.append((19, 13))
new_count = chain_count
for i in range(chain_count//2, chain_count):
for j in range(1, terminal_count+1):
id_list.append(id_list[i]+'%02d' % j)
thresh_list.append((1, 1))
new_count += 1
print('Total: %d nodes' % sum([x for x, _ in thresh_list]))
print(id_list)
print(thresh_list)
# chain_count = new_count
lines = []
for i in range(level+1):
index = 2 ** i
tmp_id = ' '.join(id_list[:index])
tmp_thresh = ' '.join('%s,%s' % tup for tup in thresh_list[:index])
id_list = id_list[index:]
thresh_list = thresh_list[index:]
lines.append(tmp_id)
lines.append(tmp_thresh)
tmp_id = ' '.join(id_list)
tmp_thresh = ' '.join('%s,%s' % tup for tup in thresh_list)
lines.append(tmp_id)
lines.append(tmp_thresh)
with open('../config/'+config_file, 'w') as file:
file.write('\n'.join(lines))
def generate__tri_test_config(level: int = 3, terminal_count: int = 8, config_file: str = 'conf0.txt') -> None:
"""Generate a HIBEChain config file."""
if level > 3: # level starts from 0
raise ValueError("level number should not exceeds 3")
chain_count = (3 ** (level + 1) - 1) // 2
id_list = [None] * chain_count
id_list[0] = ''
thresh_list = [(22, 15)]
for i in range(1, chain_count):
if i % 3 == 1:
id_list[i] = id_list[ceil(i/3)-1] + '0001'
elif i % 3 == 2:
id_list[i] = id_list[ceil(i/3)-1] + '0002'
else:
id_list[i] = id_list[ceil(i/3)-1] + '0003'
thresh_list.append((22, 15))
new_count = chain_count
for i in range(chain_count//3, chain_count):
for j in range(1, terminal_count+1):
id_list.append(id_list[i]+'%04d' % j)
thresh_list.append((1, 1))
new_count += 1
break
print('Total: %d nodes' % sum([x for x, _ in thresh_list]))
print(id_list)
print(thresh_list)
# chain_count = new_count
lines = []
for i in range(level+1):
index = 3 ** i
tmp_id = ' '.join(id_list[:index])
tmp_thresh = ' '.join('%s,%s' % tup for tup in thresh_list[:index])
id_list = id_list[index:]
thresh_list = thresh_list[index:]
lines.append(tmp_id)
lines.append(tmp_thresh)
tmp_id = ' '.join(id_list)
tmp_thresh = ' '.join('%s,%s' % tup for tup in thresh_list)
lines.append(tmp_id)
lines.append(tmp_thresh)
with open('../config/'+config_file, 'w') as file:
file.write('\n'.join(lines))
def load_config_file(config_file: str = 'conf0.txt') -> tuple:
"""Get id_list & thresh_list from a config file."""
id_list = ['']
thresh_list = []
with open('../config/'+config_file) as file:
lines = file.readlines()
while not lines[-1].split():
lines.pop(-1)
if len(lines) % 2:
raise RuntimeError('line number of configure file should be even')
while True:
add_id = lines[0]
thresh = lines[1]
lines = lines[2:]
id_list += add_id.split()
thresh_list += list(map(tuple, [item.split(',') for item in thresh.split()]))
if not lines:
break
thresh_list = [tuple(map(int, thresh)) for thresh in thresh_list]
if len(id_list) != len(thresh_list):
raise RuntimeError('length of id_list should match length of thresh_list')
return id_list, thresh_list
def generate_genesis(chain_id: int, accounts: list, config_file: str) -> None:
"""Generate a genesis file."""
with open('../docker/120.json', 'rb') as f:
genesis = json.load(f)
genesis['config']['chainId'] = chain_id
for acc in accounts:
genesis['alloc'][acc] = {'balance': "0x200000000000000000000000000000000000000000000000000000000000000"}
extra_data = '0x' + '0'*64 + ''.join(accounts) + '0' * 130
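    # Editor's note (assumption: clique-style extraData layout): 32-byte vanity (64 hex zeros)
    # + concatenated 20-byte signer addresses + 65-byte seal placeholder (130 hex zeros).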
print("extra data in genesis file", extra_data)
genesis['extraData'] = extra_data
new_genesis = json.dumps(genesis, indent=2)
with open('../config/%s' % config_file, 'w') as f:
print(new_genesis, file=f)
time.sleep(0.05)
def generate_leaf_genesis(config_file: str, leaves: list) -> None:
"""Generate a genesis file for leaf chains and terminals."""
with open('../config/%s' % config_file, 'rb') as f:
genesis = json.load(f)
for chain in leaves:
print('--------------leaf-------------------------------')
print('-------------file name', config_file)
account_ascii = []
terminal_id = chain.chain_id[:-2]
for char in terminal_id:
account_ascii.append(hex(ord(char))[2:])
tmp_account = ''.join(account_ascii)
for i in range(0, 5):
for j in range(0, 256):
terminal_account = tmp_account
terminal_account += hex(i)[2:].zfill(2) + hex(j)[2:].zfill(2)
terminal_account = terminal_account + (40 - len(terminal_account) - 1) * '0' + '1'
# print(terminal_account)
if len(terminal_account) != 40:
print('terminal account:', terminal_account)
raise ValueError('length of account should be 40')
genesis['alloc'][terminal_account] = {'balance': "0x200000000000000000000000000000000000000000000000000000000000000"}
new_genesis = json.dumps(genesis, indent=2)
with open('../config/%s' % config_file, 'w') as f:
print(new_genesis, file=f)
time.sleep(0.05)
# def generate_terminal_genesis(config_file, terminals):
# """Generate a genesis file for leaf chains and terminals."""
# with open('docker/%s' % config_file, 'rb') as f:
# genesis = json.load(f)
# for chain in terminals:
# account = []
# for char in chain.chain_id:
# account.append(hex(ord(char))[2:])
# acc = ''.join(account)
# acc = acc + (40 - len(acc) - 1) * '0' + '1'
# if len(acc) != 40:
# print('account:', acc)
# raise ValueError('length of account should be 40')
# print(acc)
# genesis['alloc'][acc] = {'balance': "0x200000000000000000000000000000000000000000000000000000000000000"}
# new_genesis = json.dumps(genesis, indent=2)
# with open('docker/%s' % config_file, 'w') as f:
# print(new_genesis, file=f)
if __name__ == '__main__':
generate_test_config()
id_list, thresh_list = load_config_file()
print(id_list)
print(thresh_list)
```
#### File: testGethPBFT/chain/gethnode.py
```python
import json
from typing import Union, Optional, Any
import requests
from time import sleep
from datetime import datetime
from chain.iplist import IPList
from chain.const import IMAGE, USERNAME, IP_CONFIG, SECONDS_IN_A_DAY, SEMAPHORE
# class GethNode0(object):
# """data structure for geth client running in a docker container"""
#
# def __init__(self, userName=USERNAME, passWord=<PASSWORD>):
# self.enode = ''
# self.ip, self.rpc_port, self.ethereum_network_port = IPlist.getNewPort()
# self.name = 'geth-pbft' + str(self.rpc_port)
# self._headers = {'Content-Type': 'application/json', 'Connection': 'close'}
# self._userName = USERNAME
# self.password = <PASSWORD>
# self.accounts = []
# self._ifSetGenesis = False
#
# def start(self):
# """start a container for geth client """
# pass
class GethNode(object):
"""Data structure for Geth-pbft client."""
def __init__(self, ip_list: IPList, pbft_id: int, node_index: int, blockchain_id: int,
username: str = USERNAME) -> None:
self.id = node_index # used in rpc call
self.ip, self.rpc_port, self.ethereum_network_port = ip_list.get_new_port()
self.pbft_id = pbft_id
self.node_index = node_index
self.blockchain_id = blockchain_id
self.name = 'geth-pbft' + str(self.rpc_port) # docker container name of this node
self.enode = ''
self.accounts = [] # accounts list of a geth node
self._headers = {'Content-Type': 'application/json'} # for rpc call use # 'Connection': 'close'?
self.username = username # user name of login user of a server
def __repr__(self) -> str:
return self.ip.address + ":" + str(self.rpc_port)
def start(self) -> None:
"""Start a container for geth on remote server and create a new account."""
# --ulimit nofile=<soft limit>:<hard limit> set the limit for open files
docker_run_command = ('docker run --ulimit nofile=65535:65535 -td -p %d:8545 -p %d:30303 --rm --name %s %s' %
(self.rpc_port, self.ethereum_network_port, self.name, IMAGE))
sleep(0.6)
result = self.ip.exec_command(docker_run_command)
if result:
if result.startswith('docker: Error'):
print(result)
print(self.ip)
raise RuntimeError('An error occurs while starting docker container. Container maybe already exists')
print('container of node %s of blockchain %s at %s:%s started' % (self.node_index, self.blockchain_id,
self.ip.address, self.rpc_port))
new_account_command = 'docker exec -t %s geth --datadir abc account new --password <PASSWORD>' % self.name
sleep(0.1)
account = self.ip.exec_command(new_account_command).split()[-1][1:-1]
sleep(0.3)
if len(account) == 40: # check if the account is valid
self.accounts.append(account)
else:
print('invalid account')
def rpc_call(self, method: str, params: Optional[list] = None) -> Any:
"""Make an rpc call to this geth node."""
if params is None:
params = []
data = json.dumps({ # json string used in HTTP requests
'jsonrpc': '2.0',
'method': method,
'params': params,
'id': self.id
})
url = "http://{}:{}".format(self.ip.address, self.rpc_port)
with SEMAPHORE:
with requests.Session() as r:
# sleep(0.01) ###
response = r.post(url=url, data=data, headers=self._headers)
while response.headers['Content-Type'] != 'application/json':
print(self.ip.address, self.rpc_port)
print(response.status_code, response.headers)
print(response.content)
sleep(0.05)
response = r.post(url=url, data=data, headers=self._headers)
content = response.json()
# sleep(0.02)
print(content)
result = content.get('result')
err = content.get('error')
if err:
raise RuntimeError(self.ip.address, self.rpc_port, err.get('message'))
print('%s @%s : %s %s' % (method, self.ip.address, self.rpc_port, result))
return result
def test(self, **kwargs) -> Any:
method = kwargs['method']
params = kwargs['params']
return self.rpc_call(method, params)
def get_peer_count(self) -> int:
"""net.peerCount"""
method = 'net_peerCount'
result = self.rpc_call(method)
return int(result, 16) if result else 0 # change hex number to dec
def get_peers(self) -> str:
"""admin.peers"""
method = 'admin_peers'
sleep(0.01)
peers = self.rpc_call(method)
return peers
def new_account(self, password: str = '<PASSWORD>') -> None:
"""personal.newAccount(password)"""
method = 'personal_newAccount'
params = [password]
account = self.rpc_call(method, params)
self.accounts.append(account[2:])
def key_status(self) -> bool:
"""admin.key_status()"""
method = 'admin_keyStatus'
status = self.rpc_call(method)
return status
def unlock_account(self, account: str = '0', password: str = '<PASSWORD>', duration: int = SECONDS_IN_A_DAY) -> bool:
"""personal.unlockAccount()"""
method = 'personal_unlockAccount'
params = [account, password, duration]
result = self.rpc_call(method, params)
return result
def send_old_transaction(self, to_id: str, to_index: int, value: Union[str, int] = '0x1'):
"""eth.sendTransaction()"""
if isinstance(value, int): # if value is int, change it to hex str
value = hex(value)
params = [{"toid": to_id, "toindex": to_index, "value": value}]
method = 'eth_sendTransaction'
sent_count = self.rpc_call(method, params)
return sent_count
def send_transaction(self, to_id: str, to_index: int, value: Union[str, int] = '0x1'):
"""eth.sendTransaction2()"""
if isinstance(value, int): # if value is int, change it to hex str
value = hex(value)
params = [{"toid": to_id, "toindex": to_index, "value": value}]
method = 'eth_sendTransaction2'
# sleep(0.2)
return self.rpc_call(method, params)
def send_transaction3(self, terminal_number: int, iter_round: int, nonce: Union[str, int] = '0x0',
value: Union[str, int] = '0x1', speed: int = 10000):
"""eth.sendTransaction3()"""
if isinstance(value, int): # if value is int, change it to hex str
value = hex(value)
if isinstance(nonce, int):
nonce = hex(nonce)
params = [{"terminal": terminal_number, "round": iter_round, "nonce": nonce, "value": value, "speed": speed}]
method = 'eth_sendTransaction3'
print('sending transactions...')
print('UTC time is', datetime.utcnow())
return self.rpc_call(method, params)
def test_send_transaction(self, to_id: str, to_index: int, value: Union[str, int], interval: int, period: int):
"""eth.testSendTransaction2()"""
if isinstance(value, int): # if value is int, change it to hex str
value = hex(value)
params = [{"toid": to_id, "toindex": to_index, "value": value, "txinterval": interval, "txperiod": period}]
method = 'eth_testSendTransaction2'
# sleep(0.2)
return self.rpc_call(method, params)
def get_transaction(self, transaction_id: str):
"""eth.getTransaction()"""
method = 'eth_getTransaction'
params = [transaction_id]
return self.rpc_call(method, params)
def get_accounts(self):
"""eth.accounts"""
method = 'eth_accounts'
return self.rpc_call(method)
def get_balance(self, account):
"""eth.getBalance()"""
if not account.startswith('0x'):
account = '0x' + account
method = 'eth_getBalance'
params = [account, 'latest']
return self.rpc_call(method, params)
def get_block_transaction_count(self, index):
"""eth.getBlockTransactionCount()"""
method = 'eth_getBlockTransactionCountByNumber'
params = [hex(index)]
result = self.rpc_call(method, params)
return int(result, 16) if result else 0 # change hex number to dec
def remove_transaction(self) -> bool:
"""eth.removeTx()"""
method = 'eth_removeTx'
return self.rpc_call(method)
def add_peer(self, *args) -> bool:
"""admin.addPeer()"""
method = 'admin_addPeer'
params = list(args)
sleep(0.02) ###
result = self.rpc_call(method, params)
return result
# if RPC does not work well, use this method
# IPC method can be a substitution for RPC method
def ipc_add_peer(self, *args):
"""IPC version admin.addPeer()"""
try:
add_peer_command = ("docker exec -t %s geth attach ipc://root/abc/geth.ipc "
"--exec \"admin.addPeer%s\"" % (self.name, args))
sleep(0.02)
self.ip.exec_command(add_peer_command)
except Exception as e:
raise RuntimeError('%s:%s %s %s %s' % (self.ip, self.ethereum_network_port, self.rpc_port, 'addPeer', e))
def set_enode(self) -> None:
"""Set enode info of a node."""
method = 'admin_nodeInfo'
result = self.rpc_call(method) # result from rpc call
enode = result['enode'].split('@')[0]
self.enode = '{}@{}:{}'.format(enode, self.ip.address, self.ethereum_network_port)
def set_number(self, node_count: int, thresh: int) -> bool:
"""admin.set_number()"""
# Check if the input params are legal
if node_count < thresh:
raise ValueError('nodeCount should be no less than threshold value')
if thresh <= 0 or node_count <= 0:
raise ValueError('nodeCount and threshold value should be positive')
method = 'admin_setNumber'
params = [node_count, thresh]
sleep(0.02)
return self.rpc_call(method, params)
def set_level(self, level, max_level) -> bool:
"""admin.setLevel()"""
# Check if the input params are legal
if max_level < level:
raise ValueError('level should be no larger than maxLevel')
if level < 0:
            raise ValueError('level should be non-negative')
method = 'admin_setLevel'
params = [max_level, level]
sleep(0.02)
return self.rpc_call(method, params)
def set_id(self, chain_id):
"""admin.setID()"""
method = 'admin_setID'
params = [chain_id]
print('id is:', chain_id)
sleep(0.02)
return self.rpc_call(method, params)
def key_count(self):
"""eth.keyCount()"""
method = 'eth_keyCount'
return self.rpc_call(method)
def txpool_status(self) -> int:
"""txpool.status"""
method = 'txpool_status'
result = self.rpc_call(method)
# sleep(0.1)
print("txpool.status pending:%d, queued:%d" % (int(result['pending'], 16),
int(result['queued'], 16)))
return int(result['pending'], 16) + int(result['queued'], 16)
def start_miner(self) -> None:
"""miner.start()"""
method = 'miner_start'
return self.rpc_call(method)
def stop_miner(self) -> None:
"""miner.stop()"""
method = 'miner_stop'
return self.rpc_call(method)
def get_block_by_index(self, block_index: int) -> Any:
"""eth.getBlock()"""
# check if index is greater than or equal 0
if block_index < 0:
raise ValueError('blockNumber should be non-negative')
block_index_hex_string = hex(block_index)
method = 'eth_getBlockByNumber'
params = [block_index_hex_string, True]
return self.rpc_call(method, params)
def get_transaction_by_block_number_and_index(self, block_number, index) -> str:
block_number_hex_string = hex(block_number)
index_hex_string = hex(index)
method = 'eth_getTransactionByBlockNumberAndIndex'
params = [block_number_hex_string, index_hex_string]
result = self.rpc_call(method, params) # result from rpc call
return result['hash'] if result else None
def get_transaction_proof_by_hash(self, transaction_hash) -> list:
"""eth.getTxProofByHash()"""
method = 'eth_getTxProofByHash'
params = [transaction_hash]
result = self.rpc_call(method, params)
print(result)
return result
def get_transaction_proof_by_proof(self, transaction_proof: list) -> list:
"""eth.getTxProofByProf()"""
method = 'eth_getTxProofByProof'
params = [transaction_proof]
# sleep(0.02)
result = self.rpc_call(method, params)
print(result)
return result
def is_geth_running(self) -> bool:
"""Check if the client is running."""
command = 'docker exec -t %s geth attach ipc://root/abc/geth.ipc --exec "admin.nodeInfo"' % self.name
result = self.ip.exec_command(command)
return False if result.split(':')[0] == 'Fatal' else True
def stop(self) -> None:
"""Remove the geth-pbft node container on remote server."""
stop_command = "docker stop %s" % self.name
self.ip.exec_command(stop_command)
print('node %s of blockchain %s at %s:%s stopped' % (self.node_index, self.blockchain_id,
self.ip.address, self.rpc_port))
if __name__ == "__main__":
ip_list = IPList(ip_file=IP_CONFIG)
ip_list.stop_all_containers()
n = GethNode(ip_list=ip_list, pbft_id=0, node_index=1, blockchain_id=121)
n.start()
print(n.accounts)
n.stop()
```
#### File: testGethPBFT/chain/hibechain.py
```python
from typing import Optional
from chain.const import USERNAME, PASSWD, IP_CONFIG, CONFIG
from chain.conf import load_config_file
from chain.singlechain import SingleChain
from chain.iplist import IPList
import threading
import time
class HIBEChain(object):
"""
Data structure for an Hierarchical Identity Based Encryption Chain.
"""
def __init__(self, chain_id_list: [str], thresh_list: [tuple], ip_list: IPList,
username: str = USERNAME, password: str = <PASSWORD>) -> None:
# Check if the input params are legal
if not len(chain_id_list) == len(thresh_list):
raise ValueError("length of chain_id_list should match length of thresh_list")
needed_count = sum(node_count for (node_count, _) in thresh_list)
containers_count = ip_list.get_full_count()
if needed_count > containers_count:
raise ValueError("%d containers needed but only %d containers available" % (needed_count, containers_count))
self.username = username
self.password = password
self.chains = []
self.structured_chains = []
self.chain_id_list = chain_id_list
self.thresh_list = thresh_list
self.ip_list = ip_list
self.max_level = len(chain_id_list[-1]) // 2
self.if_set_number = False
self.if_set_level = False
self.if_set_id = False
self.init_chains()
threads = []
for level in self.structured_chains[:-1]:
for chain in level:
t = threading.Thread(target=chain.config_consensus_chain)
t.start()
threads.append(t)
for t in threads:
t.join()
threads = []
if not self.structured_chains[-1][0].is_terminal: # no terminals
for chain in self.structured_chains[-1]:
t = threading.Thread(target=chain.config_consensus_chain)
t.start()
threads.append(t)
else: # terminals
for chain in self.structured_chains[-2]:
print('--------------')
print('config leaf chains-----------------------')
chain.config_leaf_chain(self.structured_chains[-1]) # config leaf chains
# break # TODO need to be optimized
for chain in self.structured_chains[-1]:
print('-----------------')
print('----------------------config terminals')
t = threading.Thread(target=chain.config_terminal) # config terminals
t.start()
threads.append(t)
for t in threads:
t.join()
time.sleep(3)
threads = []
for chain in self.chains:
t = threading.Thread(target=chain.run_nodes)
t.start()
threads.append(t)
# time.sleep(1)
for t in threads:
t.join()
time.sleep(0.5)
def construct_hibe_chain(self) -> None:
"""
Construct the hierarchical construction of the HIBEChain.
Connect blockchain nodes with their parent blockchain nodes.
"""
time.sleep(1)
print('construct hibe chain')
threads = []
for chain in self.chains[::-1]:
if chain.chain_id != '':
parent_chain = self.chains[self.chain_id_list.index(chain.chain_id[:-2])]
# parent_chain.connect_lower_chain(chain)
t = threading.Thread(target=parent_chain.connect_lower_chain, args=(chain,))
t.start()
threads.append(t)
time.sleep(1)
# print('active threads:', threading.active_count())
for t in threads:
t.join()
time.sleep(1)
# TODO check peer count
def __repr__(self) -> str:
return ' '.join([str(chain.chain_id) for chain in self.chains])
def __str__(self) -> str:
return '\n'.join([chain.__str__() for chain in self.chains])
def is_connected(self) -> bool:
for level in self.structured_chains:
for chain in level:
parent = self.get_parent_chain(chain)
children = self.get_child_chains(chain)
peer_count = chain.node_count - 1
if parent:
peer_count += parent.node_count
if children:
for child in children:
peer_count += child.node_count
for node in chain.nodes:
tmp_count = node.get_peer_count()
if tmp_count != peer_count:
print('%s %s peer count is %d, should be %d' % (chain, node, tmp_count, peer_count))
return False
return True
def destruct_hibe_chain(self) -> None:
"""Stop all containers to destruct the HIBEChain."""
threads = []
for chain in self.chains:
t = threading.Thread(target=chain.destruct_chain)
t.start()
threads.append(t)
for t in threads:
t.join()
def get_chain(self, chain_id: str = '') -> SingleChain:
"""Return a list of blockchain nodes with a given chain ID(eg. '00010001')."""
try:
index = self.chain_id_list.index(chain_id)
return self.chains[index]
        except (ValueError, IndexError):
print("ID %s is not in the HIBEChain" % chain_id)
def get_parent_chain(self, chain: SingleChain) -> Optional[SingleChain]:
"""
Return parent chain.
Return None if current chain is root chain.
"""
if chain.chain_id == '':
print('root chain does not have a parent chain')
return None
else:
parent_chain_id = chain.chain_id[:-2]
return self.get_chain(parent_chain_id)
def get_child_chains(self, chain: SingleChain) -> [SingleChain]:
"""
Return a list of child chains.
Return empty list if no child chain.
"""
child_chains = []
for level in self.structured_chains[1:]:
if level[0].chain_id[:-2] == chain.chain_id:
child_chains.extend(level)
return child_chains
def init_chains(self) -> None:
threads = []
# count = 0
level = 0
tmp_chain = list()
for index, name in enumerate(self.chain_id_list):
if name:
print("name is %s" % name, end=" ")
else:
print("name is blank", end=" ")
current_level = len(name) // 2
node_count, threshold = self.thresh_list[index][0], self.thresh_list[index][1]
blockchain_id = 120 + index
tmp = SingleChain(name, current_level, node_count, threshold, blockchain_id, self.ip_list, self.username,
self.password)
self.chains.append(tmp)
if current_level == level:
tmp_chain.append(tmp)
else:
self.structured_chains.append(tmp_chain)
tmp_chain = list()
tmp_chain.append(tmp)
level = current_level
t = threading.Thread(target=tmp.singlechain_start)
t.start()
threads.append(t)
# time.sleep(0.1) ####
self.structured_chains.append(tmp_chain)
print()
for t in threads:
t.join()
for ch in self.structured_chains[-1]:
if ch.threshold == 1:
ch.is_terminal = True
def set_number(self) -> None:
"""Set (n, t) value for all chains in HIBEChain."""
threads = []
for chain in self.chains:
t = threading.Thread(target=chain.set_number, args=())
t.start()
threads.append(t)
for t in threads:
t.join()
self.if_set_number = True
time.sleep(0.3)
def set_level(self) -> None:
"""Set level value for all chains in HIBEChain."""
threads = []
for chain in self.chains:
t = threading.Thread(target=chain.set_level, args=(self.max_level,))
t.start()
threads.append(t)
print('waiting for set level')
for t in threads:
print('.', end='')
t.join()
self.if_set_level = True
time.sleep(0.3)
def set_id(self) -> None:
"""Set ID for all chains in HIBEChain."""
        if not (self.if_set_number and self.if_set_level):
raise RuntimeError("number and level info should be set previously")
start_time = time.time()
for index, level in enumerate(self.structured_chains):
threads = []
# variable level means all chains in the same level
for chain in level:
t = threading.Thread(target=chain.set_id)
t.start()
threads.append(t)
for t in threads:
t.join()
print('waiting for delivering key')
if index == 0:
time.sleep(max([chain.node_count for chain in level])*6) # *6
# sleep_time = max([chain.node_count for chain in level]) * 10
# print('root level waiting...%ds' % sleep_time)
# time.sleep(sleep_time)
while not all([node.key_status() for chain in level for node in chain.nodes]):
print('root level waiting ')
time.sleep(5)
else:
threads = []
for chain in level:
t = threading.Thread(target=self.gen_key, args=(chain, ))
t.start()
threads.append(t)
for t in threads:
t.join()
time.sleep(5)
# for chain1 in level:
# true_count = 0
# if chain1.is_terminal: # setting terminal node keys
# print('setting terminal keys')
# terminal_node = chain1.get_node_by_index(1)
#
# while True:
# result = terminal_node.key_status()
# if result is False:
# time.sleep(2)
# terminal_node.set_id(chain1.chain_id)
# time.sleep(3)
# else:
# break
# print('40s waiting for key generation...')
# time.sleep(40)
# key_count = terminal_node.key_count()
# while True:
# print('another 10s waiting for key generation...')
# time.sleep(10)
# tmp_count = terminal_node.key_count()
# if tmp_count != 0 and tmp_count == key_count:
# break
# # if tmp_count == 0:
# # chain1.set_id()
# key_count = tmp_count
#
# print('terminal keys generated.')
#
# else:
# while True:
# for node in chain1.nodes:
# print('%s:%s waiting for key' % (node.ip.address, node.rpc_port))
# result = node.key_status()
# if result is True:
# true_count += 1
# else:
# node.set_id(chain1.chain_id)
# print('true count is:', true_count)
# if true_count >= chain1.threshold:
# break
# else:
# time.sleep(5)
#
# # while not all([node.key_status() for node in chain.nodes for chain in level]):
# # print('level %d is not ready, waiting...' % index)
# # time.sleep(5)
self.if_set_id = True
print("------setID finished----------------")
end_time = time.time()
print('setID elapsed time %.2f' % (end_time - start_time))
def start_miner(self) -> None:
"""Start miners for all consensus nodes."""
threads = []
for level in self.structured_chains[:-1]:
for chain in level:
t = threading.Thread(target=chain.start_miner)
t.start()
threads.append(t)
time.sleep(0.02)
for t in threads:
t.join()
def stop_miner(self) -> None:
"""Stop miners for all consensus nodes."""
threads = []
for level in self.structured_chains[:-1]:
for chain in level:
t = threading.Thread(target=chain.stop_miner)
t.start()
threads.append(t)
time.sleep(0.02)
for t in threads:
t.join()
@staticmethod
def gen_key(single_chain: SingleChain) -> None:
if single_chain.is_terminal: # setting terminal node keys
print('setting terminal keys for chain', single_chain.chain_id)
terminal_node = single_chain.get_node_by_index(1)
while True:
result = terminal_node.key_status()
if result is False:
time.sleep(2)
terminal_node.set_id(single_chain.chain_id)
time.sleep(3)
else:
break
print('40s waiting for key generation...')
time.sleep(40)
key_count = terminal_node.key_count()
while True:
print('another 10s waiting for key generation...')
time.sleep(10)
tmp_count = terminal_node.key_count()
if tmp_count != 0 and tmp_count == key_count:
break
# if tmp_count == 0:
# chain1.set_id()
key_count = tmp_count
print(key_count, 'terminal keys generated.')
else:
true_count = 0
while True:
for node in single_chain.nodes:
print('%s:%s waiting for key' % (node.ip.address, node.rpc_port))
time.sleep(2)
result = node.key_status()
if result is True:
true_count += 1
else:
time.sleep(3)
node.set_id(single_chain.chain_id)
time.sleep(2)
print('true count is:', true_count)
if true_count == single_chain.node_count: # true_count >= single_chain.threshold:
break
else:
time.sleep(5)
if __name__ == "__main__":
# chain_id_list = ["", "01", "02"]
# thresh_list = [(4, 3), (1, 1), (1, 1)]
ip_list = IPList(ip_file=IP_CONFIG)
ip_list.stop_all_containers()
chain_id_list, thresh_list = load_config_file(config_file=CONFIG)
hibe = HIBEChain(chain_id_list=chain_id_list, thresh_list=thresh_list, ip_list=ip_list)
hibe.construct_hibe_chain()
hibe.set_number()
hibe.set_level()
hibe.set_id()
# hibe.destruct_hibe_chain()
```
#### File: testGethPBFT/testScript/runInstances.py
```python
import json
import time
import datetime
import traceback
from src.secret import ACCESS_KEY_ID, ACCESS_SECRET
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ClientException, ServerException
from aliyunsdkecs.request.v20140526.RunInstancesRequest import RunInstancesRequest
from aliyunsdkecs.request.v20140526.DescribeInstancesRequest import DescribeInstancesRequest
RUNNING_STATUS = 'Running'
CHECK_INTERVAL = 3
CHECK_TIMEOUT = 180
class AliyunRunInstances(object):
def __init__(self, aliyun_region, image_id, instance_type, instance_amount,
auto_release_time, instance_name, is_dry_run=True):
self.access_id = ACCESS_KEY_ID
self.access_secret = ACCESS_SECRET
        # Whether this request is a dry run only. true: send a validation request, no instance is created and no fee is incurred; false: send a normal request, and once it passes validation the instance is created and billed immediately
self.dry_run = is_dry_run
        # Region ID that the instance belongs to
self.region_id = aliyun_region
        # Instance type (resource specification)
self.instance_type = instance_type
        # Billing method of the instance
self.instance_charge_type = 'PostPaid'
        # Number of ECS instances to create
self.amount = instance_amount
        # Unit of the purchased duration
self.period_unit = 'Hourly'
        # Automatic release time (UTC)
self.auto_release_time = auto_release_time
        # Image ID
self.image_id = image_id
        # Security group ID that the newly created instances belong to
self.security_group_id = 'sg-8vbcmij3wtpiz6la5fxo'
        # Purchased duration
self.period = 1
        # Availability zone ID of the instance
self.zone_id = 'random'
        # Network billing type
self.internet_charge_type = 'PayByTraffic'
        # VSwitch ID
self.vswitch_id = 'vsw-8vb1lonrynma16k3e7wge'
        # Instance name
self.instance_name = instance_name
        # Description of the instance
self.description = 'geth-pbft'
        # Whether to use the password preset in the image
self.password_inherit = True
        # Maximum outbound public bandwidth
self.internet_max_bandwidth_out = 5
        # Host name of the cloud server
self.host_name = 'test'
        # Whether to append an ordered suffix to the instance name and host name
self.unique_suffix = True
        # Whether the instance is I/O optimized
self.io_optimized = 'optimized'
        # System disk size
self.system_disk_size = '40'
        # System disk category
self.system_disk_category = 'cloud_efficiency'
self.client = AcsClient(self.access_id, self.access_secret, self.region_id)
def run(self):
try:
ids = self.run_instances()
self._check_instances_status(ids)
except ClientException as e:
print('Fail. Something with your connection with Aliyun go incorrect.'
' Code: {code}, Message: {msg}'
.format(code=e.error_code, msg=e.message))
except ServerException as e:
print('Fail. Business error.'
' Code: {code}, Message: {msg}'
.format(code=e.error_code, msg=e.message))
except Exception:
print('Unhandled error')
print(traceback.format_exc())
def run_instances(self):
"""
        Call the instance-creation API; after the instance IDs are returned, continue to query the instance status.
        :return: instance_ids, the instance IDs that need to be checked
"""
request = RunInstancesRequest()
request.set_DryRun(self.dry_run)
request.set_InstanceType(self.instance_type)
request.set_InstanceChargeType(self.instance_charge_type)
request.set_ImageId(self.image_id)
request.set_SecurityGroupId(self.security_group_id)
request.set_Period(self.period)
request.set_PeriodUnit(self.period_unit)
request.set_ZoneId(self.zone_id)
request.set_InternetChargeType(self.internet_charge_type)
request.set_VSwitchId(self.vswitch_id)
request.set_InstanceName(self.instance_name)
request.set_Description(self.description)
request.set_PasswordInherit(self.password_inherit)
request.set_Amount(self.amount)
request.set_InternetMaxBandwidthOut(self.internet_max_bandwidth_out)
request.set_HostName(self.host_name)
request.set_UniqueSuffix(self.unique_suffix)
request.set_IoOptimized(self.io_optimized)
request.set_AutoReleaseTime(self.auto_release_time)
request.set_SystemDiskSize(self.system_disk_size)
request.set_SystemDiskCategory(self.system_disk_category)
body = self.client.do_action_with_exception(request)
data = json.loads(body)
instance_ids = data['InstanceIdSets']['InstanceIdSet']
print('Success. Instance creation succeed. InstanceIds: {}'.format(', '.join(instance_ids)))
return instance_ids
def _check_instances_status(self, instance_ids):
"""
        Check the status of the instances every 3 seconds, with a timeout of 3 minutes.
        :param instance_ids: the instance IDs to check
:return:
"""
start = time.time()
while True:
request = DescribeInstancesRequest()
request.set_InstanceIds(json.dumps(instance_ids))
body = self.client.do_action_with_exception(request)
data = json.loads(body)
for instance in data['Instances']['Instance']:
if RUNNING_STATUS in instance['Status']:
instance_ids.remove(instance['InstanceId'])
print('Instance boot successfully: {}'.format(instance['InstanceId']))
if not instance_ids:
print('Instances all boot successfully')
break
if time.time() - start > CHECK_TIMEOUT:
print('Instances boot failed within {timeout}s: {ids}'
.format(timeout=CHECK_TIMEOUT, ids=', '.join(instance_ids)))
break
time.sleep(CHECK_INTERVAL)
if __name__ == '__main__':
"""
    Variables that need to be modified:
    ACCESS_KEY_ID, ACCESS_SECRET
    image_id: image ID
    is_dry_run: only when set to False are servers actually purchased; it can be set to True to fetch the IP list again; it is recommended to set it back to True right after purchasing servers
    instance_type: instance type
    instance_amount: number of instances
    ordered_hours: number of hours to purchase
    instance_name: change as needed
    instances: confirm the arguments used to instantiate AliyunRunInstances
    client: confirm the server region information
    request.set_InstanceName('name'): keep consistent with instance_name
    with open('ip_list_name', 'w') as ip_file: change as needed
"""
# set parameters for instances
aliyun_region_id = {1: 'cn-zhangjiakou', 2: 'cn-beijing', 3: 'cn-hangzhou', 4: 'cn-shanghai', 5: 'cn-shenzhen',
6: 'cn-qingdao', 7: 'cn-huhehaote'}
image_id = 'm-8vb3i5iaiyul8xshx9w3'
# keep this variable True in case of mis-operation
is_dry_run = True # change this variable to False to run instances in effect
instance_type = 'ecs.r6.xlarge' # change this value to instance type you need
instance_amount = 1
ordered_hours = 1 # generate auto release time according to ordered hours
time_now_utc = datetime.datetime.utcnow()
release_time = time_now_utc + datetime.timedelta(hours=ordered_hours)
auto_release_time = release_time.strftime('%Y-%m-%dT%H:%M:%SZ')
instance_name = 'geth-pbft-%s' % time_now_utc.strftime('%Y%m%dT%H%M%S')
instances = AliyunRunInstances(aliyun_region_id[1], image_id, instance_type, instance_amount,
auto_release_time, instance_name, is_dry_run)
instances.run()
print('waiting...')
time.sleep(20)
# ------------------------------------------------------------------
# write IP addresses of instances to file
client = AcsClient(ACCESS_KEY_ID, ACCESS_SECRET, '<KEY>')
request = DescribeInstancesRequest()
request.set_accept_format('json')
request.set_PageSize(100)
request.set_InstanceName("geth-pbft*")
response = client.do_action_with_exception(request)
r = json.loads(str(response, encoding='utf-8'))
instances = (r['Instances']['Instance'])
ips = [ins['PublicIpAddress']['IpAddress'][0] + '\n' for ins in instances]
print(len(ips), ips)
with open('../config/my_ip.txt', 'w') as ip_file:
ip_file.writelines(ips)
``` |
{
"source": "Jianghuchengphilip/Master-art-punk",
"score": 3
} |
#### File: Jianghuchengphilip/Master-art-punk/colors.py
```python
import PIL
import csv
import cv2
import math
import random
from PIL import Image
import numpy as np
from matplotlib import image
from sklearn.cluster import KMeans
from os import listdir
import png
from function.subject import canvas
import os
class ColorMultiImage(object):
def __init__(self,k = 30,init_method = 'random',random_state = 88):
self.k = k
self.init_method = init_method
self.random_state = random_state
self.color_number = 0
def box_method(self,color_data, group_distance,colors_number):
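        # Stratified sampling over the distance-sorted palette: step through the far-from-black
        # end in windows of `group_distance` and keep one randomly chosen colour per window.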
color_data_random_box = []
for i in range(0, colors_number):
color_data_random_box.append(
color_data[(len(color_data) - 1) - (i * group_distance + random.randint(0, group_distance - 1))])
return color_data_random_box
def rgb_to_hex(self,rgb):
color = ''
for i in rgb:
num = round(float(i))
color += str(hex(num))[-2:].replace('x', '0').lower()
return color
def get_all_colors_list(self,model, k):
colors = []
labels_list = np.arange(0, k + 1)
(proportion, _) = np.histogram(model.labels_, bins=labels_list)
proportion = proportion.astype("float")
proportion /= proportion.sum()
for (_, color) in sorted(zip(proportion, model.cluster_centers_), key=lambda x: x[0], reverse=True):
colors.append(list(map(int, color)))
return colors
def get_main_colors(self,directory):
colors_all_out = []
for filename in listdir(directory):
img = cv2.imread(directory + filename)
try:
PIL.Image.fromarray(image.imread(directory + filename))
except FileNotFoundError:
continue
img_data = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
r, g, _ = cv2.split(img_data)
img = img.reshape((img_data.shape[0] * img_data.shape[1], 3))
print("加载" + filename + "中......")
model = KMeans(n_clusters=self.k, init=self.init_method, random_state=self.random_state)
model.fit(img)
colors_all_out += self.get_all_colors_list(model, self.k)
print("完成" + filename + "提取!")
return colors_all_out
def get_color_data(self,color_distance_filepath,colors_number):
f = open(color_distance_filepath, "r+", encoding="utf-8-sig")
reader = csv.reader(f)
color_data_sort = list(reader)
box = self.box_method(color_data_sort, len(color_data_sort) // colors_number,colors_number)
return [self.rgb_to_hex(i[:3]) for i in box]
def colour_distance(self,rgb_1, rgb_2):
R_1, G_1, B_1 = rgb_1
R_2, G_2, B_2 = rgb_2
rmean = (R_1 + R_2) / 2
R = R_1 - R_2
G = G_1 - G_2
B = B_1 - B_2
return math.sqrt((2 + rmean / 256) * (R ** 2) + 4 * (G ** 2) + (2 + (255 - rmean) / 256) * (B ** 2))
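    # Editor's note (illustrative): this is the common "redmean" low-cost approximation to
    # perceptual colour difference; it weights the green channel most heavily, so e.g.
    # colour_distance((0, 10, 0), (0, 0, 0)) > colour_distance((10, 0, 0), (0, 0, 0)).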
def random_colors(self):
colorArr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
color = ""
for random_value in range(6):
color += colorArr[random.randint(0, 14)]
return color
def merge(self,sticker0, sticker1):
colors = sticker0['colors']
index = {}
for i, color in enumerate(sticker1['colors']):
if color not in colors:
colors.append(color)
index[i] = colors.index(color)
self.colors_number = colors.index(color)
else:
index[i] = colors.index(color)
for i, row in enumerate(sticker1['data']):
for j, color in enumerate(row):
if color > 0:
sticker0['data'][i][j] = index[color]
return sticker0
def merges(self,stickers):
if len(stickers) >= 2:
sticker = self.merge(stickers.pop(0), stickers.pop(0))
stickers.insert(0, sticker)
else:
return stickers[0]
return self.merges(stickers)
def generate(self,image_data, name,number,color_distance_filepath,coloring_style,colors_number):
palette = [(255, 255, 255,0)]
if coloring_style == 0:
            colors = ['000000'] + [self.random_colors() for i in range(0,colors_number)]  # random colors
if coloring_style == 1:
            colors = ['000000'] + self.get_color_data(color_distance_filepath,colors_number)  # artist-style palette
for color in colors:
color = [int(c, 16) for c in (color[:2], color[2:4], color[4:])]
palette.append(tuple(color))
image = np.asarray([palette[np.asarray(image_data['data']).flatten()[x]] for x in range(0,len(np.asarray(image_data['data']).flatten()))]).flatten().reshape((24,24,3))
dirs = os.getcwd() + "\\output\\" + str(name) + "-output\\"
if not os.path.exists(dirs):
os.makedirs(dirs)
image = cv2.resize(image,(2500,2500),interpolation=cv2.INTER_NEAREST)
cv2.imwrite(dirs + name + number + ".png",image)
```
#### File: Jianghuchengphilip/Master-art-punk/model.py
```python
import csv
import os
from settings import DATACENTER_ID,WORKER_ID,SEQUENCE,color_distance
from colors import ColorMultiImage
import numpy as np
from function.snowflake import IdWorker
def training(color_data_path):
black_rbg = [0, 0, 0]
color_data_distance = []
color_distance_filepath = os.getcwd() + "\\output\\csv\\" + str(IdWorker(DATACENTER_ID, WORKER_ID, SEQUENCE).get_id()) + ".csv"
get_model = ColorMultiImage()
color_distance_csv = open(color_distance_filepath, "a+", newline="", encoding="utf-8-sig")
color_data = get_model.get_main_colors(color_data_path)
writer = csv.writer(color_distance_csv)
for rbg in color_data:
color_data_distance.append(rbg + [get_model.colour_distance(rbg, black_rbg)])
color_data_sort = sorted(color_data_distance, key=lambda x: x[3])
color_data_sort = np.array(color_data_sort)
color_data_sort_index = (color_data_sort[:, 3] > color_distance)
color_data_sort = color_data_sort[color_data_sort_index]
for rbg in color_data_sort:
writer.writerow(tuple(rbg))
return color_distance_filepath
``` |