metadata
dict | text
stringlengths 60
3.49M
|
---|---|
{
"source": "jianghy1998/TruPercept",
"score": 2
} |
#### File: TruPercept/tru_percept/std_utils.py
```python
import os
import errno
import shutil
import logging
import random
import config as cfg
import constants as const
def make_dir(filepath):
    """Ensure that the directory containing *filepath* exists.

    filepath: path to a file; only its directory component is created.
    A bare filename (no directory part) is a no-op.
    """
    dirname = os.path.dirname(filepath)
    # exist_ok=True is race-free, replacing the old errno.EEXIST guard.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
def delete_all_subdirs(subdir):
    """Remove *subdir* from the main dataset directory and from every
    alternate-perspective directory."""
    delete_subdir(subdir)
    for entity_str in const.valid_perspectives():
        delete_subdir(subdir, os.path.join(cfg.ALT_PERSP_DIR, entity_str))
def delete_subdir(subdir, basedir=cfg.DATASET_DIR):
    """Delete directory *basedir*/*subdir* if it exists; silently no-op otherwise."""
    target = os.path.join(basedir, subdir)
    # isdir() already implies existence, so a single check suffices.
    if os.path.isdir(target):
        logging.debug("Deleting directory: {}".format(target))
        shutil.rmtree(target)
def save_objs_to_file(objs, idx, out_dir, results=False):
    """Write *objs* to <out_dir>/<idx:06d>.txt in KITTI label format.

    The output file is created (and truncated) even when *objs* is None,
    leaving an empty placeholder file for that frame index.

    objs: iterable of detection objects with KITTI-style attributes, or None.
    idx: integer frame index used to build the file name.
    out_dir: destination directory (must already exist).
    results: when True, append obj.score as a 16th column.
    """
    out_file = out_dir + '/{:06d}.txt'.format(idx)
    with open(out_file, 'w+') as f:
        if objs is None:
            return
        for obj in objs:
            # Discretize the continuous occlusion value into KITTI levels.
            if obj.occlusion > 0.5:
                occ_lvl = 2
            elif obj.occlusion > 0.2:
                occ_lvl = 1
            else:
                occ_lvl = 0
            # Build the field list once instead of duplicating the format
            # string for the results / non-results cases.
            fields = [obj.type, obj.truncation, occ_lvl, obj.alpha,
                      obj.x1, obj.y1, obj.x2, obj.y2,
                      obj.h, obj.w, obj.l,
                      obj.t[0], obj.t[1], obj.t[2], obj.ry]
            if results:
                fields.append(obj.score)
            # KITTI tooling downstream expects CRLF line endings here.
            f.write(' '.join(str(v) for v in fields) + '\r\n')
# Bernoulli trial helper.
def decision_true(probability):
    """Return True with the given probability (0.0 -> never, 1.0 -> always)."""
    draw = random.random()
    return draw < probability
```
#### File: tru_percept/tools/malicious_entity_designation.py
```python
import os
import config as cfg
import constants as const
import std_utils
def main():
    """Randomly designate malicious entities.

    Each valid perspective id is written to the output file with probability
    cfg.RANDOM_MALICIOUS_PROBABILITY; listed entities are the malicious set.
    """
    filepath = os.path.join(cfg.DATASET_DIR, cfg.FALSE_DETECTIONS_SUBDIR,
                            'random_{}.txt'.format(cfg.RANDOM_MALICIOUS_PROBABILITY))
    with open(filepath, 'w') as f:
        for entity_str in const.valid_perspectives():
            if std_utils.decision_true(cfg.RANDOM_MALICIOUS_PROBABILITY):
                f.write('%s\n' % entity_str)


# Guarded so importing this module no longer triggers the file write.
if __name__ == '__main__':
    main()
```
#### File: TruPercept/tru_percept/vehicle_trust.py
```python
import os
import shutil
import numpy as np
import logging
from wavedata.tools.obj_detection import obj_utils
import config as cfg
import trust_utils
import std_utils
import constants as const
import message_evaluations
import perspective_utils as p_utils
def calculate_vehicle_trusts():
    """Compute and persist per-vehicle trust values for every frame index.

    Previous trust output is deleted first, then trust values are accumulated
    frame by frame (ego vehicle plus all alternate perspectives) and written
    out once per index. Stops at the first missing velodyne frame.
    """
    # Before calculating, first delete all previous vehicle trust values
    std_utils.delete_subdir(cfg.V_TRUST_SUBDIR)

    # Entity ID -> VehicleTrust object, accumulated across frames
    trust_dict = {}

    velo_dir = cfg.DATASET_DIR + '/velodyne'
    for idx in range(cfg.MIN_IDX, cfg.MAX_IDX + 1):
        filepath = velo_dir + '/{:06d}.bin'.format(idx)
        if not os.path.isfile(filepath):
            logging.debug("Could not find file: %s", filepath)
            logging.debug("Stopping at idx: %d", idx)
            break

        # Load stale trust dict if we need it (past msg fresh period)
        stale_trust_dict = {}
        if (idx - cfg.STALE_EVALS_TIME) >= 0:
            stale_trust_dict = load_vehicle_trust_objs(idx - cfg.STALE_EVALS_TIME)

        # First for the ego vehicle, then for all the alternate perspectives
        compute_vehicle_trust(cfg.DATASET_DIR, const.ego_id(), idx, trust_dict, stale_trust_dict)
        for entity_str in const.valid_perspectives():
            perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
            compute_vehicle_trust(perspect_dir, int(entity_str), idx, trust_dict, stale_trust_dict)

        write_trust_vals(trust_dict, idx)

    print("Finished calculating vehicle trusts")
def compute_vehicle_trust(persp_dir, persp_id, idx, trust_dict, stale_trust_dict):
    """Update the running trust value of vehicle *persp_id* for frame *idx*.

    Aggregates other vehicles' evaluations of this perspective's detection
    messages, weights each message by its detection score, and folds the
    result into the VehicleTrust accumulator in *trust_dict* (created on
    first sight). Contributions older than cfg.STALE_EVALS_TIME frames are
    subtracted back out via *stale_trust_dict*.
    """
    msg_evals = message_evaluations.load_msg_evals(persp_dir, idx)
    detections = p_utils.get_detections(persp_dir, persp_dir, idx,
                                        persp_id, persp_id, results=cfg.USE_RESULTS)

    # Group the evaluations by the detection index they refer to.
    eval_lists = {}
    for msg_eval in msg_evals:
        if msg_eval.det_idx in eval_lists:
            eval_lists[msg_eval.det_idx].append(msg_eval)
        else:
            eval_lists[msg_eval.det_idx] = [msg_eval]

    eval_count = 0
    trust_sum = 0
    logging.debug(eval_lists)
    for det_idx, eval_list in eval_lists.items():
        num = 0
        den = 0
        logging.debug("det_idx: %d", det_idx)
        logging.debug("Eval list: {}".format(eval_list))
        logging.debug("Eval list len: %d", len(eval_list))
        for eval_item in eval_list:
            # Don't use own evaluations for vehicle trust
            if eval_item.evaluator_id != persp_id:
                num += eval_item.evaluator_certainty * eval_item.evaluator_score
                den += eval_item.evaluator_certainty
        # Certainty-weighted mean evaluator score, clamped to [-1, 1].
        if den == 0:
            msg_trust = 0
        else:
            msg_trust = num / den
        msg_trust = max(msg_trust, -1.0)
        msg_trust = min(msg_trust, 1.0)
        # Weight this message's trust by the detection's own confidence.
        trust_sum += msg_trust * detections[det_idx].obj.score
        eval_count += detections[det_idx].obj.score

    # Obtain VehicleTrust object, create new object if new vehicle
    print_test = False
    if persp_id in trust_dict:
        v_trust = trust_dict[persp_id]
    else:
        v_trust = trust_utils.VehicleTrust()
        logging.debug("New trust object")
        logging.debug("v_trust value: %f", v_trust.val)
        trust_dict[persp_id] = v_trust
        print_test = True

    # Update trust with current evaluations
    v_trust.sum += trust_sum
    v_trust.count += eval_count
    v_trust.curr_score = trust_sum
    v_trust.curr_count = eval_count

    # Remove stale evaluations (evaluated STALE_EVALS_TIME frames ago)
    if (idx - cfg.STALE_EVALS_TIME) >= 0:
        if persp_id in stale_trust_dict:
            v_trust.sum -= stale_trust_dict[persp_id].curr_score
            v_trust.count -= stale_trust_dict[persp_id].curr_count

    # Recompute the average; fall back to the default when nothing counted.
    if v_trust.count > 0:
        v_trust.val = v_trust.sum / v_trust.count
    else:
        v_trust.val = cfg.DEFAULT_VEHICLE_TRUST_VAL

    if print_test:
        logging.debug("test value: %f", v_trust.val)
        logging.debug("map value: %f", trust_dict[persp_id].val)
############################################################################################
# Utility functions
# Returns a dictionary with the vehicle trust values from the given index
# Returns a dictionary with the vehicle trust values from the given index
def load_vehicle_trust_objs(idx):
    """Load the per-vehicle trust values written for frame *idx*.

    Returns a dict mapping entity id -> trust_utils.VehicleTrust. An empty
    dict is returned when *idx* is negative or the trust file is missing or
    empty.
    """
    if idx < 0:
        return {}

    filepath = cfg.DATASET_DIR + '/' + cfg.V_TRUST_SUBDIR + '/{:06d}.txt'.format(idx)
    if not os.path.isfile(filepath):
        print("Could not find vehicle trust filepath: ", filepath)
        return {}
    if os.stat(filepath).st_size == 0:
        print("Filesize 0 for vehicle trust filepath: ", filepath)
        return {}

    p = np.loadtxt(filepath, delimiter=' ',
                   dtype=str,
                   usecols=np.arange(start=0, step=1, stop=6))
    # Normalize to 2-D so single-row files share the same parsing code path
    # (the original duplicated the field extraction for the 1-D case, and
    # its row loop shadowed the *idx* parameter).
    rows = np.atleast_2d(p)

    v_trust_dict = {}
    for row in rows:
        trust_obj = trust_utils.VehicleTrust()
        trust_obj.val = float(row[1])
        trust_obj.sum = float(row[2])
        trust_obj.count = int(row[3])
        trust_obj.curr_score = float(row[4])
        trust_obj.curr_count = int(row[5])
        v_trust_dict[int(row[0])] = trust_obj
    return v_trust_dict
def vehicle_trust_value(trust_values, v_id):
    """Return the trust value for vehicle *v_id*, or the default when unknown."""
    if v_id not in trust_values:
        return cfg.DEFAULT_VEHICLE_TRUST_VAL
    return trust_values[v_id].val
def write_trust_vals(trust_dict, idx):
    """Persist the current trust state of every vehicle for frame *idx*."""
    trust_vals_array = np.zeros([len(trust_dict), 6])
    for row, (entity_id, trust_obj) in enumerate(trust_dict.items()):
        trust_vals_array[row, 0] = entity_id
        trust_vals_array[row, 1] = max(0., trust_obj.val)  # never store negative trust
        trust_vals_array[row, 2] = trust_obj.sum
        trust_vals_array[row, 3] = trust_obj.count
        trust_vals_array[row, 4] = trust_obj.curr_score
        trust_vals_array[row, 5] = trust_obj.curr_count

    filepath = cfg.DATASET_DIR + '/' + cfg.V_TRUST_SUBDIR + '/{:06d}.txt'.format(idx)
    std_utils.make_dir(filepath)
    with open(filepath, 'w+') as f:
        np.savetxt(f, trust_vals_array,
                   newline='\r\n', fmt='%i %f %f %i %f %i')
``` |
{
"source": "jiangjiajun/Adlik",
"score": 2
} |
#### File: models/targets/saved_model.py
```python
from typing import NamedTuple, Optional, Sequence, Tuple
import tensorflow as tf
from .. import data_format, repository
from ..data_format import DataFormat
from ...protos.generated.model_config_pb2 import ModelInput, ModelOutput
class Input(NamedTuple):
    """A named model input: its graph tensor plus an optional data layout."""

    name: str
    tensor: tf.Tensor
    data_format: Optional[DataFormat] = None
class Output(NamedTuple):
    """A named model output tensor."""

    name: str
    tensor: tf.Tensor
@repository.REPOSITORY.register_target_model('tf')
class SavedModel(NamedTuple):
    """A TensorFlow model ready to be serialized in SavedModel format.

    Holds the input/output tensor bindings together with the session that
    owns the variable values.
    """

    inputs: Sequence[Input]
    outputs: Sequence[Output]
    session: tf.compat.v1.Session

    def get_inputs(self) -> Sequence[ModelInput]:
        """Describe the model inputs for the serving model config.

        The leading (batch) dimension is dropped; unknown dimensions are
        encoded as -1.
        """
        return [ModelInput(name=item.name,
                           data_type=item.tensor.dtype.as_datatype_enum,
                           format=data_format.as_model_config_data_format(item.data_format),
                           dims=[-1 if dim is None else dim for dim in item.tensor.shape[1:]])
                for item in self.inputs]

    def get_outputs(self) -> Sequence[ModelOutput]:
        """Describe the model outputs for the serving model config."""
        return [ModelOutput(name=item.name,
                            data_type=item.tensor.dtype.as_datatype_enum,
                            dims=[-1 if dim is None else dim for dim in item.tensor.shape[1:]])
                for item in self.outputs]

    def save(self, path: str) -> None:
        """Serialize the model to *path* with a single 'predict' signature."""
        with self.session.graph.as_default():
            builder = tf.compat.v1.saved_model.Builder(export_dir=path)
            builder.add_meta_graph_and_variables(
                sess=self.session,
                tags=[tf.compat.v1.saved_model.tag_constants.SERVING],
                signature_def_map={
                    'predict': tf.compat.v1.saved_model.predict_signature_def(inputs={item.name: item.tensor
                                                                                      for item in self.inputs},
                                                                              outputs={item.name: item.tensor
                                                                                       for item in self.outputs})
                },
                clear_devices=True
            )
            builder.save()

    @staticmethod
    def get_platform() -> Tuple[str, str]:
        """Return the (platform name, framework version) pair for this target."""
        return 'tensorflow', tf.version.VERSION
```
#### File: model_compiler/compilers/test_saved_model_to_openvino_model.py
```python
from unittest import TestCase
import tensorflow as tf
import model_compiler.compilers.saved_model_to_openvino_model as compiler
from model_compiler.compilers.saved_model_to_openvino_model import Config
from model_compiler.models.targets.saved_model import Input, Output, SavedModel
from model_compiler.protos.generated.model_config_pb2 import ModelInput, ModelOutput
def _make_saved_model() -> SavedModel:
    """Build a tiny test graph z = (x + y) * weight and wrap it as a SavedModel."""
    with tf.Graph().as_default(), tf.compat.v1.Session().as_default() as session:
        input_x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 2, 3, 4], name='x')
        input_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 2, 3, 4], name='y')
        weight = tf.Variable(initial_value=4.2, dtype=tf.float32)
        output_z = tf.multiply(input_x + input_y, weight, name='z')
        # Variables must be initialized before the session can be exported.
        session.run(weight.initializer)
        return SavedModel(inputs=[Input(name='x', tensor=input_x, data_format=None),
                                  Input(name='y', tensor=input_y, data_format=None)],
                          outputs=[Output(name='z', tensor=output_z)],
                          session=session)
class CompileSourceTestCase(TestCase):
    """Tests for compiling a SavedModel source into an OpenVINO model."""

    def test_compile_with_no_params(self):
        # Only max_batch_size given; shapes/names are taken from the graph.
        config = Config.from_json({'max_batch_size': 1})
        compiled = compiler.compile_source(source=_make_saved_model(), config=config)
        self.assertEqual(compiled.get_inputs(),
                         [ModelInput(name='x', data_type=tf.float32.as_datatype_enum,
                                     format=ModelInput.FORMAT_NONE, dims=[2, 3, 4]),  # pylint: disable=no-member
                          ModelInput(name='y', data_type=tf.float32.as_datatype_enum,
                                     format=ModelInput.FORMAT_NONE, dims=[2, 3, 4])])  # pylint: disable=no-member
        self.assertEqual(compiled.get_outputs(),
                         [ModelOutput(name='z', data_type=tf.float32.as_datatype_enum, dims=[2, 3, 4])])

    def test_compile_with_all_params_with_shape(self):
        # Explicit names and full input shapes (batch dim included).
        config = Config.from_json({'input_names': ['x', 'y'],
                                   'input_shapes': [[1, 2, 3, 4], [1, 2, 3, 4]],
                                   'output_names': ['z'],
                                   'enable_nhwc_to_nchw': False})
        compiled = compiler.compile_source(source=_make_saved_model(), config=config)
        self.assertEqual(compiled.get_inputs(),
                         [ModelInput(name='x', data_type=tf.float32.as_datatype_enum,
                                     format=ModelInput.FORMAT_NONE, dims=[2, 3, 4]),  # pylint: disable=no-member
                          ModelInput(name='y', data_type=tf.float32.as_datatype_enum,
                                     format=ModelInput.FORMAT_NONE, dims=[2, 3, 4])])  # pylint: disable=no-member
        self.assertEqual(compiled.get_outputs(),
                         [ModelOutput(name='z', data_type=tf.float32.as_datatype_enum, dims=[2, 3, 4])])

    def test_compile_with_all_params_with_enable_nhwc_to_nchw_true(self):
        # With NHWC->NCHW enabled, the trailing channel axis moves to the front.
        config = Config.from_json({'input_names': ['x', 'y'],
                                   'output_names': ['z'],
                                   'max_batch_size': 1,
                                   'enable_nhwc_to_nchw': True})
        compiled = compiler.compile_source(source=_make_saved_model(), config=config)
        self.assertEqual(compiled.get_inputs(),
                         [ModelInput(name='x', data_type=tf.float32.as_datatype_enum,
                                     format=ModelInput.FORMAT_NONE, dims=[4, 2, 3]),  # pylint: disable=no-member
                          ModelInput(name='y', data_type=tf.float32.as_datatype_enum,
                                     format=ModelInput.FORMAT_NONE, dims=[4, 2, 3])])  # pylint: disable=no-member
        self.assertEqual(compiled.get_outputs(),
                         [ModelOutput(name='z', data_type=tf.float32.as_datatype_enum, dims=[4, 2, 3])])
``` |
{
"source": "jiangjiajun/PaddleClas",
"score": 2
} |
#### File: ppcls/utils/model_zoo.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import requests
import shutil
import tarfile
import tqdm
import zipfile
from ppcls.modeling import similar_architectures
from ppcls.utils.check import check_architecture
from ppcls.utils import logger
__all__ = ['get']
DOWNLOAD_RETRY_LIMIT = 3
class UrlError(Exception):
    """Raised when an HTTP download returns a non-200 status code."""

    def __init__(self, url='', code=''):
        super(UrlError, self).__init__(
            "Downloading from {} failed with code {}!".format(url, code))
class ModelNameError(Exception):
    """Raised when a requested architecture name is not a known model."""

    def __init__(self, message=''):
        super(ModelNameError, self).__init__(message)
class RetryError(Exception):
    """Raised when the download retry limit has been exhausted."""

    def __init__(self, url='', times=''):
        msg = "Download from {} failed. Retry({}) limit reached".format(url, times)
        super(RetryError, self).__init__(msg)
def _get_url(architecture):
prefix = "https://paddle-imagenet-models-name.bj.bcebos.com/"
fname = architecture + "_pretrained.tar"
return prefix + fname
def _move_and_merge_tree(src, dst):
"""
Move src directory to dst, if dst is already exists,
merge src to dst
"""
if not os.path.exists(dst):
shutil.move(src, dst)
elif os.path.isfile(src):
shutil.move(src, dst)
else:
for fp in os.listdir(src):
src_fp = os.path.join(src, fp)
dst_fp = os.path.join(dst, fp)
if os.path.isdir(src_fp):
if os.path.isdir(dst_fp):
_move_and_merge_tree(src_fp, dst_fp)
else:
shutil.move(src_fp, dst_fp)
elif os.path.isfile(src_fp) and \
not os.path.isfile(dst_fp):
shutil.move(src_fp, dst_fp)
def _download(url, path):
    """
    Download from url, save to path.

    url (str): download url
    path (str): download to given path

    Returns the full local file name. Raises UrlError on a non-200 response
    and RetryError after DOWNLOAD_RETRY_LIMIT failed attempts.
    """
    if not os.path.exists(path):
        os.makedirs(path)

    fname = os.path.split(url)[-1]
    fullname = os.path.join(path, fname)
    retry_cnt = 0
    while not os.path.exists(fullname):
        if retry_cnt < DOWNLOAD_RETRY_LIMIT:
            retry_cnt += 1
        else:
            raise RetryError(url, DOWNLOAD_RETRY_LIMIT)

        logger.info("Downloading {} from {}".format(fname, url))

        # A timeout guards against hanging forever on a dead connection;
        # with stream=True it applies to the connect and to each read.
        req = requests.get(url, stream=True, timeout=60)
        if req.status_code != 200:
            raise UrlError(url, req.status_code)

        # For protecting download interrupted, download to
        # tmp_fullname firstly, move tmp_fullname to fullname
        # after download finished
        tmp_fullname = fullname + "_tmp"
        total_size = req.headers.get('content-length')
        with open(tmp_fullname, 'wb') as f:
            if total_size:
                for chunk in tqdm.tqdm(
                        req.iter_content(chunk_size=1024),
                        total=(int(total_size) + 1023) // 1024,
                        unit='KB'):
                    f.write(chunk)
            else:
                for chunk in req.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
        shutil.move(tmp_fullname, fullname)

    return fullname
def _decompress(fname):
    """
    Decompress for zip and tar file
    """
    logger.info("Decompressing {}...".format(fname))

    # For protecting decompressing interrupted,
    # decompress to fpath_tmp directory firstly, if decompress
    # succeeded, move decompressed files to fpath and delete
    # fpath_tmp and remove downloaded compressed file.
    fpath = os.path.split(fname)[0]
    fpath_tmp = os.path.join(fpath, 'tmp')
    if os.path.isdir(fpath_tmp):
        shutil.rmtree(fpath_tmp)
    os.makedirs(fpath_tmp)

    # NOTE(review): extractall() on an untrusted archive is vulnerable to
    # path traversal (member names like '../x'); consider validating member
    # names or using the extraction filter available in newer Pythons.
    if fname.find('tar') >= 0:
        with tarfile.open(fname) as tf:
            tf.extractall(path=fpath_tmp)
    elif fname.find('zip') >= 0:
        with zipfile.ZipFile(fname) as zf:
            zf.extractall(path=fpath_tmp)
    else:
        raise TypeError("Unsupport compress file type {}".format(fname))

    # Merge the extracted tree into the destination, then clean up the
    # temporary directory and the archive itself.
    for f in os.listdir(fpath_tmp):
        src_dir = os.path.join(fpath_tmp, f)
        dst_dir = os.path.join(fpath, f)
        _move_and_merge_tree(src_dir, dst_dir)
    shutil.rmtree(fpath_tmp)
    os.remove(fname)
def _check_pretrained_name(architecture):
    """Validate *architecture* against the pretrained model list.

    Raises ModelNameError (with name suggestions) when the architecture is
    not a known pretrained model.
    """
    assert isinstance(architecture, str), \
        ("the type of architecture({}) should be str". format(architecture))
    with open('./configs/pretrained.list') as flist:
        pretrained = [line.strip() for line in flist]
    similar_names = similar_architectures(architecture, pretrained)
    if architecture in similar_names:
        return
    raise ModelNameError("{} is not exist! Maybe you want: [{}]".format(
        architecture, ', '.join(similar_names)))
def get(architecture, path, decompress=True):
    """
    Get the pretrained model.
    """
    _check_pretrained_name(architecture)
    fname = _download(_get_url(architecture), path)
    if decompress:
        _decompress(fname)
    logger.info("download {} finished ".format(fname))
```
#### File: PaddleClas/tools/download.py
```python
import sys
import argparse
from ppcls import model_zoo
def parse_args():
    """Parse the command-line options for the download tool."""
    def str2bool(v):
        # argparse calls str() types with the raw token; accept a few truthy spellings.
        return v.lower() in ("true", "t", "1")

    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--architecture', type=str, default='ResNet50')
    parser.add_argument('-p', '--path', type=str, default='./pretrained/')
    parser.add_argument('-d', '--decompress', type=str2bool, default=True)
    return parser.parse_args()
def main():
    """Entry point: fetch (and optionally decompress) the requested model."""
    opts = parse_args()
    model_zoo.get(opts.architecture, opts.path, opts.decompress)
if __name__ == '__main__':
main()
``` |
{
"source": "jiangjin1999/Sentence-level-detection-on-CSC",
"score": 2
} |
#### File: jiangjin1999/Sentence-level-detection-on-CSC/data.py
```python
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 10 09:21:39 2020
@author: Administrator
"""
import re
import torch
import random
from transformers import BertModel, BertTokenizer, BertConfig
from transformers.modeling_bert import BertEmbeddings, BertEncoder
from pandas.core.frame import DataFrame
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
import warnings
import time
import os
import copy
import math
# ---- Environment / reproducibility setup ----
random.seed(2)
warnings.filterwarnings('ignore')
# NOTE(review): hard-coded Windows working directory; breaks portability.
os.chdir('D:/ajiangj/exp:2---learning')
path = os.getcwd()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.cuda.empty_cache()
time_start = time.time()
# ---- Build the training set: one (right, wrong) sentence pair per error ----
file_sentence = open(path + '/merged/TrainingInputAll.txt', encoding='utf-8', errors='ignore')
file_data_sentence = file_sentence.readlines()
# example: '(sighan13-id=1)\t有一天的晚上,大家都睡得很安祥,突然一阵巨晃,我们家书贵倒了,这次地震惊动了全国。\n'
print("训练集句子的个数为:", len(file_data_sentence))
file_true = open(path + '/merged/TrainingTruthAll.txt', encoding='utf-8', errors='ignore')
file_data_true = file_true.readlines()
# example truth line: '(sighan13-id=1), 28, 柜\n' -> (id, char position, correct char)
print(len(file_data_true))
data_right = []
data_wrong = []
data = []
bad_data = []
for i in range(len(file_data_sentence)):
    inp = file_data_sentence[i]
    trth = file_data_true[i]
    inp_fields = inp.strip().split("\t")
    trth_fields = trth.strip().split(", ")
    text_a = inp_fields[1]
    trth_fields_wrong_word = copy.deepcopy(trth_fields)
    for j in range(1, len(trth_fields), 2):  # apply all corrections -> fully corrected sentence
        index = int(trth_fields[j])
        right_word = trth_fields[j + 1]
        wrong_word = text_a[index - 1]
        trth_fields_wrong_word[j + 1] = wrong_word
        # NOTE(review): re.sub treats the character as a regex pattern and
        # replaces every occurrence, not only position `index` - confirm
        # this is intended.
        text_a = re.sub(text_a[index - 1], right_word, text_a)
    right_sentence_temp = text_a
    for k in range(1, len(trth_fields), 2):  # one error word per generated sentence pair
        temp_list_right = []
        temp_list_wrong = []
        index = int(trth_fields[k])
        right_word = trth_fields[k + 1]
        wrong_word = trth_fields_wrong_word[k + 1]
        temp_right_sentences = right_sentence_temp
        temp_wrong_sentences = re.sub(right_sentence_temp[index - 1], wrong_word, right_sentence_temp)
        temp_list_right.append(temp_right_sentences)
        temp_list_right.append(1)
        temp_list_right.append(index)
        temp_list_right.append(right_word)
        temp_list_right.append(wrong_word)
        data.append(temp_list_right)
        temp_list_wrong.append(temp_wrong_sentences)
        temp_list_wrong.append(0)
        temp_list_wrong.append(index)
        temp_list_wrong.append(right_word)
        temp_list_wrong.append(wrong_word)
        data.append(temp_list_wrong)
        if right_word == wrong_word:
            bad_data.append(temp_list_right)
            bad_data.append(temp_list_wrong)
# With both right and wrong variants there are 2432 sentences in total.
# Each entry: [sentence, label, char index, right char, wrong char];
# label 1 = correct sentence, label 0 = sentence containing the error.
train_data = data
# ---- Build the test set: a truth line with exactly two fields marks the
# ---- sentence as error-free (label 1); more fields mean it has errors (0).
test_sentence = open(path + '/merged/TestInput.txt', encoding='utf-8', errors='ignore')
test_data_sentence = test_sentence.readlines()
# example: '(pid=B2-4252-8)\t如果老师一开始用几个方法看小孩子,老师们会利用一个很简单的方法,他们不会了解甚么时候小孩子可以开始面对功课,了解礼貌的行为或是甚么东西是不对的。\n'
print("测试集句子的个数为:", len(test_data_sentence))
test_true = open(path + '/merged/TestTruth.txt', encoding='utf-8', errors='ignore')
test_data_true = test_true.readlines()
# example truth line: 'B2-4252-2, 3, 现\n'
test_data = []
for i in range(len(test_data_sentence)):
    temp_list = []
    inp = test_data_sentence[i]
    trth = test_data_true[i]
    inp_fields = inp.strip().split("\t")
    trth_fields = trth.strip().split(", ")
    text_b = inp_fields[1]
    flag = 1
    if len(trth_fields) == 2:
        flag = 1
    else:
        flag = 0
    right_word_info = trth_fields[1:len(trth_fields)]
    # Each entry: [sentence, label, list of (position, correct char) info]
    temp_list.append(text_b)
    temp_list.append(flag)
    temp_list.append(right_word_info)
    test_data.append(temp_list)
# Load the pretrained Chinese BERT used for tokenization and as the encoder
# backbone; its config is reused by BertClassificationModel below.
model_name = 'bert-base-chinese'
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertModel.from_pretrained(model_name)
config = BertConfig.from_pretrained(model_name)
model.to(device)
# test
# inputtext = "今天心情情很好啊,买了很多东西,我特别喜欢,终于有了自己喜欢的电子产品,这次总算可以好好学习了"
# tokenized_text=tokenizer.encode(inputtext)
# input_ids=torch.tensor(tokenized_text).view(-1,len(tokenized_text))
# input_ids=input_ids.to(device)
# outputs=model(input_ids)
##outputs[0].shape
##Out[145]: torch.Size([1, 49, 768])-- embedding dim 是768
##outputs[1].shape
##torch.Size([1, 768])
##对应字向量表示和句向量表示
# outputs[0].shape,outputs[1].shape
###
###
# 二、 微调
import torch
from torch import nn
from torch import optim
import transformers as tfs
# import math
import time
def series2int(series):
    """Convert a sequence (e.g. a pandas values array) to a list of ints."""
    return [int(value) for value in series]
def series2list(series):
    """Convert a sequence (e.g. a pandas values array) to a list of strings."""
    return [str(value) for value in series]
class BertClassificationModel(nn.Module):
    """BERT encoder + dropout + linear head for binary sentence classification."""

    def __init__(self):
        super(BertClassificationModel, self).__init__()
        model_class, tokenizer_class, pretrained_weights = (tfs.BertModel, tfs.BertTokenizer, 'bert-base-chinese')
        self.tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
        # Embedding layer (BertEmbeddings). NOTE(review): only its
        # word_embeddings weight is read in forward(); it is not wired into
        # the computation graph - confirm this is intentional.
        self.embeddings = BertEmbeddings(config)
        # 12-layer multi-head self-attention encoder (BertEncoder).
        # NOTE(review): also unused by forward().
        self.encoder = BertEncoder(config)
        self.bert = model_class.from_pretrained(pretrained_weights)
        self.dense = nn.Linear(768, 2)  # BERT hidden size 768 -> 2 logits (binary classification)
        self.dropout = nn.Dropout(p=0.5)  # dropout regularization

    def forward(self, batch_sentences):
        # tokenize, add special tokens ([CLS]/[SEP]) and pad the batch
        batch_tokenized = self.tokenizer.batch_encode_plus(batch_sentences, add_special_tokens=True,
                                                           pad_to_max_length=True)
        input_ids = torch.tensor(batch_tokenized['input_ids'])
        attention_mask = torch.tensor(batch_tokenized['attention_mask'])
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        bert_output = self.bert(input_ids, attention_mask=attention_mask)
        bert_cls_hidden_state = bert_output[0][:, 0, :]  # hidden state of the [CLS] token
        # NOTE(review): assigned but never used afterwards - confirm whether
        # this leftover can be removed.
        self.mask_embeddings = self.embeddings.word_embeddings.weight[103]
        dropout_output = self.dropout(bert_cls_hidden_state)
        linear_output = self.dense(dropout_output)
        return linear_output
def train_test_val_split(df, ratio_train, ratio_test, ratio_val):
    """Split *df* into train / test / validation partitions by the given ratios."""
    train_part, remainder = train_test_split(df, test_size=1 - ratio_train)
    # Rescale the validation ratio relative to the remaining fraction.
    val_fraction = ratio_val / (1 - ratio_train)
    test_part, val_part = train_test_split(remainder, test_size=val_fraction)
    return train_part, test_part, val_part
def perf_measure(y_true, y_pred):
    """Confusion-matrix style counts for binary labels.

    NOTE(review): the TN/FN names are swapped relative to the usual
    convention: here "FN" counts (true=0, pred=0) pairs and "TN" counts
    (true=1, pred=0) pairs. The naming is kept for compatibility with the
    accuracy formula used elsewhere in this file, (TP + FN) / total.

    Returns (TP, FP, TN, FN, cla_index) where cla_index is a one-element
    list wrapping [TP_index, FP_index, TN_index, FN_index].
    """
    TP, FP, TN, FN = 0, 0, 0, 0
    TP_index, FP_index, FN_index, TN_index = [], [], [], []
    for i in range(len(y_true)):
        if y_true[i] == 1 and y_pred[i] == 1:
            TP += 1
            TP_index.append(i)
        if y_true[i] == 0 and y_pred[i] == 1:
            FP += 1
            FP_index.append(i)
        if y_true[i] == 0 and y_pred[i] == 0:
            FN += 1
            FN_index.append(i)
        if y_true[i] == 1 and y_pred[i] == 0:
            TN += 1
            TN_index.append(i)
    # Build the index summary once, after counting. The original appended it
    # inside the loop, producing len(y_true) duplicate entries; callers only
    # ever read cla_index[0], which is unchanged.
    cla_index = [[TP_index, FP_index, TN_index, FN_index]]
    return TP, FP, TN, FN, cla_index
def evaluate_Model(inputs, targets):
    """Run the global bert_classifier_model over *inputs* one sentence at a
    time and summarize predictions against *targets*.

    Returns (accuracy, TP, FP, TN, FN, cla_index); see perf_measure for the
    non-standard TN/FN naming. Accuracy is (TP + FN) / total, which under
    that naming counts both kinds of correct predictions.
    """
    predicted_list = []
    with torch.no_grad():  # inference only: no gradient tracking
        for i in range(len(inputs)):
            outputs = bert_classifier_model([inputs[i]])
            predicted = torch.max(outputs, 1)
            predicted_list.append(int(predicted.indices))
    y_true, y_pred = targets, predicted_list
    TP, FP, TN, FN, cla_index = perf_measure(y_true, y_pred)
    print("正确率是:", (TP + FN) / (TP + FP + TN + FN))
    return (TP + FN) / (TP + FP + TN + FN), TP, FP, TN, FN, cla_index
def evaluate_Model_list(inputs, targets):
    """Same as evaluate_Model but additionally returns (y_true, y_pred)."""
    predicted_list = []
    with torch.no_grad():  # inference only: no gradient tracking
        for i in range(len(inputs)):
            outputs = bert_classifier_model([inputs[i]])
            predicted = torch.max(outputs, 1)
            predicted_list.append(int(predicted.indices))
    y_true, y_pred = targets, predicted_list
    TP, FP, TN, FN, cla_index = perf_measure(y_true, y_pred)
    print("正确率是:", (TP + FN) / (TP + FP + TN + FN))
    return (TP + FN) / (TP + FP + TN + FN), TP, FP, TN, FN, cla_index, y_true, y_pred
# ---- Shuffle, take a 10% slice of training data, and build fixed batches ----
random.shuffle(train_data)
# Only the 50%-60% slice of the shuffled training data is used for this run;
# evaluation uses the full SIGHAN test set.
train_list, test_list = train_data[math.ceil(0.5 * len(train_data)):math.ceil(0.6 * len(train_data))], test_data
# random.shuffle(test)
train, test = DataFrame(train_list), DataFrame(test_list)
train_inputs, train_targets = train[0].values, train[1].values
test_inputs, test_targets = test[0].values, test[1].values
print("Train set shape:", train_inputs.shape)
print(train[1].value_counts())  # label distribution of the training split
print("test set shape:", test_inputs.shape)
print(test[1].value_counts())  # label distribution of the test split
train_inputs, test_inputs = series2list(train_inputs), series2list(test_inputs)
train_targets, test_targets = series2int(train_targets), series2int(test_targets)
batch_size = 16
batch_count = int(len(train_inputs) / batch_size)  # drops the final partial batch
batch_train_inputs, batch_train_targets = [], []
for i in range(batch_count):
    batch_train_inputs.append(train_inputs[i * batch_size: (i + 1) * batch_size])
    batch_train_targets.append(train_targets[i * batch_size: (i + 1) * batch_size])
# Loss had already reached 0.0007 by epoch 51 in earlier runs.
# TODO: switch to early stopping driven by the dev-set loss.
# train the model
epochs = 20
# lr = 0.00001
print_every_batch = 5
bert_classifier_model = BertClassificationModel()
# bert_classifier_model = torch.nn.DataParallel(model, device_ids=[0, 1])
bert_classifier_model.to(device)
# optimizer = optim.SGD(bert_classifier_model.parameters(), lr=lr, momentum=0.9)
params = bert_classifier_model.parameters()
optimizer = torch.optim.Adam(params,
                             lr=2e-6,
                             betas=(0.9, 0.999),
                             eps=1e-8,
                             amsgrad=False)
criterion = nn.CrossEntropyLoss()
dev_list = []
test_list = []
# Per-epoch evaluation results are appended to this log file.
ans_test = open("data_clean_twinning_test_sighan_adam0.txt", "w")
cla_index_test = []
loss_change = []
# ---- Training loop: per-batch Adam updates, per-epoch checkpoint + eval ----
for epoch in range(epochs):
    bert_classifier_model.train()
    print_avg_loss = 0
    for i in range(batch_count):
        inputs = batch_train_inputs[i]
        labels = torch.tensor(batch_train_targets[i])
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = bert_classifier_model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        print_avg_loss += loss.item()
        if i % print_every_batch == (print_every_batch - 1):
            print("epoch: %d, Batch: %d, Loss: %.4f" % ((epoch + 1), (i + 1), print_avg_loss / print_every_batch))
            loss_change.append(str(print_avg_loss / print_every_batch))  # collect points for the loss curve
            print_avg_loss = 0
    # Save a checkpoint and evaluate on the test set after every epoch.
    if epoch > -1:
        bert_classifier_model.eval()
        torch.save(bert_classifier_model, path + '/twinning_test_sighan' + str(epoch) + '.pkl')
        acc_test, TP_test, FP_test, TN_test, FN_test, cla_index_test = evaluate_Model(test_inputs, test_targets)
        test_list.append([acc_test, TP_test, FP_test, TN_test, FN_test])
        ans_test.writelines(" epoch" + str(epoch + 1) + "验证集准确率:")
        ans_test.writelines(str([acc_test, TP_test, FP_test, TN_test, FN_test]))
        # NOTE(review): '/n' below is written literally; '\n' was probably intended.
        ans_test.writelines(['/n'])
        ans_test.flush()
ans_test.writelines(" loss值记录:")
ans_test.writelines(str([loss_change]))
ans_test.close()
time_end = time.time()
print('totally time cost', time_end - time_start)
def list_avg_distance(list):
    """Average length of the first element (the sentence) of each entry.

    The parameter name shadows the builtin ``list``; it is kept unchanged
    for backward compatibility with existing callers. The local accumulator
    no longer shadows the builtin ``sum``.
    """
    total = 0
    for entry in list:
        total = total + len(entry[0])
    return total / len(list)
# ---- Post-training analysis: reload saved checkpoints and collect the
# ---- TP/FP/TN/FN sentence groups for each model variant (see perf_measure
# ---- for the non-standard TN/FN naming). Trailing numeric comments are
# ---- recorded average sentence lengths per group.
dev_inputs, dev_targets = test_inputs, test_targets
# NOTE(review): hard-coded local checkpoint paths below.
bert_classifier_model = torch.load('D:/ajiangj/exp:2---learning/model:100%/twinning_test_sighan16.pkl')
bert_classifier_model.to(device)
acc_dev_s100, TP_dev_s100, FP_dev_s100, TN_dev_s100, FN_dev_s100, cla_index_dev_s100, y_true_s100, y_pre_s100 = evaluate_Model_list(
    dev_inputs, dev_targets)
index_TP_s100 = cla_index_dev_s100[0][0]
index_FP_s100 = cla_index_dev_s100[0][1]
index_TN_s100 = cla_index_dev_s100[0][2]
index_FN_s100 = cla_index_dev_s100[0][3]
sentence_TP_s100 = [test_data[i] for i in index_TP_s100]
# 30.36938775510204
sentence_FP_s100 = [test_data[i] for i in index_FP_s100]
# 31.048780487804876
sentence_TN_s100 = [test_data[i] for i in index_TN_s100]
# 33.8
sentence_FN_s100 = [test_data[i] for i in index_FN_s100]
# 30.405152224824356
bert_classifier_model = torch.load('D:/ajiangj/exp:1---pair-v1/model:5%/twinning_test_sighan15.pkl')
bert_classifier_model.to(device)
acc_dev_p005, TP_dev_p005, FP_dev_p005, TN_dev_p005, FN_dev_p005, cla_index_dev_p005, y_true_p005, y_pre_p005 = evaluate_Model_list(
    dev_inputs, dev_targets)
index_TP_p005 = cla_index_dev_p005[0][0]
index_FP_p005 = cla_index_dev_p005[0][1]
index_TN_p005 = cla_index_dev_p005[0][2]
index_FN_p005 = cla_index_dev_p005[0][3]
sentence_TP_p005 = [test_data[i] for i in index_TP_p005]
# 30.073752711496745
sentence_FP_p005 = [test_data[i] for i in index_FP_p005]
# 30.263157894736842
sentence_TN_p005 = [test_data[i] for i in index_TN_p005]
# 34.21348314606742
sentence_FN_p005 = [test_data[i] for i in index_FN_p005]
# 30.678100263852244
bert_classifier_model = torch.load('D:/ajiangj/exp:2---learning/model:5%/twinning_test_sighan15.pkl')
bert_classifier_model.to(device)
acc_dev_s005, TP_dev_s005, FP_dev_s005, TN_dev_s005, FN_dev_s005, cla_index_dev_s005, y_true_s005, y_pre_s005 = evaluate_Model_list(
    dev_inputs, dev_targets)
index_TP_s005 = cla_index_dev_s005[0][0]
index_FP_s005 = cla_index_dev_s005[0][1]
index_TN_s005 = cla_index_dev_s005[0][2]
index_FN_s005 = cla_index_dev_s005[0][3]
sentence_TP_s005 = [test_data[i] for i in index_TP_s005]
# 29.98
sentence_FP_s005 = [test_data[i] for i in index_FP_s005]
# 31.033834586466167
sentence_TN_s005 = [test_data[i] for i in index_TN_s005]
# 29.98
sentence_FN_s005 = [test_data[i] for i in index_FN_s005]
# 30.095070422535212
# ---- Compare the error-sentence sets of the paired-5% and single-5% models ----
a = copy.deepcopy(index_FN_p005)
b = copy.deepcopy(index_FN_s005)
print(len(a))
print(len(b))
# 379
# 284
# intersection--246
intersection = [v for v in a if v in b]
# union: the original called b.extend(...), which stored None in `union`
# AND mutated b before the difference below was computed, making
# `differenc` always empty.
union = b + [v for v in a if v not in b]
# difference--133 (elements of a absent from b, computed on the unmutated b)
differenc = [v for v in a if v not in b]
kkk = [test_data[i] for i in differenc]
# read & write txt; context managers ensure the files are actually closed
# (the original `ans.close` was missing the call parentheses).
with open("成对的5%-错误的句子判断错误.txt", "w") as ans:
    ans.writelines(str(sentence_FN_p005))
with open("单个的5%-错误的句子判断错误.txt", "w") as ans:
    ans.writelines(str(sentence_FN_s005))
with open("成对的减去单个的判断结果.txt", "w") as ans:
    ans.writelines(str(kkk))
# NOTE(review): dead code the original author disabled by wrapping it in a
# module-level string literal -- a manual test-set evaluation loop plus an
# older model-inspection pass.  Kept verbatim; the string is never used.
'''
# eval the trained model in test model
total = len(test_inputs)
hit = 0
num_wrong = 0
wrong_judge_sentence = []
right_judge_sentence = []
with torch.no_grad():
for i in range(total):
outputs = bert_classifier_model([test_inputs[i]])
_, predicted = torch.max(outputs, 1)
wrong_temp = []
right_temp = []
if predicted == test_targets[i]:
hit += 1
#right_temp.append(test_inputs[i])
# right_temp.append(test_targets[i])
#right_judge_sentence.append(right_temp)
else:
num_wrong += 1
#wrong_temp.append(test_inputs[i])
#wrong_temp.append(test_targets[i])
#wrong_judge_sentence.append(wrong_temp)
hit + num_wrong == total
print("test_Accuracy: %.2f%%" % (hit / total * 100))
#print("判断错误的句子(#1是正确的,0是错误的)&返回句子本身的对错:\n",wrong_judge_sentence)
#print("\n\n\n")
#print("判断正确的句子(#1是正确的,0是错误的)&返回句子本身的对错:\n",right_judge_sentence)
#read & write txt
#ans = open("ans666.txt","w")
#ans.writelines(str(wrong_judge_sentence))
#ans.writelines(str(right_judge_sentence))
#ans.close
print("test_Accuracy: %.2f%%" % (hit / total * 100))
time_end=time.time()
print('totally time cost',time_end-time_start)
#test_Accuracy: 49.77%
#
# 保存
#torch.save(bert_classifier_model, 'D:/ajiangj/spelling error/model'+str(i)+'.pkl')
# 加载
#测试
aaa = np.array(validation)#np.ndarray()
bbb=aaa.tolist()#list
bert_classifier_model = torch.load(path+'/model_gpu19.pkl')
acc_dev, TP_dev, FP_dev, TN_dev, FN_dev, cla_index_dev= evaluate_Model(dev_inputs[0:200], dev_targets[0:200])
index_TP = cla_index_dev[0][0]
index_FP = cla_index_dev[0][1]
index_TN = cla_index_dev[0][2]
index_FN = cla_index_dev[0][3]
sentence_TP = [bbb[i]for i in index_TP]
sentence_FP = [bbb[i]for i in index_FP]
sentence_TN = [bbb[i]for i in index_TN]
sentence_FN = [bbb[i]for i in index_FN]
#read & write txt
with open("sentence_TP.txt", 'w') as f:
f.write(str(sentence_TP))
with open("sentence_FP.txt", 'w') as f:
f.write(str(sentence_FP))
with open("sentence_TN.txt", 'w') as f:
f.write(str(sentence_TN))
with open("sentence_FN.txt", 'w') as f:
f.write(str(sentence_FN))
'''
``` |
{
"source": "JiangJohnny/resolvelib",
"score": 2
} |
#### File: functional/python/test_resolvers_python.py
```python
from __future__ import print_function
import collections
import json
import operator
import os
import packaging.markers
import packaging.requirements
import packaging.specifiers
import packaging.utils
import packaging.version
import pytest
from resolvelib import AbstractProvider, ResolutionImpossible, Resolver
# A concrete package version: project name + parsed version + requested extras.
Candidate = collections.namedtuple("Candidate", "name version extras")
def _eval_marker(marker, extras=(None,)):
if not marker:
return True
if not isinstance(marker, packaging.markers.Marker):
marker = packaging.markers.Marker(marker)
return any(marker.evaluate({"extra": extra}) for extra in extras)
def _iter_resolved(data):
for k, v in data.items():
if not isinstance(v, dict):
v = {"version": v}
yield k, v
class PythonInputProvider(AbstractProvider):
    """resolvelib provider backed by JSON fixtures: a case file (requested
    requirements plus expected outcome) and a package index."""

    def __init__(self, filename):
        # The case file names the index it resolves against; the index lives
        # in ../../index/<name>.json relative to the case file.
        with open(filename) as f:
            case_data = json.load(f)
        index_name = os.path.normpath(
            os.path.join(
                filename, "..", "..", "index", case_data["index"] + ".json"
            ),
        )
        with open(index_name) as f:
            self.index = json.load(f)
        self.root_requirements = [
            packaging.requirements.Requirement(r)
            for r in case_data["requested"]
        ]
        if "resolved" in case_data:
            # Expected name -> parsed version, filtered by environment markers.
            self.expected_resolution = {
                k: packaging.version.parse(v["version"])
                for k, v in _iter_resolved(case_data["resolved"])
                if _eval_marker(v.get("marker"))
            }
        else:
            self.expected_resolution = None
        if "conflicted" in case_data:
            # Canonical names expected among the ResolutionImpossible causes.
            self.expected_confliction = set(case_data["conflicted"])
        else:
            self.expected_confliction = None

    def identify(self, requirement_or_candidate):
        """Canonical key; requirements with extras become "name[a,b]"."""
        name = packaging.utils.canonicalize_name(requirement_or_candidate.name)
        if requirement_or_candidate.extras:
            extras_str = ",".join(sorted(requirement_or_candidate.extras))
            return "{}[{}]".format(name, extras_str)
        return name

    def get_preference(self, identifier, resolutions, candidates, information):
        """Resolve root requirements first (False < True), then alphabetically."""
        # transitive is False when any parent is None, i.e. a user-requested root.
        transitive = all(p is not None for _, p in information[identifier])
        return (transitive, identifier)

    def _iter_matches(self, identifier, requirements, incompatibilities):
        # Strip any "[extras]" suffix to look the project up in the index.
        name, _, _ = identifier.partition("[")
        bad_versions = {c.version for c in incompatibilities[identifier]}
        # Union of the extras requested across all requirements for this key.
        extras = {e for r in requirements[identifier] for e in r.extras}
        for key, value in self.index[name].items():
            v = packaging.version.parse(key)
            # Every requirement's specifier must admit the version.
            if any(v not in r.specifier for r in requirements[identifier]):
                continue
            if v in bad_versions:
                continue
            yield Candidate(name=name, version=v, extras=extras)

    def find_matches(self, identifier, requirements, incompatibilities):
        """All admissible candidates, newest version first."""
        candidates = sorted(
            self._iter_matches(identifier, requirements, incompatibilities),
            key=operator.attrgetter("version"),
            reverse=True,
        )
        return candidates

    def is_satisfied_by(self, requirement, candidate):
        return candidate.version in requirement.specifier

    def _iter_dependencies(self, candidate):
        name = packaging.utils.canonicalize_name(candidate.name)
        if candidate.extras:
            # An extras candidate pins the plain candidate at the same version.
            r = "{}=={}".format(name, candidate.version)
            yield packaging.requirements.Requirement(r)
        for r in self.index[name][str(candidate.version)]["dependencies"]:
            requirement = packaging.requirements.Requirement(r)
            # Dependencies guarded by markers only apply for matching extras.
            if not _eval_marker(requirement.marker, candidate.extras):
                continue
            yield requirement

    def get_dependencies(self, candidate):
        return list(self._iter_dependencies(candidate))
# Fixture locations: <this dir>/inputs/{case,index}/*.json.
INPUTS_DIR = os.path.abspath(os.path.join(__file__, "..", "inputs"))
CASE_DIR = os.path.join(INPUTS_DIR, "case")
CASE_NAMES = [name for name in os.listdir(CASE_DIR) if name.endswith(".json")]
# Cases known to fail, mapped to the reason; marked strict-xfail below.
XFAIL_CASES = {
    "pyrex-1.9.8.json": "Too many rounds (>500)",
    "same-package-extras.json": "State not cleaned up correctly",
}
@pytest.fixture(
    params=[
        # Known-bad cases are wrapped in a strict xfail param; the rest are
        # passed through as plain paths.
        pytest.param(
            os.path.join(CASE_DIR, n),
            marks=pytest.mark.xfail(strict=True, reason=XFAIL_CASES[n]),
        )
        if n in XFAIL_CASES
        else os.path.join(CASE_DIR, n)
        for n in CASE_NAMES
    ],
    ids=[n[:-5] for n in CASE_NAMES],  # strip the ".json" suffix for test ids
)
def provider(request):
    """One PythonInputProvider per case file in inputs/case."""
    return PythonInputProvider(request.param)
def _format_confliction(exception):
    """Canonical names of every requirement implicated in the conflict."""
    names = set()
    for cause in exception.causes:
        names.add(packaging.utils.canonicalize_name(cause.requirement.name))
    return names
def _format_resolution(result):
return {
identifier: candidate.version
for identifier, candidate in result.mapping.items()
if not candidate.extras
}
def test_resolver(provider, reporter):
    """Resolve each fixture case and compare against its expected outcome.

    NOTE(review): `reporter` is a fixture defined outside this file.
    """
    resolver = Resolver(provider, reporter)
    if provider.expected_confliction:
        with pytest.raises(ResolutionImpossible) as ctx:
            result = resolver.resolve(provider.root_requirements)
            # Only reached if resolution unexpectedly succeeds.
            print(_format_resolution(result))  # Provide some debugging hints.
        assert _format_confliction(ctx.value) == provider.expected_confliction
    else:
        resolution = resolver.resolve(provider.root_requirements)
        assert _format_resolution(resolution) == provider.expected_resolution
``` |
{
"source": "JiangJQ2000/montepython",
"score": 2
} |
#### File: python/tools/prepare_wmap.py
```python
import sys
sys.path = ["REPLACEPATH"]+sys.path
import numpy as nm
import clik.parobject as php
import clik
import re
import os.path as osp
def main(argv):
    """Build a clik 'wmap' likelihood object from the parameter file argv[1]."""
    pars = clik.miniparse(argv[1])
    # Reference spectra shipped with the WMAP data (fall back from v4 to v5).
    try:
        test_cl = nm.loadtxt(osp.join(pars.wmap_data,"data/test_cls_v4.dat"))
    except IOError as e:
        test_cl = nm.loadtxt(osp.join(pars.wmap_data,"data/test_cls_v5.dat"))
    # Convert D_l = l(l+1)C_l/2pi columns back to C_l for l in [2, 1200].
    mcl = nm.zeros((4,1201),dtype=nm.double)
    llp1s2pi = nm.arange(1201)*nm.arange(1,1202)/2./nm.pi
    mcl[:,2:] = (test_cl[:1201-2,1:5].T)/llp1s2pi[2:]
    # Clamp the requested multipole ranges to what WMAP supports.
    ttmin = max(2,pars.int.ttmin)
    ttmax = min(1200,pars.int.ttmax)
    temin = max(2,pars.int.temin)
    temax = min(800,pars.int.temax)
    has_tt = True
    has_te = True
    # An inverted range (min > max) disables that spectrum entirely.
    if pars.int.ttmin>pars.int.ttmax:
        ttmin = 1201
        ttmax = 2
        has_tt = False
    if pars.int.temin>pars.int.temax:
        temin = 801
        temax = 2
        has_te = False
    #print has_tt,has_te,ttmin,ttmax,temin,temax
    root_grp,hf = php.baseCreateParobject(pars.res_object)
    # hascl flags which spectra are present; index 0 is TT, indices 1..3 are
    # tied to the TE switch (presumably EE/BB/TE -- TODO confirm clik order).
    hascl = [0]*6
    hascl[0] = has_tt
    hascl[1:4] = [has_te]*3
    hascl = nm.array(hascl,dtype=nm.int)
    #print hascl
    lmin = 0
    lmax = min(1200,max(pars.int.ttmax,pars.int.temax))
    # Keep only the enabled spectra, flattened for the self-check below.
    mcl = (nm.compress(hascl[:4],mcl[:,:lmax+1],0)).flat[:]
    lkl_grp = php.add_lkl_generic(root_grp,"wmap",1,hascl,lmax,lmin)
    lkl_grp.attrs["ttmin"] = ttmin
    lkl_grp.attrs["temin"] = temin
    lkl_grp.attrs["ttmax"] = ttmax
    lkl_grp.attrs["temax"] = temax
    lkl_grp.attrs["use_gibbs"] = pars.int.use_gibbs
    lkl_grp.attrs["use_lowl_pol"] = pars.int.use_lowl_pol
    #lkl_grp.attrs["external_dir"] = osp.realpath(pars.wmap_data)
    # Bundle (or reference) the WMAP data directory inside the likelihood file.
    php.add_external_data(osp.realpath(pars.wmap_data),lkl_grp,tar=bool(pars.int(default=1).include))
    hf.close()
    # Self-check: evaluate the likelihood on the reference spectra.
    if hasattr(clik,"clik"):
        res = php.add_selfcheck(pars.res_object,mcl)
        print("lkl for init cl %g"%res)
    if "cl_save" in pars:
        f=open(pars.cl_save,"w")
        for ci in mcl:
            print(ci, file=f)
        f.close()
import sys
if __name__=="__main__":
    main(sys.argv)
```
#### File: plc-3.01/waf_tools/any_lapack.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import autoinstall_lib as atl
from waflib import Logs
import os.path as osp
from waflib import Logs
from waflib import Context
from waflib import Errors
import sys
import waflib
# LAPACK release fetched and built by installlapack() below.
version = "lapack-3.3.1"
tool = "lapack-3.3.1"
# LAPACK/BLAS symbols clik needs; read at import time from the companion file.
lapack_funcs= " ".join(open("waf_tools/lapack_funcs.txt").read().strip().split())
#lapack_funcs = "dposv dtrsv dpotrf dpotrs dpotri dtrtri dtrmm dtrmv dgeqrf dormqr dsyev dgesvd dsymv dgemv dgemm dsyrk dsyr2k daxpy dtrsm dsymm dsyr ddot dsyevd dlamch dsyevr"
def options(ctx):
    """Register the --lapack_* command line options (incl. MKL and Apple
    Accelerate variants)."""
    atl.add_lib_option("lapack",ctx,install=True)
    grp = ctx.parser.get_option_group("--lapack_install")
    grp.add_option("--lapack_mkl",action="store",default="",help="if lapack is mkl, location of the mkl install")
    grp.add_option("--lapack_mkl_version",action="store",default="10.3",help="only needed if version of the mkl library is lower than 10.3 (could be 10.2, 10.1 or 10.0)")
    grp.add_option("--lapack_apple",action="store_true",default=sys.platform.lower()=="darwin",help="use apple version of blas/lapack")
def do_include(ctx, ptrn="%s_"):
    """Generate <PREFIX>/include/lapack_clik.h.

    Each required LAPACK symbol gets a "#define name <mangled>" line (ptrn
    controls the Fortran name mangling, e.g. a trailing underscore),
    followed by the C prototypes held in `extra_inc`.
    """
    header_path = osp.join(ctx.env.PREFIX, "include/lapack_clik.h")
    # `with` guarantees the handle is closed even if a write fails (the
    # original leaked the handle on exception).
    with open(header_path, "w") as f:
        for fnc in lapack_funcs.split():
            print(("#define %s " + ptrn) % (fnc, fnc), file=f)
        print(extra_inc, file=f)
def configure(ctx):
    """Configure the lapack dependency.

    Tries, in order: Apple Accelerate (--lapack_apple), Intel MKL
    (--lapack_mkl), a freshly installed private reference LAPACK, or a
    system lapack/blas.  All branches end in atl.conf_lib().
    """
    #always assume that I need a dedicated include file.
    if ctx.options.lapack_apple:
        ctx.start_msg("Check apple lapack")
        if sys.platform.lower()!="darwin":
            ctx.end_msg("not on darwin ! Got '%s'"%sys.platform,color="YELLOW")
            raise Errors.WafError("cannot find apple lapack")
        ctx.end_msg("ok")
        lapack_extradefs = ["HAS_LAPACK"]
        lapack_libs = ["BLAS","LAPACK"]
        lapack_includes = ["lapack_clik.h"]
        lapack_extradefs += ["LAPACK_CLIK"]
        ctx.options.lapack_include = osp.join(ctx.env.PREFIX,"include")
        ctx.options.lapack_lib = "/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Versions/Current"
        do_include(ctx,"%s_")
    elif ctx.options.lapack_mkl:
        # parse version
        ctx.start_msg("Check mkl version")
        if ctx.options.lapack_mkl_version.strip()[:4] not in ("10.0","10.1","10.2","10.3"):
            ctx.end_msg(ctx.options.lapack_mkl_version.strip()+" not understood, 10.3 library recipe should be ok",color="YELLOW")
            raise Errors.WafError("Cannot understand mkl version '%s'"%ctx.options.lapack_mkl_version.strip())
        version = int(ctx.options.lapack_mkl_version.strip()[:4].split(".")[1])
        ctx.end_msg("10.%d"%version)
        lapack_extradefs = ["HAS_LAPACK"]
        lapack_extradefs += ["HAS_MKL"]
        lapack_includes = ["mkl_lapack.h","mkl_blas.h"]
        lapack_libs = []
        # Build the platform/bitness tag used to index mkl_options below.
        tag = sys.platform.lower()
        if tag=="darwin":
            pass
        elif "linux" in tag:
            tag="linux"
        else:
            raise Errors.WafError("unknown platform '%s'"%tag)
        tag+="_10.%d"%version
        mopt = ctx.env.mopt
        if ("32" in " ".join(mopt)) or ("i386" in " ".join(mopt)):
            tag+="_32"
        else:
            tag +="_64"
        if sys.platform.lower()!='darwin':
            #I need to create my own lapack !
            # On linux, link the required MKL archives into one shared
            # libclik_mkl.so so clik only depends on a single library.
            cmdline = """gcc -shared -Bdynamic %(func_list)s -Wl,--start-group %(ars)s -Wl,--end-group %(Lomp)s %(omp)s -o "%(res)s" """
            cmdlist = {}
            cmdlist["func_list"] = " ".join(["-u %s_"%v for v in lapack_funcs.split()])
            cmdlist["ars"] = " ".join([osp.join(mkl_options[tag][0]%(ctx.options.lapack_mkl),"lib%s.a"%v.strip()) for v in mkl_options[tag][1].split("-l") if v.strip() and v.strip()[:3]=="mkl"])
            cmdlist["Lomp"] = " ".join("-L%s"%v.strip() for v in ctx.env.LIBPATH_fc_runtime if v.strip())
            cmdlist["omp"] = " ".join([v.strip() for v in mkl_options[tag][1].split() if v.strip() and "mkl" not in v])
            cmdlist["res"] = osp.join(ctx.env.LIBDIR,ctx.env.cshlib_PATTERN%"clik_mkl")
            cmdline = cmdline%cmdlist
            #print cmdline
            ctx.start_msg("create specific mkl lib")
            llgo,llge = ctx.cmd_and_log(cmdline, output=waflib.Context.BOTH)
            #print llgo
            #print llge
            ctx.end_msg(cmdlist["res"])
            ctx.options.lapack_link = "-lclik_mkl "+cmdlist["omp"]
            ctx.options.lapack_lib = ctx.env.LIBDIR+":".join([""]+ctx.env.LIBPATH_fc_runtime)
            ctx.options.lapack_include = ctx.options.lapack_mkl+"/include"
        else:
            # On darwin use the MKL recipe for this tag directly.
            ctx.options.lapack_link = mkl_options[tag][1]
            ctx.options.lapack_lib = mkl_options[tag][0]%(ctx.options.lapack_mkl)+":".join([""]+ctx.env.LIBPATH_fc_runtime)
            if "framework" in ctx.options.lapack_mkl.lower():
                ctx.options.lapack_include = ctx.options.lapack_mkl+"/Headers"
            else:
                ctx.options.lapack_include = ctx.options.lapack_mkl+"/include"
        #try:
        #  atl.conf_lib(ctx,"lapack",lapack_libs,lapack_funcs.split(),lapack_includes,defines=lapack_extradefs,install=installlapack)
        #except Exception,e:
        #  pass
        #lapack_extradefs = ["HAS_LAPACK"]
        #lapack_libs = ["BLAS","LAPACK"]
        #lapack_includes = ["lapack.h","blas.h"]
        #if "mkl" in ctx.options.lapack_lib.lower() or "mkl" in ctx.options.lapack_include.lower() or "mkl" in ctx.options.lapack_link or ctx.options.lapack_mkl:
        #  ctx.env.mkl = True
        #  lapack_extradefs += ["HAS_MKL"]
        #  lapack_includes = ["mkl_lapack.h","mkl_blas.h"]
        #  if ctx.options.lapack_mkl:
        #    if ctx.env.has_ifort==False:
        #      raise Exception("cannot use MKL without ifort")
        #    if "framework" in ctx.options.lapack_mkl.lower():
        #      # guess we are on macosx
        #      # get the path of the framework
        #      if ctx.options.lapack_mkl[-1] == "/":
        #        fpath,fname = osp.split(ctx.options.lapack_mkl[:-1])
        #      else:
        #        fpath,fname = osp.split(ctx.options.lapack_mkl)
        #      fname = fname.split(".")[0]
        #      ctx.options.lapack_include = ctx.options.lapack_mkl+"/Headers"
        #      ctx.options.lapack_lib = ctx.options.lapack_mkl+"/Libraries/universal"
        #      if ctx.options.lapack_link=="":
        #        ctx.options.lapack_link = "-lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core"
        #    else:
        #      # assume it's 10 on linux
        #      # check whether it's 10.3
        #      if ctx.options.m32:
        #        libsuffix="/lib/32"
        #        libdep = "-lmkl_intel"
        #      else:
        #        libsuffix="/lib/em64t"
        #        libdep = "-lmkl_intel_lp64"
        #      if ctx.options.lapack_link=="":
        #        ctx.options.lapack_link = "-lmkl_lapack -lmkl_intel_thread -lmkl_core -liomp5 -lm -lpthread -lmkl_def" + libdep
        #      if not ctx.options.m32 and osp.exists(ctx.options.lapack_mkl+"/lib/intel64"):
        #        libsuffix="/lib/intel64"
        #        ctx.options.lapack_link = "-lmkl_intel_thread -lmkl_core -liomp5 -lm -lpthread -lmkl_def" + libdep
        #      ctx.options.lapack_include=ctx.options.lapack_mkl+"/include"
        #      ctx.options.lapack_lib=ctx.options.lapack_mkl+libsuffix+":".join([""]+ctx.env.LIBPATH_fc_runtime)
    elif atl.upgrade(ctx,"lapack") or ctx.options.lapack_islocal or ctx.options.lapack_forceinstall or atl.shouldIinstall_all(ctx,"lapack"):
        # Private reference LAPACK build (installlapack is invoked by conf_lib).
        ctx.env.append_value("LIBPATH_lapack",ctx.env.LIBPATH_fc_runtime)
        ctx.env.append_value("RPATH_lapack",ctx.env.RPATH_fc_runtime)
        ctx.env.append_value("LIB_lapack",ctx.env.LIB_fc_runtime)
        lapack_libs = ["lapack_clik","blas_clik"]
        lapack_includes = ["lapack_clik.h"]
        lapack_extradefs = ["HAS_LAPACK"]
        lapack_extradefs += ["LAPACK_CLIK"]
    else:
        # System-provided lapack/blas with the clik-generated header.
        lapack_libs = []
        lapack_includes = ["lapack_clik.h"]
        ctx.options.lapack_include = osp.join(ctx.env.PREFIX,"include")
        lapack_libs = ["lapack","blas"]
        lapack_extradefs = ["HAS_LAPACK"]
        lapack_extradefs += ["LAPACK_CLIK"]
        do_include(ctx)
    atl.conf_lib(ctx,"lapack",lapack_libs,lapack_funcs.split(),lapack_includes,defines=lapack_extradefs,install=installlapack)
def unlist(wh):
    """Return *wh* itself when it is already a string, otherwise space-join it."""
    return wh if isinstance(wh, str) else " ".join(wh)
def installlapack(ctx):
    """Download, build and install reference BLAS+LAPACK as clik-private
    shared libraries (libblas_clik / liblapack_clik)."""
    filen = version+".tgz"
    atl.installsmthg_pre(ctx,"http://www.netlib.org/lapack/"+filen,filen)
    from waflib import Utils,Errors
    # Substitutions for the make.inc templates defined below.
    dii = {"FCC":unlist(ctx.env.FC),"FCFLAGS":" ".join(ctx.env.FCFLAGS+ctx.env.FCFLAGS_fcshlib),"FLINKFLAGS":" ".join(ctx.env.FCFLAGS+ctx.env.LINKFLAGS_fcshlib),"SO":ctx.env.shsuffix,"MFLAG":" ".join(ctx.env.FCFLAGS) }
    Logs.pprint("PINK","build blas")
    f=open("build/%s/make.inc"%version,"w")
    print(make_inc_blas%dii, file=f)
    f.close()
    cmdline = "cd build/%s; make blaslib"%version
    if ctx.exec_command(cmdline)!=0:
        raise Errors.WafError("Cannot build %s"%version)
    Logs.pprint("PINK","build lapack")
    # Rewrite make.inc with the lapack template (links against the new blas).
    f=open("build/%s/make.inc"%version,"w")
    print(make_inc_lapack%dii, file=f)
    f.close()
    cmdline = "cd build/%s; make lapacklib"%version
    if ctx.exec_command(cmdline)!=0:
        raise Errors.WafError("Cannot build %s"%version)
    import shutil
    # Install the freshly built shared objects into the target libdir.
    shutil.copyfile("build/%s/liblapack_clik.%s"%(version,ctx.env.shsuffix), osp.join(ctx.env.LIBDIR,"liblapack_clik.%s"%ctx.env.shsuffix))
    shutil.copyfile("build/%s/libblas_clik.%s"%(version,ctx.env.shsuffix), osp.join(ctx.env.LIBDIR,"libblas_clik.%s"%ctx.env.shsuffix))
    do_include(ctx)
make_inc_lapack="""
SHELL = /bin/sh
FORTRAN = %(FCC)s %(FCFLAGS)s
OPTS =
DRVOPTS = $(OPTS)
NOOPT = -g -O0
TIMER = INT_CPU_TIME
LOADER = %(FCC)s
LOADOPTS = %(MFLAG)s
BLASLIB = ../../libblas_clik.%(SO)s
ARCH = %(FCC)s
ARCHFLAGS = %(FLINKFLAGS)s -L../ -lblas_clik -o
RANLIB = echo
LAPACKLIB = liblapack_clik.%(SO)s
"""
make_inc_blas="""
SHELL = /bin/sh
FORTRAN = %(FCC)s %(FCFLAGS)s
OPTS =
DRVOPTS = $(OPTS)
NOOPT = -g -O0
TIMER = INT_CPU_TIME
BLASLIB = ../../libblas_clik.%(SO)s
ARCH = %(FCC)s
ARCHFLAGS = %(FLINKFLAGS)s -o
RANLIB = echo
LAPACKLIB = liblapack_clik.%(SO)s
"""
extra_inc = """
void dtrsv(const char *uplo, const char *trans, const char *diag, const int *n,
const double *a, const int *lda, double *x, const int *incx);
void dpotrf( char* uplo, int * n, double* a, int * lda, int * info );
void dpotri( char* uplo, int * n, double* a, int * lda, int * info );
void dgemv(const char *trans, const int *m, const int *n, const double *alpha,
const double *a, const int *lda, const double *x, const int *incx,
const double *beta, double *y, const int *incy);
void dsyrk(const char *uplo, const char *trans, const int *n, const int *k,
const double *alpha, const double *a, const int *lda, const double *beta,
double *c, const int *ldc);
void dsyr2k(const char *uplo, const char *trans, const int *n, const int *k,
const double *alpha, const double *a, const int *lda, const double *b, const int *ldb,
const double *beta, double *c, const int *ldc);
void dgesvd( char* jobu, char* jobvt, int * m, int * n, double* a, int * lda, double* s, double* u, int * ldu, double* vt, int * ldvt, double* work, int * lwork, int * info );
void dgemm(const char *transa, const char *transb, const int *m, const int *n, const int *k,
const double *alpha, const double *a, const int *lda, const double *b, const int *ldb,
const double *beta, double *c, const int *ldc);
void dtrtri( char* uplo, char* diag, int * n, double* a, int * lda, int * info );
void dtrmm(const char *side, const char *uplo, const char *transa, const char *diag,
const int *m, const int *n, const double *alpha, const double *a, const int *lda,
double *b, const int *ldb);
void dtrmv(const char *uplo, const char *transa, const char *diag, const int *n,
const double *a, const int *lda, double *b, const int *incx);
void dgeqrf( int * m, int * n, double* a, int * lda, double* tau, double* work, int * lwork, int * info );
void dormqr( char* side, char* trans, int * m, int * n, int * k, double* a, int * lda, double* tau, double* c, int * ldc, double* work, int * lwork, int * info );
void dsyev( char* jobz, char* uplo, int * n, double* a, int * lda, double* w, double* work, int * lwork, int * info );
void dsymv(const char *uplo, const int *n, const double *alpha, const double *a, const int *lda,
const double *x, const int *incx, const double *beta, double *y, const int *incy);
void daxpy(const int *n, const double *alpha, const double *x, const int *incx, double *y, const int *incy);
void dtrsm(const char *side, const char *uplo, const char *transa, const char *diag,
const int *m, const int *n, const double *alpha, const double *a, const int *lda,
double *b, const int *ldb);
void dsyr(const char *uplo, const int *n, const double *alpha, const double *x, const int *incx,
double *a, const int *lda);
void dsymm(const char *side, const char *uplo, const int *m, const int *n,
const double *alpha, const double *a, const int *lda, const double *b, const int *ldb,
const double *beta, double *c, const int *ldc);
double ddot(int* N,double *DX, int* INCX,double *DY,int* INCY);
void dpotrs(char* UPLO,int * N,int * NRHS,double* A,int* LDA,double* B,int* LDB,int* INFO );
void dsyevd(char *jobz, char *uplo, int *n, double *a, int *lda, double *w, double *work, int *lwork, int *iwork, int *liwork, int *info);
void dposv( char* UPLO, int* N, int* NRHS, double* A, int* LDA, double* B, int* LDB, int* INFO );
double dlamch_(char *cmach);
void dsyevr(char jobz, char range, char uplo, int n, double
*a, int lda, double vl, double vu, int il, int iu,
double abstol, int *m, double *w, double *z, int
ldz, int *isuppz, int *info);
"""
mkl_options = {
"darwin_10.3_64":("%s/lib","-lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"darwin_10.2_64":("%s/lib/em64t","-lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"darwin_10.1_64":("%s/lib/em64t","-lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"darwin_10.0_64":("%s/lib/em64t","-lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"linux_10.0_64" :("%s/lib/em64t","-lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"linux_10.1_64" :("%s/lib/em64t","-lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"linux_10.0_32" :("%s/lib/32","-lmkl_intel -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"linux_10.1_32" :("%s/lib/32","-lmkl_intel -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"linux_10.2_32" :("%s/lib/32","-lmkl_intel -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"linux_10.2_64" :("%s/lib/em64t"," -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"linux_10.3_64" :("%s/lib/intel64"," -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
"linux_10.3_32" :("%s/lib/ia32"," -lmkl_intel -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm"),
}
```
#### File: plc-3.01/waf_tools/autoinstall_fftw3.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import autoinstall_lib as atl
# FFTW release fetched when autoinstalling.
version = "fftw-3.2.2"
tool = "fftw3"
print("-> loading %s autoinstall (using version %s)"%(tool,version))
def options(opt):
    """Register the --fftw3* command line options."""
    atl.add_lib_option(tool,opt,install=True)
def configure(ctx):
    """Detect fftw3 (or trigger an autoinstall) and define HAS_FFTW3."""
    atl.conf_lib(ctx,tool,["fftw3"],"fftw_execute","fftw3.h",defines="HAS_FFTW3",install=installfftw3)
def installfftw3(ctx):
    """Download, build and install a shared fftw3 from fftw.org."""
    filen = version+".tar.gz"
    atl.installsmthg_pre(ctx,"http://www.fftw.org/"+filen,filen)
    atl.installsmthg_post(ctx,filen,"fftw","--enable-shared")
```
#### File: plc-3.01/waf_tools/c_openmp.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from waflib.Configure import conf
from waflib.Errors import ConfigurationError
# Minimal translation unit that only compiles when the compiler actually
# enables OpenMP (the preprocessor guard forces an error otherwise).
OPENMP_CODE = '''
#ifndef _OPENMP
choke me
#endif
#include <omp.h>
int main () { return omp_get_num_threads (); }
'''
@conf
def check_openmp_cflags(self, **kw):
    """Find the compiler flag (if any) needed to build the OpenMP test program.

    Tries no flag first, then a list of vendor-specific flags; on success the
    winning flag is applied to both the compile and link steps.  Fails the
    configuration unless mandatory=False is passed.
    """
    self.start_msg('Checking for $CC option to support OpenMP')
    kw.update({'fragment': OPENMP_CODE})
    try:
        self.validate_c(kw)
        self.run_build(**kw)
        if 'define_name' in kw:
            self.define(kw['define_name'], 1)
        self.end_msg('None')  # OpenMP works without any extra flag
    except ConfigurationError:
        for flag in ('-fopenmp', '-xopenmp', '-openmp', '-mp', '-omp', '-qsmp=omp'):
            try:
                self.validate_c(kw) #refresh env
                if kw['compiler'] == 'c':
                    kw['ccflags'] = kw['cflags'] = flag
                elif kw['compiler'] == 'cxx':
                    kw['cxxflags'] = flag
                else:
                    self.fatal('Compiler has to be "c" or "cxx"')
                kw['linkflags'] = flag
                kw['success'] = self.run_build(**kw)
                self.post_check(**kw)
                self.end_msg(flag)
                return
            except ConfigurationError:
                # this flag failed: drop the cached build env and try the next one
                del kw['env']
                continue
        self.end_msg('Not supported')
        if 'define_name' in kw:
            self.undefine(kw['define_name'])
        if kw.get('mandatory', True):
            self.fatal('OpenMP is not supported')
```
#### File: plc-3.01/waf_tools/execconfig.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import waflib.TaskGen
import waflib.Task as Task
from waflib import Utils
import waflib
import os.path as osp
import os
def uniqify(lst):
    """Return the elements of *lst* with duplicates removed, first occurrence wins.

    Uses a 'seen' set for O(n) behaviour instead of the original O(n^2)
    list-membership scan.  Inputs here are lists of flag/path strings, which
    are hashable.
    """
    seen = set()
    out = []
    for item in lst:
        if item in seen:
            continue
        seen.add(item)
        out.append(item)
    return out
def ptrquote(st):
    """Return *st* with every double quote escaped as \\" (for embedding the
    string inside the generated config template)."""
    # str.replace is equivalent to the original char-by-char loop but linear
    # and implemented in C (the old += accumulation was quadratic at worst).
    return st.replace('"', '\\"')
@waflib.TaskGen.feature("build_pkgconfig")
def build_pkgconfig(self):
    """Generate the clik-config executable script for the C or F90 flavor.

    Collects the use-lib variables of this task generator, flattens them
    into compile-flag and link-flag strings, interpolates them into
    `config_tpl`, and installs the result as an executable in BINDIR.
    """
    from waflib.Tools.ccroot import USELIB_VARS
    if self.flavor=='c':
        USELIB_VARS['build_pkgconfig'] = set(['INCLUDES', 'DEFINES', 'CPPFLAGS', 'CFLAGS']+['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS'])
        cf = ['CPPFLAGS', 'CFLAGS']
        addlib = ["clik"]
    else:
        USELIB_VARS['build_pkgconfig'] =set(['FCFLAGS','DEFINES','INCLUDES']+['LIB','STLIB','LIBPATH','STLIBPATH','LINKFLAGS','RPATH','LINKDEPS'])
        cf = ['FCFLAGS']
        addlib = ["clik","clik_f90"]
    self.process_use()
    self.propagate_uselib_vars()
    vrs = dict([(v,list((self.env[v]))) for v in USELIB_VARS['build_pkgconfig']])
    # Flatten each variable into a deduplicated, quote-escaped flag string.
    includepath = ptrquote(" ".join([self.env.CPPPATH_ST%v for v in uniqify(vrs["INCLUDES"])]))
    libpath = ptrquote(" ".join([self.env.LIBPATH_ST%v for v in uniqify(vrs["LIBPATH"])]))
    rlibpath = ptrquote(" ".join([self.env.RPATH_ST%v for v in uniqify(vrs["RPATH"])]))
    stlibpath = ptrquote(" ".join([self.env.LIBPATH_ST%v for v in uniqify(vrs["STLIBPATH"])]))
    libs = ptrquote(" ".join([self.env.LIB_ST%v for v in uniqify(vrs["LIB"]+addlib)]))
    stlibs = ptrquote(" ".join([self.env.STLIB_ST%v for v in uniqify(vrs["STLIB"])]))
    defines = ptrquote(" ".join([self.env.DEFINES_ST%v for v in uniqify(vrs["DEFINES"])]))
    cfs = []
    for tt in cf+["LINKFLAGS"]:
        cfs += vrs[tt]
    cflags = ptrquote(" ".join(uniqify(cfs)))
    alibs = ""
    if libs:
        alibs += (self.env.SHLIB_MARKER or "") +" ".join([rlibpath,libpath,libs])
    if stlibs:
        # BUGFIX: the original referenced an undefined name `srlibpath` here,
        # raising NameError whenever static libraries were configured.
        alibs += (self.env.STLIB_MARKER or "") +" ".join([stlibpath,stlibs])
    f=open(osp.join(self.env.BINDIR,self.target),"w")
    print(config_tpl%(" ".join((includepath,defines,cflags)),alibs), file=f)
    f.close()
    # The generated script must be executable.
    os.chmod(osp.join(self.env.BINDIR,self.target),Utils.O755)
# Template for the generated clik-config script.  The two %s slots receive the
# cflags and libs strings built in build_pkgconfig().
# BUGFIX: the original template used Python-2-only `print x,` statements,
# so the generated script crashed with a SyntaxError under python3; this
# version is valid on both Python 2 and 3.
config_tpl = """#! /usr/bin/env python
# don't do much for now
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--cflags", action="store_true",
                  help="only the cflags")
parser.add_option("--libs", action="store_true",
                  help="only libflags")
(options, args) = parser.parse_args()
res={}
cflags = "%s"
libs = "%s"
if (not options.libs) and (not options.cflags):
    options.libs=True
    options.cflags=True
out = []
if options.cflags:
    out.append(cflags)
if options.libs:
    out.append(libs)
sys.stdout.write(" ".join(out) + "\\n")
"""
@waflib.TaskGen.feature("*")
@waflib.TaskGen.before_method('process_source')
def process_execrule(self):
    """Variant of waf's built-in 'rule' processing driven by an `execrule`
    attribute; generated outputs are installed with executable (0755) mode."""
    if not getattr(self,'execrule',None):
        return
    # This generator is rule-driven; skip normal source processing.
    self.meths.remove('process_source')
    name=str(getattr(self,'name',None)or self.target or self.execrule)
    cls=Task.task_factory(name,self.execrule,getattr(self,'vars',[]),shell=getattr(self,'shell',True),color=getattr(self,'color','BLUE'))
    tsk=self.create_task(name)
    if getattr(self,'target',None):
        if isinstance(self.target,str):
            self.target=self.target.split()
        if not isinstance(self.target,list):
            self.target=[self.target]
        for x in self.target:
            if isinstance(x,str):
                tsk.outputs.append(self.path.find_or_declare(x))
            else:
                x.parent.mkdir()
                tsk.outputs.append(x)
        if getattr(self,'install_path',None):
            # install outputs as executables
            self.bld.install_files(self.install_path,tsk.outputs,chmod=Utils.O755)
    if getattr(self,'source',None):
        tsk.inputs=self.to_nodes(self.source)
        self.source=[]
    if getattr(self,'scan',None):
        cls.scan=self.scan
    if getattr(self,'cwd',None):
        tsk.cwd=self.cwd
    if getattr(self,'update_outputs',None)or getattr(self,'on_results',None):
        Task.update_outputs(cls)
    if getattr(self,'always',None):
        Task.always_run(cls)
    # propagate task ordering constraints from the generator to the task class
    for x in['after','before','ext_in','ext_out']:
        setattr(cls,x,getattr(self,x,[]))
```
#### File: plc-3.01/waf_tools/try_ifort.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
from waflib import Options
import os.path as osp
from waflib import Logs
from waflib import Context
from waflib import Errors
def options(ctx):
    """Register fortran compiler selection options in the shared
    'compiler options' group (created on demand)."""
    import optparse
    grp = ctx.parser.get_option_group("--gcc")
    if grp is None:  # idiomatic identity test (was `grp==None`)
        grp=optparse.OptionGroup(ctx.parser,"compiler options")
    grp.add_option("--gfortran",action="store_true",default=False,help="Do not test for ifort and only use gfortran")
    grp.add_option("--ifort",action="store_true",default=False,help="Do not test for gfortran and only use ifort")
    grp.add_option("--fortran_flagline",action="store",default="",help="flagline to link fortran object to c using ld")
    ctx.add_option_group(grp)
def configure_(ctx):
    """Pick the fortran compiler: honour an explicit flag line, then try
    ifort, falling back to gfortran unless --ifort was forced."""
    if ctx.options.fortran_flagline:
        # BUGFIX: the original called `conf.parse_flags(...)` but no `conf`
        # exists in this module -- it raised NameError whenever
        # --fortran_flagline was supplied.
        # NOTE(review): depending on the bundled waf version the keyword may
        # need to be `uselib_store` rather than `uselib` -- confirm.
        ctx.parse_flags(ctx.options.fortran_flagline,uselib="fc_runtime")
    if sys.platform.lower()=="darwin":
        ctx.env.fcshlib_PATTERN = 'lib%s.dylib'
    ctx.env.has_ifort = False
    if not Options.options.gfortran:
        try:
            ifort_conf(ctx)
            return
        except Exception as e:
            if Options.options.ifort:
                raise
            Logs.pprint("PINK", "ifort not found, defaulting to gfortran (cause: '%s')"%e)
    gfortran_conf(ctx)
def configure(ctx):
    """Run compiler detection, then propagate shared-lib flags and isolate
    the PIC flags into FCFLAGS_fpic."""
    configure_(ctx)
    ctx.env.append_value("FCFLAGS_fcshlib",ctx.env.LINKFLAGS_fcshlib)
    ctx.env["FCFLAGS_fpic"]=[]
    # keep only the PIC-related flags for objects linked into shared libs
    ctx.env.append_value("FCFLAGS_fpic",[flg for flg in ctx.env.FCFLAGS_fcshlib if "-fpic" in flg.lower()])
    #ctx.env.append_value("FCFLAGS_fpic","-fpe0")
def show_linkline(ctx):
    """Report the fortran runtime link line (-L search paths, then -l libs)."""
    ctx.start_msg("fortran link line")
    search_dirs = " ".join("-L%s" % path for path in ctx.env.LIBPATH_fc_runtime)
    runtime_libs = " ".join("-l%s" % lib for lib in ctx.env.LIB_fc_runtime)
    ctx.end_msg(search_dirs + " " + runtime_libs)
def ifort_conf(ctx):
    """Detect ifort and reconstruct its runtime link line from a -dryrun link."""
    import waflib
    import os
    ctx.env.FC=[]
    ctx.load('ifort')
    if sys.platform.lower()=="darwin":
        ctx.env.LINKFLAGS_fcshlib = ['-dynamiclib']
    ctx.env.append_value('FCFLAGS',ctx.env.mopt)
    ctx.env["FCFLAGS_fc_omp"]=[]
    ctx.env.FCSHLIB_MARKER = [""]
    ctx.env.FCSTLIB_MARKER = [""]
    ctx.start_msg("Check ifort version")
    v90 = ctx.cmd_and_log(" ".join(ctx.env.FC)+" --version",quiet=Context.STDOUT).split("\n")[0].strip()
    v90 = v90.split("\n")[0].strip().split(" ")[2]
    ctx.end_msg(v90)
    ctx.env.IFORT_VERSION = v90
    majver = int(v90.split(".")[0])
    rl0 = []
    if majver>13:
        # newer ifort needs libirc listed explicitly in the runtime libs
        rl0 = ["irc"]
    if majver>15:
        # -openmp was renamed -qopenmp in recent ifort releases
        ctx.env.append_value("FCFLAGS_fc_omp","-qopenmp")
    else:
        ctx.env.append_value("FCFLAGS_fc_omp","-openmp")
    ctx.check_cc(
        errmsg="failed",msg='Compile a test code with ifort',
        mandatory=1,fragment = "program test\n WRITE(*,*) 'hello world'\n end program test\n",compile_filename='test.f90',features='fc fcprogram')
    if not ctx.options.fortran_flagline:
        ctx.start_msg("retrieve ifort link line")
        try:
            #print "%s %s -dryrun -dynamiclib -shared-intel -no-cxxlib dummy.f90"%(ctx.env.FC," ".join(ctx.env.FCFLAGS))
            llgo,llge = ctx.cmd_and_log("%s %s -dryrun -dynamiclib -shared-intel -no-cxxlib dummy.f90"%(" ".join(ctx.env.FC)," ".join(ctx.env.FCFLAGS+ctx.env.FCFLAGS_fc_omp)), output=waflib.Context.BOTH)
            #print "RET",llgo,llge
            # Parse the dry-run ld command: intel-specific -L paths and all -l libs.
            L = set([ll.strip() for ll in re.findall("^\s+-L(.+)\s*\\\\", re.split("^\s*ld\s*\\\\",llge,flags=re.M)[1],flags=re.M) if ("ifort" in ll.lower()) or ("intel" in ll.lower())])
            l = set([ll.strip() for ll in re.findall("^\s+-l(.+)\s*\\\\", re.split("^\s*ld\s*\\\\",llge,flags=re.M)[1],flags=re.M)])
            rL = set()
            rl = set(rl0)
            # Keep only libs that actually exist as shared objects in those paths.
            for Li in L:
                if osp.exists(Li):
                    oli = os.listdir(Li)
                    for li in l:
                        if ctx.env.cshlib_PATTERN%li in oli:
                            rl.add(li)
                            rL.add(Li)
        except:
            ctx.end_msg(False)
            raise
        for pth in list(rL) + ["/lib","/lib64"]:
            if osp.exists(pth):
                ctx.env.append_value("LIBPATH_fc_runtime",pth)
                ctx.env.append_value("RPATH_fc_runtime",pth)
        ctx.env.append_value("LIB_fc_runtime",list(rl)+["pthread"])
        ctx.end_msg(True)
        show_linkline(ctx)
    ctx.env.has_ifort = True
def ifort_conf_(ctx):
    """Legacy ifort detection: derive the runtime link line from the
    ifortvars_*.sh scripts shipped next to the compiler.

    Kept for reference; `ifort_conf` above is the current implementation.
    """
    ctx.env.FC=[]
    ctx.load('ifort')
    if sys.platform.lower()=="darwin":
        ctx.env.LINKFLAGS_fcshlib = ['-dynamiclib']
    ctx.env.append_value('FCFLAGS',ctx.env.mopt)
    ctx.env.append_value("FCFLAGS_fc_omp","-openmp")
    ctx.env.FCSHLIB_MARKER = [""]
    ctx.env.FCSTLIB_MARKER = [""]
    ctx.check_cc(
        errmsg="failed",msg='Compile a test code with ifort',
        mandatory=1,fragment = "program test\n WRITE(*,*) 'hello world'\n end program test\n",compile_filename='test.f90',features='fc fcprogram')
    if not ctx.options.fortran_flagline:
        ctx.start_msg("retrieve ifort link line")
        if "/" not in ctx.env.FC[0]:
            # BUGFIX: keep FC a *list*.  The original assigned the bare string
            # returned by `which`, so ctx.env.FC[0] below degenerated to the
            # first character of the path.
            ctx.env.FC = [ctx.cmd_and_log("which %s"%ctx.env.FC[0]).strip()]
        ifort_path = osp.dirname(osp.realpath(ctx.env.FC[0]))
        # The vars script matching the target bitness holds the runtime paths.
        if ctx.options.m32:
            try:
                f=open(osp.join(ifort_path,'ifortvars_ia32.sh'))
            except:
                ctx.end_msg(False)
                raise Errors.WafError("Can't locate ifort configuration file")
        else:
            try:
                f=open(osp.join(ifort_path,'ifortvars_intel64.sh'))
            except:
                ctx.end_msg(False)
                raise Errors.WafError("Can't locate ifort configuration file")
        txt = f.read()
        f.close()
        # Extract the (DY)LD_LIBRARY_PATH the script would export.
        if sys.platform.lower()=="darwin":
            sp = "DYLD_LIBRARY_PATH"
        else:
            sp = "LD_LIBRARY_PATH"
        res = re.findall("\s"+sp+"\s*=\s*\"(.+)\"",txt)[0]
        for pth in res.split(":"):
            ctx.env.append_value("LIBPATH_fc_runtime",pth)
            ctx.env.append_value("RPATH_fc_runtime",pth)
        ctx.env.append_value("LIB_fc_runtime",["ifcore","intlc","ifport","imf","irc","svml","iomp5","pthread"])
        ctx.end_msg(True)
        show_linkline(ctx)
def gfortran_conf(ctx):
    """Configure the gfortran compiler: flags, OpenMP, a version check, a
    compile probe, and the runtime link line (libgfortran/libgomp paths)."""
    ctx.env.FC=[]
    ctx.env.FCFLAGS = []
    ctx.load('gfortran')
    ctx.env["FCFLAGS_fc_omp"]=[]
    ctx.env.append_value("FCFLAGS_fc_omp","-fopenmp")
    ctx.env.append_value("FCFLAGS","-DGFORTRAN")
    # Accept arbitrarily long source lines in both fixed and free form.
    ctx.env.append_value("FCFLAGS","-ffixed-line-length-0")
    ctx.env.append_value("FCFLAGS","-ffree-line-length-0")
    mopt = ctx.env.mopt
    if sys.platform.lower()=="darwin":
        # On macOS force an explicit word size so FCSTLIB_MARKER matches.
        if "i386" in ctx.env.mopt:
            ctx.env.append_value('FCFLAGS','-m32')
            mopt = ["-m32"]
        else:
            ctx.env.append_value('FCFLAGS','-m64')
            mopt = ["-m64"]
    else:
        ctx.env.append_value('FCFLAGS',ctx.env.mopt)
    ctx.start_msg("Check gfortran version")
    v90 = ctx.cmd_and_log(" ".join(ctx.env.FC)+" --version",quiet=Context.STDOUT).split("\n")[0].strip()
    # NOTE(review): this regex only matches gfortran 4.x version strings;
    # newer majors (5+) fall into the "not found" branch -- confirm intended.
    version90 = re.findall("(4\.[0-9]\.[0-9])",v90)
    if len(version90)<1:
        #Logs.pprint("PINK","Can't get gfortran version... Let's hope for the best")
        ctx.end_msg("not found, let's hope for the best...",color="PINK")
    else:
        version90 = version90[0]
        vmid = int(version90.split(".")[1])
        if vmid<3:
            ctx.end_msg(v90,color="YELLOW")
            raise Errors.WafError("gfortran version need to be above 4.3 got %s"%version90)
        ctx.end_msg(v90)
    # kludge !
    ctx.env.FCSHLIB_MARKER = [""]
    ctx.env.FCSTLIB_MARKER = mopt
    ctx.check_cc(
        errmsg="failed",msg='Compile a test code with gfortran',
        mandatory=1,fragment = "program test\n WRITE(*,*) 'hello world'\n end program test\n",compile_filename='test.f90',features='fc fcprogram')
    ctx.start_msg("retrieve gfortran link line")
    # NOTE(review): queries .dylib file names, which assumes Darwin --
    # confirm behavior on Linux (gcc still answers -print-file-name).
    lgfpath = ctx.cmd_and_log(" ".join(ctx.env.FC)+" %s -print-file-name=libgfortran.dylib"%(" ".join(mopt)),quiet=Context.STDOUT)
    lpath = [osp.dirname(osp.realpath(lgfpath))]
    lgfpath = ctx.cmd_and_log(" ".join(ctx.env.FC)+" %s -print-file-name=libgomp.dylib"%(" ".join(mopt)),quiet=Context.STDOUT)
    lpath += [osp.dirname(osp.realpath(lgfpath))]
    lpath = set(lpath)
    ctx.env.append_value("LIB_fc_runtime",["gfortran","gomp"])
    ctx.env.append_value("LIBPATH_fc_runtime",list(lpath))
    ctx.env.append_value("RPATH_fc_runtime",list(lpath))
    ctx.end_msg(True)
    show_linkline(ctx)
``` |
{
"source": "Jiang-Kangkang/abess-test_python_whl_install",
"score": 2
} |
#### File: Jiang-Kangkang/abess-test_python_whl_install/mytest.py
```python
import sys
import numpy as np
from abess import *
np.random.seed(2)  # fix the RNG so the generated dataset is reproducible
n = 100              # number of samples
p = 20               # number of features
k = 3                # number of truly non-zero coefficients
family = "gaussian"
rho = 0.1            # feature correlation
data = make_glm_data(family=family, n=n, p=p, k=k, rho=rho)
def assert_reg(coef):
    """Refit OLS on the support of *coef* and check the coefficients agree.

    NOTE(review): relies on LinearRegression and assert_value being made
    available by ``from abess import *`` -- they are not imported
    explicitly here; confirm.
    """
    # Skips on interpreters older than 3.6 (the minor check would also
    # skip e.g. a hypothetical 4.5 -- harmless for this test).
    if (sys.version_info[0] < 3 or sys.version_info[1] < 6):
        return
    nonzero = np.nonzero(coef)[0]
    new_x = data.x[:, nonzero]
    reg = LinearRegression()
    reg.fit(new_x, data.y.reshape(-1))
    assert_value(coef[nonzero], reg.coef_)
# null
model1 = abessLm()
model1.fit(data.x, data.y)
# predict
y = model1.predict(data.x)
# score: smoke-test only -- y and score are computed but not checked
score = model1.score(data.x, data.y)
``` |
{
"source": "JiangKlijna/DesignPattern",
"score": 3
} |
#### File: DesignPattern/CompositePattern/Composite.py
```python
class Component:
    """Base node of the Composite pattern; leaves and composites share it."""

    def __init__(self, strName):
        # Display name shown by Display(); shared by all node kinds.
        self.m_strName = strName

    def Add(self, com):
        """Attach a child component; no-op by default."""
        pass

    def Display(self, nDepth):
        """Print this node indented by nDepth dashes; no-op by default."""
        pass


class Leaf(Component):
    """A component with no children."""

    def Add(self, com):
        # Leaves cannot hold children; report rather than raise, matching
        # the lenient contract of the original demo.
        print("leaf can't add")

    def Display(self, nDepth):
        # "-" * nDepth replaces the original character-by-character loop.
        print("-" * nDepth + self.m_strName)


class Composite(Component):
    """A component that holds an ordered list of child components."""

    def __init__(self, strName):
        # Delegate name storage to the base class instead of duplicating it.
        super().__init__(strName)
        self.c = []

    def Add(self, com):
        self.c.append(com)

    def Display(self, nDepth):
        print("-" * nDepth + self.m_strName)
        # Children are indented two levels deeper than their parent.
        for com in self.c:
            com.Display(nDepth + 2)


if __name__ == "__main__":
    p = Composite("Jiang")
    p.Add(Leaf("Klijna"))
    p.Add(Leaf("Ling"))
    p1 = Composite("Hei")
    p1.Add(Leaf("Shi"))
    p.Add(p1)
    p.Display(1)
```
#### File: DesignPattern/DecoratorPattern/Decorator.py
```python
class foo(object):
    """Plain object whose methods the decorator wraps."""

    def f1(self):
        print("original f1")

    def f2(self):
        print("original f2")


class foo_decorator(object):
    """Decorator: overrides f1, transparently forwards everything else."""

    def __init__(self, decoratee):
        self._wrapped = decoratee

    def f1(self):
        # Add behavior, then delegate to the wrapped object's f1.
        print("decorated f1")
        self._wrapped.f1()

    def __getattr__(self, attr_name):
        # Only invoked for attributes not found on the decorator itself,
        # so f1 stays overridden while f2 (and anything else) pass through.
        return getattr(self._wrapped, attr_name)


u = foo()
v = foo_decorator(u)
v.f1()
v.f2()
```
#### File: DesignPattern/SingletonPattern/Singleton.py
```python
class Singleton(object):
    """Classic singleton: __new__ always hands back one shared instance."""

    __instance = None

    def __new__(cls):
        # Lazily create the sole instance on first construction; every
        # later call returns the exact same object.
        if cls.__instance is None:
            cls.__instance = super(Singleton, cls).__new__(cls)
        return cls.__instance


s = Singleton()
s1 = Singleton()
print("Object created", s, s1)
print(s == s1)
#### File: DesignPattern/VisitorPattern/Visitor.py
```python
class Node(object):
    pass


class A(Node):
    pass


class B(Node):
    pass


class C(A, B):
    pass


class Visitor(object):
    """Dispatch on the node's MRO: the first visit_<ClassName> method wins,
    falling back to generic_visit when no class-specific handler exists."""

    def visit(self, node, *args, **kwargs):
        handler = next(
            (getattr(self, 'visit_' + klass.__name__)
             for klass in type(node).__mro__
             if hasattr(self, 'visit_' + klass.__name__)),
            self.generic_visit,
        )
        return handler(node, *args, **kwargs)

    def generic_visit(self, node, *args, **kwargs):
        print('generic_visit ' + node.__class__.__name__)

    def visit_B(self, node, *args, **kwargs):
        print('visit_B ' + node.__class__.__name__)


a = A()
b = B()
c = C()
visitor = Visitor()
visitor.visit(a)
visitor.visit(b)
visitor.visit(c)
``` |
{
"source": "JiangKui007/qingcloud-cli-mini",
"score": 2
} |
#### File: cli/instance_op/__init__.py
```python
from cli.instance_op import run
from cli.instance_op import describe
from cli.instance_op import terminate
class ActionManager(object):
    """Registry mapping CLI sub-command names to their action classes."""

    # Defined on the class so both helpers resolve it uniformly via ``cls``
    # (and so tests can monkey-patch it in one place).
    action_table = {'describe-instances': describe.DescribeInstancesAction,
                    'terminate-instances': terminate.TerminateInstancesAction,
                    'run-instances': run.RunInstancesAction}

    @classmethod
    def get_action(cls, action):
        """Return the action class registered for *action*, or None."""
        return cls.action_table.get(action)

    @classmethod
    def get_valid_actions(cls):
        """Return all registered command names, sorted alphabetically."""
        # Consistency fix: use ``cls`` like get_action does, instead of
        # hard-coding the class name.
        return sorted(cls.action_table.keys())
```
#### File: cli/instance_op/instance_op_test.py
```python
import unittest
from unittest import mock
import cli.instance_op
from cli.instance_op import ActionManager, describe
def mock_action_table():
    """Install a fake action table on ActionManager and return its parts.

    Returns (describe_mock, terminate_mock, run_mock, table) where *table*
    is the dict that has just been assigned to ActionManager.action_table.
    """
    describe_mock = mock.Mock(return_value='describe-instances')
    terminate_mock = mock.Mock(return_value='terminate-instances')
    run_mock = mock.Mock(return_value='run-instances')
    table = {
        'run-instances': run_mock,
        'terminate-instances': terminate_mock,
        'describe-instances': describe_mock,
    }
    ActionManager.action_table = table
    return describe_mock, terminate_mock, run_mock, table
class TestActionManager(unittest.TestCase):
    """Exercise ActionManager lookups against a mocked action table."""

    def test_get_action(self):
        di, ti, ri, ActionManager.action_table = mock_action_table()
        # Each command name must resolve to exactly the mock registered
        # for it.
        for command, expected in (('describe-instances', di),
                                  ('run-instances', ri),
                                  ('terminate-instances', ti)):
            self.assertEqual(ActionManager.get_action(command), expected)

    def test_get_valid_actions(self):
        mock_action_table()
        self.assertEqual(ActionManager.get_valid_actions(),
                         sorted(['describe-instances', 'run-instances', 'terminate-instances']))
```
#### File: cli/instance_op/terminate.py
```python
from cli.connection import explode_array
from .base import BaseAction
class TerminateInstancesAction(BaseAction):
    """CLI action that terminates one or more instances."""

    action = 'TerminateInstances'
    command = 'terminate-instances'
    usage = '%(prog)s -i "instance_id,..." ' \
            'See: https://docs.qingcloud.com/product/api/action/instance/terminate_instances.html'

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register this command's extra arguments on *parser*."""
        parser.add_argument('-i', '--instances', dest='instances',
                            action='store', type=str, default='',
                            help='the comma separated IDs of instances you want to terminate.')
        # Bug fix: the help text below was copy-pasted from --instances and
        # described the wrong option.
        parser.add_argument('-d', '--direct_cease', dest='direct_cease',
                            action='store', type=str, default='',
                            help='whether to cease the instances directly instead of '
                                 'putting them into the recycle bin first.')
        return parser

    @classmethod
    def build_directive(cls, options):
        """Validate *options* and build the API request dict.

        Returns None (after printing an error) when a required value is
        missing, matching the other actions' error style.
        """
        required_params = {
            'zone': options.zone,
        }
        for param, value in required_params.items():
            if value is None or value == '':
                print('error: [%s] should be specified' % param)
                return None
        instances = explode_array(options.instances)
        if not instances:
            print('error: [instances] should be specified')
            return None
        return {'instances': instances,
                'direct_cease': options.direct_cease,
                'zone': options.zone
                }
``` |
{
"source": "jianglangcaisheng/answer_AI",
"score": 3
} |
#### File: jianglangcaisheng/answer_AI/match_answer.py
```python
from skimage import io
import os
import numpy as np
DEBUG = 0  # set to 1 for path diagnostics
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if DEBUG:
    print(BASE_DIR)
# NOTE(review): Windows-style relative path -- confirm when running on
# other platforms.
PICS_DIR = os.path.join(BASE_DIR,"..\\pics\\test_match")
if DEBUG:
    print(PICS_DIR)
# Reference RGB colors used to classify pixels in the screenshots.
GREY = [247, 247, 247]
GREEN = [148, 211, 77]
WHITE = [255, 255, 255]
# Screen geometry in pixels; values appear tuned to one specific phone
# resolution -- TODO confirm before reusing on other devices.
vertex_top = 1233
vertex_left = 174
box_width_all = 735
box_height_all = 112
start_top = 1257
start_left = 352
box_width = int(735 / 2)
box_height = int(112 * 2/3)
interval_height = int((1738 - 1233) / 3)
# Question region as [top, bottom, left, right].
question_pos = [1054, 1215, 59, 1000]
def crop_answer(whole_img):
    """Crop the four answer-option boxes out of a full screenshot.

    Returns a 4-tuple of RGB sub-images (one per answer slot), each taken
    at the module-level geometry (start_top/start_left/box_* constants),
    with consecutive slots offset vertically by interval_height.
    """
    # Loop over the four slots instead of four copy-pasted slice lines.
    boxes = []
    for slot in range(4):
        top = start_top + interval_height * slot
        boxes.append(whole_img[top:top + box_height,
                               start_left:start_left + box_width, 0:3])
    return tuple(boxes)
def cal_num_scalar(image, color):
    """Count pixels whose red channel differs from color[0].

    Pure-Python reference implementation; see cal_num for the vectorized
    equivalent.  Only channel 0 is compared.
    """
    count = 0
    rows, cols = image.shape[0], image.shape[1]
    for row in range(rows):
        for col in range(cols):
            if image[row][col][0] != color[0]:
                count += 1
    return count
def cal_num(image, color):
    """Count pixels whose red channel differs from color[0] (vectorized)."""
    mismatch = image[:, :, 0] != color[0]
    return int(mismatch.sum())
def cal_num_cat(image, color):
    """Build a 10-character descriptor for *image*.

    The image is split into left/right halves; for each half the number of
    pixels whose red channel differs from color[0] is counted and rendered
    as a 5-digit zero-padded decimal.  The two fields are concatenated,
    e.g. "0000100002".

    Raises AssertionError (same message as before) if a count ever needs
    more than 5 digits.
    """
    # Dead `if 0:` branch removed; str.zfill replaces the manual
    # zero-padding ladder.
    half = int(image.shape[1] / 2)
    descriptor = ""
    for part in range(2):
        mismatch = image[:, half * part:half * (part + 1), 0] != color[0]
        count = int(np.sum(mismatch))
        digits = str(count)
        # Preserve the original hard-error contract for oversized counts.
        assert len(digits) <= 5, "num_str length error. length: %d" % len(digits)
        descriptor += digits.zfill(5)
    return descriptor
def cal_num1(image, color):
    """Count pixels where not all three channels equal *color*.

    Pure-Python loop; a pixel matches only when R, G and B all agree with
    the reference color.
    """
    mismatched = 0
    for row_idx in range(image.shape[0]):
        for col_idx in range(image.shape[1]):
            pixel = image[row_idx][col_idx][0:3]
            if sum(pixel == color) != 3:
                mismatched += 1
    return mismatched
def selection(correct_loss, loss1, loss2, loss3, loss4):
    """Return the 0-based index of the candidate loss closest to
    *correct_loss* (first one wins on ties)."""
    candidates = np.array([loss1, loss2, loss3, loss4])
    distances = np.abs(candidates - correct_loss)
    #print("selection: ", distances)
    return np.argmin(distances)
def selection_str(correct_loss, loss1, loss2, loss3, loss4):
    """Pick the candidate descriptor closest to *correct_loss*.

    Each argument is a 10-digit string of two 5-digit halves.  The winner
    minimizes the worse (max) of the two per-half absolute differences.
    Returns the 0-based index of the winning candidate.
    """
    def halves(descriptor):
        # Split "AAAAABBBBB" into a (1, 2) float array of its halves.
        pair = np.zeros(shape=(1, 2))
        pair[0, 0] = int(descriptor[0:5])
        pair[0, 1] = int(descriptor[5:10])
        return pair

    candidates = np.concatenate(
        [halves(loss1), halves(loss2), halves(loss3), halves(loss4)], axis=0)
    deltas = np.abs(candidates - halves(correct_loss))
    worst_per_candidate = np.max(deltas, axis=1)
    return np.argmin(worst_per_candidate)
def selection_str_rValue(correct_loss, loss1, loss2, loss3, loss4):
    """Like selection_str, but also report the winner's distance.

    Returns [index, distance] where *distance* is the winning candidate's
    worse per-half absolute difference from *correct_loss*.
    """
    def halves(descriptor):
        pair = np.zeros(shape=(1, 2))
        try:
            pair[0, 0] = int(descriptor[0:5])
            pair[0, 1] = int(descriptor[5:10])
        except ValueError:
            # Surface the offending descriptor before failing hard,
            # matching the original error behavior.
            print(descriptor)
            assert False, "ValueError"
        return pair

    candidates = np.concatenate(
        [halves(loss1), halves(loss2), halves(loss3), halves(loss4)], axis=0)
    deltas = np.abs(candidates - halves(correct_loss))
    worst_per_candidate = np.max(deltas, axis=1)
    best = np.argmin(worst_per_candidate)
    return [best, worst_per_candidate[best]]
if __name__ == "__main__":
#img_label_green_2 = io.imread(os.path.join(PICS_DIR,"answer_1.png"))
#img_question = io.imread(os.path.join(PICS_DIR,"question_0.png"))
#img_question_2 = io.imread(os.path.join(PICS_DIR,"question_1.png"))
#img_whole_green = io.imread(os.path.join(PICS_DIR,"autojump_1.png"))
##raw grey image
img_whole_grey = io.imread(os.path.join(PICS_DIR,"autojump_0.png"))
##crop question and answer,and get descriptor
question = img_whole_grey[question_pos[0]:question_pos[1], question_pos[2]:question_pos[3],0:3]
correct_question = cal_num(question, WHITE)
## another raw image
img_whole_grey = io.imread(os.path.join(PICS_DIR,"autojump_1.png"))
##crop question and answer,and get descriptor
question_new = img_whole_grey[question_pos[0]:question_pos[1], question_pos[2]:question_pos[3],0:3]
correct_question_new = cal_num(question, WHITE)
#########
io.imshow(question-question_new)
answer_1, answer_2, answer_3, answer_4 = crop_answer(img_whole_grey)
loss1 = cal_num(answer_1, GREY)
loss2 = cal_num(answer_2, GREY)
loss3 = cal_num(answer_3, GREY)
loss4 = cal_num(answer_4, GREY)
##calculate library's key value(questions')
img_question = io.imread(os.path.join(PICS_DIR,"question_0.png"))
loss_ques = cal_num(img_question, WHITE)
correct_answer = io.imread(os.path.join(PICS_DIR,"answer_0.png"))
correct_loss = cal_num(correct_answer, GREEN)
id = selection(correct_loss, loss1, loss2, loss3, loss4)
print(id)
#i=3
#img_label_grey_first = img_whole_grey[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3]
#img_label_grey_second = img_whole_green[start_top+interval_height*i:start_top+box_height+interval_height*i, start_left:start_left+box_width, 0:3]
#io.imshow(-img_label_grey_second+img_label_grey_first)
#io.imshow(img_label_grey_second-img_label_grey_first)
#label_num_pixel = cal_num(img_label_green, GREEN)
#print("LABEL_NUM_PIXEL: ", label_num_pixel)
#
#
#label_num_pixel_2 = cal_num(img_label_green_2, GREEN)
#print("LABEL_NUM_PIXEL_2: ", label_num_pixel_2)
#
#label_num_pixel_3 = cal_num(img_label_green_3, GREEN)
#print("LABEL_NUM_PIXEL_3: ", label_num_pixel_3)
#
#Q_num_pixel = cal_num(img_question, WHITE)
#print("Q_NUM_PIXEL: ", Q_num_pixel)
#
#label_num_pixel_grey = cal_num(img_label_grey, GREY)
#print("LABEL_NUM_PIXEL_GREY: ", label_num_pixel_grey)
#
#label_num_pixel_grey_first = cal_num(img_label_grey_first, GREY)
#print("LABEL_NUM_PIXEL_GREY_F: ", label_num_pixel_grey_first)
#
#label_num_pixel_grey_second = cal_num(img_label_grey_second, GREEN)
#print("LABEL_NUM_PIXEL_GREY_S: ", label_num_pixel_grey_second)
``` |
{
"source": "jianglb-alibaba/djangular-0.2.7",
"score": 2
} |
#### File: management/commands/testjs.py
```python
import os
import re
import subprocess
import tempfile
from django import template
from django.core import management as mgmt
from django.conf import settings
from djangular import utils
from optparse import make_option
class Command(utils.SiteAndPathUtils, mgmt.base.BaseCommand):
    """
    A base command that calls Karma from the command line, passing the options and arguments directly.
    """
    help = ("Runs the JS Karma tests for the given test type and apps. If no apps are specified, tests will be "
            "run for every app in INSTALLED_APPS.")
    args = '[type] [appname ...]'
    option_list = mgmt.base.BaseCommand.option_list + (
        make_option('--greedy', action='store_true',
                    help="Run every app in the project, ignoring passed in apps and the INSTALLED_APPS setting. "
                         "Note that running e2e tests for non-installed apps will most likely cause them to fail."),
    )
    requires_model_validation = False
    default_test_type = 'unit'  # used when no [type] argument is given
    template_dir = 'templates'  # where karma-<type>.conf.js templates live

    def get_existing_apps_from(self, app_list):
        """
        Retrieves the apps from the given app_list that exist on the file system.
        """
        project_root = self.get_project_root()
        existing_paths = []
        for app_name in app_list:
            # Dotted app names map onto nested directories under the
            # project root; only keep those that actually exist.
            app_name_components = app_name.split('.')
            app_path = os.path.join(*app_name_components)
            full_app_path = os.path.join(project_root, app_path)
            if os.path.exists(full_app_path):
                existing_paths.append(app_path)
        if self.verbosity >= 2:
            self.stdout.write("Running %s tests from apps: %s" % (self.test_type, ', '.join(existing_paths)))
        return existing_paths

    def usage(self, subcommand):
        """Extend the default usage text with the Karma test types that are
        actually available (derived from the template files on disk)."""
        # Default message when templates are missing
        types_message = mgmt.color_style().ERROR(
            "NOTE: You will need to run the following command to create the needed Karma config templates before "
            "running this command.\n"
            "      python manage.py makeangularsite"
        )
        # Check and see if templates exist
        template_path = os.path.join(self.get_default_site_app(), self.template_dir)
        if os.path.exists(template_path) and os.path.isdir(template_path):
            # Each karma-<type>.conf.js file advertises one test type; the
            # default type is marked with '*'.
            filename_matches = [re.match(r'^karma-(.*).conf.js$', filename)
                                for filename in os.listdir(template_path)]
            template_types = [match.group(1) for match in filename_matches if match]
            if len(template_types):
                types_message = '\n'.join(["The following types of Karma tests are available:"] +
                                          ["  %s%s" % (test_type, '*' if test_type == self.default_test_type else '')
                                           for test_type in template_types] +
                                          ["", "If no apps are listed, tests from all the INSTALLED_APPS will be run."])
        # Append template message to standard usage
        parent_usage = super(Command, self).usage(subcommand)
        return "%s\n\n%s" % (parent_usage, types_message)

    def handle(self, test_type=None, *args, **options):
        """Render the Karma config template for *test_type* and run Karma on
        it via a temporary config file."""
        self.verbosity = int(options.get('verbosity'))
        self.test_type = test_type or self.default_test_type
        # Determine template location
        karma_config_template = \
            os.path.join(self.get_default_site_app(), self.template_dir, 'karma-%s.conf.js' % self.test_type)
        if self.verbosity >= 2:
            self.stdout.write("Using karma template: %s" % karma_config_template)
        if not os.path.exists(karma_config_template):
            raise IOError("Karma template %s was not found." % karma_config_template)
        # Establish the Context for the template
        if options.get('greedy', False):
            # '**' makes the template glob every app directory.
            app_paths = ['**']
            if self.verbosity >= 2:
                self.stdout.write("Running %s tests for all applications in the project." % self.test_type)
        elif len(args):
            app_paths = self.get_existing_apps_from(set(args) & set(settings.INSTALLED_APPS))
        else:
            app_paths = self.get_existing_apps_from(settings.INSTALLED_APPS)
        context = template.Context(dict(options, **{
            'app_paths': app_paths,
            'djangular_root': self.get_djangular_root()
        }), autoescape=False)
        # Establish the template content in memory.  Decode/encode around
        # the render because Django templates operate on text.
        with open(karma_config_template, 'rb') as config_template:
            template_content = config_template.read()
            template_content = template_content.decode('utf-8')
            js_template = template.Template(template_content)
            template_content = js_template.render(context)
            template_content = template_content.encode('utf-8')
        if self.verbosity >= 3:
            self.stdout.write("\n")
            self.stdout.write("Karma config contents")
            self.stdout.write("---------------------")
            self.stdout.write(template_content)
            self.stdout.write("\n")
        if not template_content:
            raise IOError("The produced Karma config was empty.")
        # Write the template content to the temp file and close it, so the karma process can read it
        temp_config_file = tempfile.NamedTemporaryFile(suffix='.conf.js', prefix='tmp_karma_',
                                                       dir=self.get_default_site_app(),
                                                       delete=False)  # Manually delete so subprocess can read
        try:
            temp_config_file.write(template_content)
            temp_config_file.close()
            # Start the karma process
            self.stdout.write("\n")
            self.stdout.write("Starting Karma Server (https://github.com/karma-runner/karma)\n")
            self.stdout.write("-------------------------------------------------------------\n")
            subprocess.call(['karma', 'start', temp_config_file.name])
        # When the user kills the karma process, do nothing, then remove the temp file
        except KeyboardInterrupt:
            pass
        finally:
            os.remove(temp_config_file.name)
```
#### File: djangular/tests/utils.py
```python
import os
from djangular import utils
from django.test import SimpleTestCase
class SiteAndPathUtilsTest(SimpleTestCase):
    """Sanity-check SiteAndPathUtils path discovery."""

    site_utils = utils.SiteAndPathUtils()

    def test_djangular_root(self):
        # The djangular package root is the parent of the tests directory
        # that contains this file.
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        package_root = os.path.dirname(tests_dir)
        self.assertEqual(package_root, self.site_utils.get_djangular_root())
``` |
{
"source": "jiangleads/EC-",
"score": 3
} |
#### File: jiangleads/EC-/rename_era5_grib.py
```python
from __future__ import print_function
import traceback
import sys
from eccodes import *
# Return True when Str contains every fragment listed in SubStrList.
def IsSubString(SubStrList, Str):
    """True iff every string in *SubStrList* occurs in *Str*.

    An empty fragment list matches everything (vacuous truth), matching
    the behavior of the original flag-based loop.
    """
    return all(fragment in Str for fragment in SubStrList)
# List files of a given kind in a directory.
def GetFileList(FindPath, FlagStr=None):
    """Return the sorted full paths of entries directly under *FindPath*.

    When *FlagStr* is a non-empty list of fragments, keep only names that
    contain every fragment (see IsSubString); otherwise return everything.

    The default was changed from a mutable ``[]`` to ``None`` (same
    behavior, avoids the shared-default pitfall).
    """
    import os
    fragments = FlagStr or []
    paths = []
    for name in os.listdir(FindPath):
        if fragments and not IsSubString(fragments, name):
            continue
        paths.append(os.path.join(FindPath, name))
    # Sorting an empty list is a harmless no-op, so sort unconditionally.
    paths.sort()
    return paths
def example(filename):
    """Scan every GRIB message in *filename*, tracking its dataDate, then
    rename the file after the last dataDate seen.

    NOTE(review): only the final message's dataDate determines the new
    name; files mixing dates just log each change -- confirm intent.
    """
    f = open(filename, 'rb')
    keys = [
##        'Ni',
##        'Nj',
##        'latitudeOfFirstGridPointInDegrees',
##        'longitudeOfFirstGridPointInDegrees',
##        'latitudeOfLastGridPointInDegrees',
##        'longitudeOfLastGridPointInDegrees',
        'dataDate',
##        'dataTime'
    ]
    dataDate=0
    while 1:
        # One GRIB message per iteration; None signals end of file.
        gid = codes_grib_new_from_file(f)
        if gid is None:
            break
        for key in keys:
            try:
##                print(' %s: %s' % (key, codes_get(gid, key)))
                dataDate2=codes_get(gid, key)
                # Log only when the date changes between messages.
                if dataDate != dataDate2:
                    print(key,dataDate,dataDate2)
                    dataDate=dataDate2
##                    input()
                continue
            except KeyValueNotFoundError as err:
                # Full list of exceptions here:
                # https://confluence.ecmwf.int/display/ECC/Python+exception+classes
                print('  Key="%s" was not found: %s' % (key, err.msg))
            except CodesInternalError as err:
                print('Error with key="%s" : %s' % (key, err.msg))
##        print('There are %d values, average is %f, min is %f, max is %f' % (
##            codes_get_size(gid, 'values'),
##            codes_get(gid, 'average'),
##            codes_get(gid, 'min'),
##            codes_get(gid, 'max')
##        ))
        codes_release(gid)
    f.close()
    # Rename the file based on the last dataDate seen.
    newname=os.path.dirname(filename)+"/era5.CHV.levels."+str(dataDate)+".grib"
    os.rename(filename,newname)
    print(filename,'======>',newname)
import os
from eccodes import *
Path='/mnt/d/Downloads/' #
SubStrList=['.grib']
FileList=GetFileList(Path,SubStrList) # build the list of .grib files under Path
##
##
for eachfile in FileList: # process (and rename) each file in turn
    print(eachfile)
    example(eachfile)
``` |
{
"source": "jianglei12138/python2.7",
"score": 3
} |
#### File: youtube_dl/extractor/vevo.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
sanitized_Request,
parse_iso8601,
)
class VevoBaseIE(InfoExtractor):
    """Shared helpers for the Vevo extractors."""

    def _extract_json(self, webpage, video_id, item):
        """Pull *item* out of the page's __INITIAL_STORE__ JSON blob."""
        store = self._search_regex(
            r'window\.__INITIAL_STORE__\s*=\s*({.+?});\s*</script>',
            webpage, 'initial store')
        return self._parse_json(store, video_id)['default'][item]
class VevoIE(VevoBaseIE):
    '''
    Accepts urls from vevo.com or in the format 'vevo:{id}'
    (currently used by MTVIE and MySpaceIE)
    '''
    _VALID_URL = r'''(?x)
        (?:https?://www\.vevo\.com/watch/(?!playlist|genre)(?:[^/]+/(?:[^/]+/)?)?|
           https?://cache\.vevo\.com/m/html/embed\.html\?video=|
           https?://videoplayer\.vevo\.com/embed/embedded\?videoId=|
           vevo:)
        (?P<id>[^&?#]+)'''
    _TESTS = [{
        'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
        'md5': '95ee28ee45e70130e3ab02b0f579ae23',
        'info_dict': {
            'id': 'GB1101300280',
            'ext': 'mp4',
            'title': 'Hurts - Somebody to Die For',
            'timestamp': 1372057200,
            'upload_date': '20130624',
            'uploader': 'Hurts',
            'track': 'Somebody to Die For',
            'artist': 'Hurts',
            'genre': 'Pop',
        },
        'expected_warnings': ['Unable to download SMIL file'],
    }, {
        'note': 'v3 SMIL format',
        'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
        'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
        'info_dict': {
            'id': 'USUV71302923',
            'ext': 'mp4',
            'title': 'Cassadee Pope - I Wish I Could Break Your Heart',
            'timestamp': 1392796919,
            'upload_date': '20140219',
            'uploader': 'Cassadee Pope',
            'track': 'I Wish I Could Break Your Heart',
            'artist': 'Cassadee Pope',
            'genre': 'Country',
        },
        'expected_warnings': ['Unable to download SMIL file'],
    }, {
        'note': 'Age-limited video',
        'url': 'https://www.vevo.com/watch/justin-timberlake/tunnel-vision-explicit/USRV81300282',
        'info_dict': {
            'id': 'USRV81300282',
            'ext': 'mp4',
            'title': '<NAME> - Tunnel Vision (Explicit)',
            'age_limit': 18,
            'timestamp': 1372888800,
            'upload_date': '20130703',
            'uploader': '<NAME>',
            'track': 'Tunnel Vision (Explicit)',
            'artist': '<NAME>',
            'genre': 'Pop',
        },
        'expected_warnings': ['Unable to download SMIL file'],
    }, {
        'note': 'No video_info',
        'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000',
        'md5': '8b83cc492d72fc9cf74a02acee7dc1b0',
        'info_dict': {
            'id': 'USUV71503000',
            'ext': 'mp4',
            'title': 'K Camp - Till I Die',
            'age_limit': 18,
            'timestamp': 1449468000,
            'upload_date': '20151207',
            'uploader': 'K Camp',
            'track': 'Till I Die',
            'artist': 'K Camp',
            'genre': 'Rap/Hip-Hop',
        },
    }, {
        'note': 'Only available via webpage',
        'url': 'http://www.vevo.com/watch/GBUV71600656',
        'md5': '67e79210613865b66a47c33baa5e37fe',
        'info_dict': {
            'id': 'GBUV71600656',
            'ext': 'mp4',
            'title': 'ABC - Viva Love',
            'age_limit': 0,
            'timestamp': 1461830400,
            'upload_date': '20160428',
            'uploader': 'ABC',
            'track': 'Viva Love',
            'artist': 'ABC',
            'genre': 'Pop',
        },
        'expected_warnings': ['Failed to download video versions info'],
    }, {
        # no genres available
        'url': 'http://www.vevo.com/watch/INS171400764',
        'only_matching': True,
    }]
    _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com'
    # Numeric sourceType codes from the AuthenticateVideo API mapped to
    # the delivery mechanism they describe.
    _SOURCE_TYPES = {
        0: 'youtube',
        1: 'brightcove',
        2: 'http',
        3: 'hls_ios',
        4: 'hls',
        5: 'smil',  # http
        7: 'f4m_cc',
        8: 'f4m_ak',
        9: 'f4m_l3',
        10: 'ism',
        13: 'smil',  # rtmp
        18: 'dash',
    }
    # Numeric version codes mapped to the CDN/hosting tier serving them.
    _VERSIONS = {
        0: 'youtube',  # only in AuthenticateVideo videoVersions
        1: 'level3',
        2: 'akamai',
        3: 'level3',
        4: 'amazon',
    }

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        """Parse Vevo's SMIL <video> entries.

        The stream parameters (bitrates, codecs, dimensions) are encoded
        directly in the source path, so they are recovered with a verbose
        regex rather than from SMIL attributes.
        """
        formats = []
        els = smil.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
        for el in els:
            src = el.attrib['src']
            m = re.match(r'''(?xi)
                (?P<ext>[a-z0-9]+):
                (?P<path>
                    [/a-z0-9]+     # The directory and main part of the URL
                    _(?P<tbr>[0-9]+)k
                    _(?P<width>[0-9]+)x(?P<height>[0-9]+)
                    _(?P<vcodec>[a-z0-9]+)
                    _(?P<vbr>[0-9]+)
                    _(?P<acodec>[a-z0-9]+)
                    _(?P<abr>[0-9]+)
                    \.[a-z0-9]+    # File extension
                )''', src)
            if not m:
                continue

            format_url = self._SMIL_BASE_URL + m.group('path')
            formats.append({
                'url': format_url,
                'format_id': 'smil_' + m.group('tbr'),
                'vcodec': m.group('vcodec'),
                'acodec': m.group('acodec'),
                'tbr': int(m.group('tbr')),
                'vbr': int(m.group('vbr')),
                'abr': int(m.group('abr')),
                'ext': m.group('ext'),
                'width': int(m.group('width')),
                'height': int(m.group('height')),
            })
        return formats

    def _initialize_api(self, video_id):
        """Obtain an OAuth token (POST with empty body) and build the
        apiv2 URL template used by _call_api."""
        req = sanitized_Request(
            'http://www.vevo.com/auth', data=b'')
        webpage = self._download_webpage(
            req, None,
            note='Retrieving oauth token',
            errnote='Unable to retrieve oauth token')

        if 'THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION' in webpage:
            self.raise_geo_restricted(
                '%s said: This page is currently unavailable in your region' % self.IE_NAME)

        auth_info = self._parse_json(webpage, video_id)
        self._api_url_template = self.http_scheme() + '//apiv2.vevo.com/%s?token=' + auth_info['access_token']

    def _call_api(self, path, *args, **kwargs):
        """Fetch JSON from the apiv2 endpoint built by _initialize_api."""
        return self._download_json(self._api_url_template % path, *args, **kwargs)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # First try the legacy AuthenticateVideo endpoint; when it yields
        # nothing, fall back to the token-based apiv2 flow below.
        json_url = 'http://api.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
        response = self._download_json(
            json_url, video_id, 'Downloading video info',
            'Unable to download info', fatal=False) or {}
        video_info = response.get('video') or {}
        artist = None
        featured_artist = None
        uploader = None
        view_count = None
        formats = []

        if not video_info:
            # Status 909 means "use the new API"; other errors may still
            # carry a YouTube fallback id for geoblocked videos.
            if response and response.get('statusCode') != 909:
                ytid = response.get('errorInfo', {}).get('ytid')
                if ytid:
                    self.report_warning(
                        'Video is geoblocked, trying with the YouTube video %s' % ytid)
                    return self.url_result(ytid, 'Youtube', ytid)

                if 'statusMessage' in response:
                    raise ExtractorError('%s said: %s' % (
                        self.IE_NAME, response['statusMessage']), expected=True)
                raise ExtractorError('Unable to extract videos')

            self._initialize_api(video_id)
            video_info = self._call_api(
                'video/%s' % video_id, video_id, 'Downloading api video info',
                'Failed to download video info')

            video_versions = self._call_api(
                'video/%s/streams' % video_id, video_id,
                'Downloading video versions info',
                'Failed to download video versions info',
                fatal=False)

            # Some videos are only available via webpage (e.g.
            # https://github.com/rg3/youtube-dl/issues/9366)
            if not video_versions:
                webpage = self._download_webpage(url, video_id)
                video_versions = self._extract_json(webpage, video_id, 'streams')[video_id][0]

            timestamp = parse_iso8601(video_info.get('releaseDate'))
            artists = video_info.get('artists')
            if artists:
                artist = uploader = artists[0]['name']
            view_count = int_or_none(video_info.get('views', {}).get('total'))

            for video_version in video_versions:
                version = self._VERSIONS.get(video_version['version'])
                version_url = video_version.get('url')
                if not version_url:
                    continue

                if '.ism' in version_url:
                    continue
                elif '.mpd' in version_url:
                    formats.extend(self._extract_mpd_formats(
                        version_url, video_id, mpd_id='dash-%s' % version,
                        note='Downloading %s MPD information' % version,
                        errnote='Failed to download %s MPD information' % version,
                        fatal=False))
                elif '.m3u8' in version_url:
                    formats.extend(self._extract_m3u8_formats(
                        version_url, video_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls-%s' % version,
                        note='Downloading %s m3u8 information' % version,
                        errnote='Failed to download %s m3u8 information' % version,
                        fatal=False))
                else:
                    # Progressive HTTP: stream parameters are embedded in
                    # the file name, as in _parse_smil_formats above.
                    m = re.search(r'''(?xi)
                        _(?P<width>[0-9]+)x(?P<height>[0-9]+)
                        _(?P<vcodec>[a-z0-9]+)
                        _(?P<vbr>[0-9]+)
                        _(?P<acodec>[a-z0-9]+)
                        _(?P<abr>[0-9]+)
                        \.(?P<ext>[a-z0-9]+)''', version_url)
                    if not m:
                        continue

                    formats.append({
                        'url': version_url,
                        'format_id': 'http-%s-%s' % (version, video_version['quality']),
                        'vcodec': m.group('vcodec'),
                        'acodec': m.group('acodec'),
                        'vbr': int(m.group('vbr')),
                        'abr': int(m.group('abr')),
                        'ext': m.group('ext'),
                        'width': int(m.group('width')),
                        'height': int(m.group('height')),
                    })
        else:
            # Legacy AuthenticateVideo payload: dates come in the
            # /Date(ms)/ form and stream data as embedded XML renditions.
            timestamp = int_or_none(self._search_regex(
                r'/Date\((\d+)\)/',
                video_info['releaseDate'], 'release date', fatal=False),
                scale=1000)
            artists = video_info.get('mainArtists')
            if artists:
                artist = uploader = artists[0]['artistName']

            featured_artists = video_info.get('featuredArtists')
            if featured_artists:
                featured_artist = featured_artists[0]['artistName']

            smil_parsed = False
            for video_version in video_info['videoVersions']:
                version = self._VERSIONS.get(video_version['version'])
                if version == 'youtube':
                    continue
                else:
                    source_type = self._SOURCE_TYPES.get(video_version['sourceType'])
                    renditions = compat_etree_fromstring(video_version['data'])
                    if source_type == 'http':
                        for rend in renditions.findall('rendition'):
                            attr = rend.attrib
                            formats.append({
                                'url': attr['url'],
                                'format_id': 'http-%s-%s' % (version, attr['name']),
                                'height': int_or_none(attr.get('frameheight')),
                                'width': int_or_none(attr.get('frameWidth')),
                                'tbr': int_or_none(attr.get('totalBitrate')),
                                'vbr': int_or_none(attr.get('videoBitrate')),
                                'abr': int_or_none(attr.get('audioBitrate')),
                                'vcodec': attr.get('videoCodec'),
                                'acodec': attr.get('audioCodec'),
                            })
                    elif source_type == 'hls':
                        formats.extend(self._extract_m3u8_formats(
                            renditions.find('rendition').attrib['url'], video_id,
                            'mp4', 'm3u8_native', m3u8_id='hls-%s' % version,
                            note='Downloading %s m3u8 information' % version,
                            errnote='Failed to download %s m3u8 information' % version,
                            fatal=False))
                    elif source_type == 'smil' and version == 'level3' and not smil_parsed:
                        # Parse the SMIL playlist at most once per video.
                        formats.extend(self._extract_smil_formats(
                            renditions.find('rendition').attrib['url'], video_id, False))
                        smil_parsed = True
        self._sort_formats(formats)

        track = video_info['title']
        if featured_artist:
            artist = '%s ft. %s' % (artist, featured_artist)
        title = '%s - %s' % (artist, track) if artist else track

        genres = video_info.get('genres')
        genre = (
            genres[0] if genres and isinstance(genres, list) and
            isinstance(genres[0], compat_str) else None)

        is_explicit = video_info.get('isExplicit')
        if is_explicit is True:
            age_limit = 18
        elif is_explicit is False:
            age_limit = 0
        else:
            age_limit = None

        duration = video_info.get('duration')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'),
            'timestamp': timestamp,
            'uploader': uploader,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'track': track,
            'artist': uploader,
            'genre': genre,
        }
class VevoPlaylistIE(VevoBaseIE):
    _VALID_URL = r'https?://www\.vevo\.com/watch/(?P<kind>playlist|genre)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29',
        'info_dict': {
            'id': 'dadbf4e7-b99f-4184-9670-6f0e547b6a29',
            'title': 'Best-Of: Birdman',
        },
        'playlist_count': 10,
    }, {
        'url': 'http://www.vevo.com/watch/genre/rock',
        'info_dict': {
            'id': 'rock',
            'title': 'Rock',
        },
        'playlist_count': 20,
    }, {
        'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29?index=0',
        'md5': '32dcdfddddf9ec6917fc88ca26d36282',
        'info_dict': {
            'id': 'USCMV1100073',
            'ext': 'mp4',
            'title': 'Birdman - Y.U. MAD',
            'timestamp': 1323417600,
            'upload_date': '20111209',
            'uploader': 'Birdman',
            'track': 'Y.U. MAD',
            'artist': 'Birdman',
            'genre': 'Rap/Hip-Hop',
        },
        'expected_warnings': ['Unable to download SMIL file'],
    }, {
        'url': 'http://www.vevo.com/watch/genre/rock?index=0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a Vevo playlist or genre page.

        When the URL carries an ``index`` query parameter the page refers to
        a single entry, which is delegated to VevoIE instead.
        """
        url_match = re.match(self._VALID_URL, url)
        playlist_kind = url_match.group('kind')
        playlist_id = url_match.group('id')
        webpage = self._download_webpage(url, playlist_id)
        query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        index = query.get('index', [None])[0]
        if index:
            # Single-video mode: locate the vevo:// id in a <meta> tag and
            # hand it over to the regular video extractor.
            video_id = self._search_regex(
                r'<meta[^>]+content=(["\'])vevo://video/(?P<id>.+?)\1[^>]*>',
                webpage, 'video id', default=None, group='id')
            if video_id:
                return self.url_result('vevo:%s' % video_id, VevoIE.ie_key())
        playlists = self._extract_json(webpage, playlist_id, '%ss' % playlist_kind)
        # Playlist pages key the JSON by an internal id; genre pages key it
        # by the slug we already have.
        if playlist_kind == 'playlist':
            playlist = list(playlists.values())[0]
        else:
            playlist = playlists[playlist_id]
        entries = []
        for isrc in playlist['isrcs']:
            entries.append(self.url_result('vevo:%s' % isrc, VevoIE.ie_key()))
        return self.playlist_result(
            entries, playlist.get('playlistId') or playlist_id,
            playlist.get('name'), playlist.get('description'))
``` |
{
"source": "jianglei12138/python-3.5.1",
"score": 3
} |
#### File: main/assets/pythonforandroid.py
```python
import youtube_dl
class SimpleYDL(youtube_dl.YoutubeDL):
    # Thin YoutubeDL subclass that registers the default info extractors
    # right away, so extract_info() can be called without further setup.
    def __init__(self, *args, **kargs):
        super(SimpleYDL, self).__init__(*args, **kargs)
        self.add_default_info_extractors()
def get_stream(ytid):
    """Resolve *ytid* with youtube-dl and return its itag-18 stream info.

    Returns a dict with 'duration' (from the extractor) and 'stream'
    (the direct URL of format id '18').  Raises KeyError when the video
    exposes no format with id '18'.
    """
    options = {
        'nocheckcertificate': True,
        'skip_download': True,
        'cachedir': False,
        'format': 18,
        'prefer_insecure': True,
    }
    info = SimpleYDL(options).extract_info(ytid, process=False, download=False)
    # Map each format id to its direct URL.
    url_by_format = {}
    for fmt in info['formats']:
        url_by_format[fmt['format_id']] = fmt['url']
    url_by_format['title'] = info['title']
    url_by_format['info'] = info['description']
    result = {'duration': info['duration']}
    result['stream'] = url_by_format['18']
    return result
def add_func(a, b):
    """Return ``a + b`` (numeric sum or sequence concatenation)."""
    total = a + b
    return total
```
#### File: youtube_dl/downloader/f4m.py
```python
from __future__ import division, unicode_literals
import base64
import io
import itertools
import os
import time
from .fragment import FragmentFD
from ..compat import (
compat_etree_fromstring,
compat_urlparse,
compat_urllib_error,
compat_urllib_parse_urlparse,
)
from ..utils import (
encodeFilename,
fix_xml_ampersands,
sanitize_open,
struct_pack,
struct_unpack,
xpath_text,
)
class FlvReader(io.BytesIO):
    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html
    """

    # Utility functions for reading numbers and strings
    def read_unsigned_long_long(self):
        # 64-bit big-endian unsigned integer
        return struct_unpack('!Q', self.read(8))[0]

    def read_unsigned_int(self):
        # 32-bit big-endian unsigned integer
        return struct_unpack('!I', self.read(4))[0]

    def read_unsigned_char(self):
        # 8-bit unsigned integer
        return struct_unpack('!B', self.read(1))[0]

    def read_string(self):
        # NUL-terminated byte string; the terminator is consumed but
        # not included in the returned value.
        res = b''
        while True:
            char = self.read(1)
            if char == b'\x00':
                break
            res += char
        return res

    def read_box_info(self):
        """
        Read a box and return the info as a tuple: (box_size, box_type, box_data)
        """
        real_size = size = self.read_unsigned_int()
        box_type = self.read(4)
        header_end = 8
        if size == 1:
            # Size field of 1 signals an extended 64-bit size follows
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read(real_size - header_end)

    def read_asrt(self):
        # Parse a Segment Run Table ('asrt') box into a dict with a
        # 'segment_run' list of (first_segment, fragments_per_segment).
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        quality_entry_count = self.read_unsigned_char()
        # QualityEntryCount
        for i in range(quality_entry_count):
            self.read_string()
        segment_run_count = self.read_unsigned_int()
        segments = []
        for i in range(segment_run_count):
            first_segment = self.read_unsigned_int()
            fragments_per_segment = self.read_unsigned_int()
            segments.append((first_segment, fragments_per_segment))
        return {
            'segment_run': segments,
        }

    def read_afrt(self):
        # Parse a Fragment Run Table ('afrt') box into a dict with a
        # 'fragments' list of per-fragment metadata.
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # time scale
        self.read_unsigned_int()
        quality_entry_count = self.read_unsigned_char()
        # QualitySegmentUrlModifiers
        for i in range(quality_entry_count):
            self.read_string()
        fragments_count = self.read_unsigned_int()
        fragments = []
        for i in range(fragments_count):
            first = self.read_unsigned_int()
            first_ts = self.read_unsigned_long_long()
            duration = self.read_unsigned_int()
            if duration == 0:
                # A zero duration entry carries a discontinuity indicator byte
                discontinuity_indicator = self.read_unsigned_char()
            else:
                discontinuity_indicator = None
            fragments.append({
                'first': first,
                'ts': first_ts,
                'duration': duration,
                'discontinuity_indicator': discontinuity_indicator,
            })
        return {
            'fragments': fragments,
        }

    def read_abst(self):
        # Parse a Bootstrap Info ('abst') box: skips header/metadata fields,
        # then collects the nested 'asrt' and 'afrt' tables.
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        self.read_unsigned_int()  # BootstrapinfoVersion
        # Profile,Live,Update,Reserved
        flags = self.read_unsigned_char()
        live = flags & 0x20 != 0
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
        self.read_unsigned_long_long()
        # SmpteTimeCodeOffset
        self.read_unsigned_long_long()
        self.read_string()  # MovieIdentifier
        server_count = self.read_unsigned_char()
        # ServerEntryTable
        for i in range(server_count):
            self.read_string()
        quality_count = self.read_unsigned_char()
        # QualityEntryTable
        for i in range(quality_count):
            self.read_string()
        # DrmData
        self.read_string()
        # MetaData
        self.read_string()
        segments_count = self.read_unsigned_char()
        segments = []
        for i in range(segments_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'asrt'
            segment = FlvReader(box_data).read_asrt()
            segments.append(segment)
        fragments_run_count = self.read_unsigned_char()
        fragments = []
        for i in range(fragments_run_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'afrt'
            fragments.append(FlvReader(box_data).read_afrt())
        return {
            'segments': segments,
            'fragments': fragments,
            'live': live,
        }

    def read_bootstrap_info(self):
        # The stream must start with a single 'abst' box
        total_size, box_type, box_data = self.read_box_info()
        assert box_type == b'abst'
        return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
    """Parse a raw bootstrap-info ('abst') blob and return the decoded dict."""
    reader = FlvReader(bootstrap_bytes)
    return reader.read_bootstrap_info()
def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    segment_runs = boot_info['segments'][0]['segment_run']
    fragment_runs = boot_info['fragments'][0]['fragments']
    # Fragment numbers increase monotonically from the first advertised
    # number, continuing across segment boundaries.
    frag_number = fragment_runs[0]['first']
    pairs = []
    for segment, fragments_count in segment_runs:
        for _ in range(fragments_count):
            pairs.append((segment, frag_number))
            frag_number += 1
    if boot_info['live']:
        # For live streams only the most recent fragments are reliably
        # available, so keep just the last two.
        pairs = pairs[-2:]
    return pairs
def write_unsigned_int(stream, val):
    # Write val as a 32-bit big-endian unsigned integer.
    stream.write(struct_pack('!I', val))
def write_unsigned_int_24(stream, val):
    # Write the low 24 bits of val, big-endian, by packing 4 bytes and
    # dropping the most significant one.
    stream.write(struct_pack('!I', val)[1:])
def write_flv_header(stream):
    """Writes the FLV header to stream"""
    signature = b'FLV\x01'                  # magic + version 1
    type_flags = b'\x05'                    # audio and video present
    header_size = b'\x00\x00\x00\x09'       # fixed 9-byte header length
    previous_tag_size = b'\x00\x00\x00\x00' # PreviousTagSize0 is always zero
    stream.write(signature + type_flags + header_size + previous_tag_size)
def write_metadata_tag(stream, metadata):
    """Writes optional metadata tag to stream"""
    if not metadata:
        return
    script_tag = b'\x12'
    flv_tag_header_len = 11
    body_len = len(metadata)
    # Tag header: tag type, 24-bit body size, then 4 timestamp bytes and
    # 3 stream-id bytes (all zero).
    stream.write(script_tag)
    write_unsigned_int_24(stream, body_len)
    stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
    stream.write(metadata)
    # Trailing PreviousTagSize: full tag length including the 11-byte header.
    write_unsigned_int(stream, flv_tag_header_len + body_len)
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
class F4mFD(FragmentFD):
    """
    A downloader for f4m manifests or AdobeHDS.
    """

    FD_NAME = 'f4m'

    def _get_unencrypted_media(self, doc):
        # Return the manifest's <media> nodes that are not DRM-protected;
        # reports an error (via self.report_error) when none remain.
        media = doc.findall(_add_ns('media'))
        if not media:
            self.report_error('No media found')
        for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
                  doc.findall(_add_ns('drmAdditionalHeaderSet'))):
            # If id attribute is missing it's valid for all media nodes
            # without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
            if 'id' not in e.attrib:
                self.report_error('Missing ID in f4m DRM')
        media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
                            'drmAdditionalHeaderSetId' not in e.attrib,
                            media))
        if not media:
            self.report_error('Unsupported DRM')
        return media

    def _get_bootstrap_from_url(self, bootstrap_url):
        # Fetch and decode a bootstrap-info blob from a URL.
        bootstrap = self.ydl.urlopen(bootstrap_url).read()
        return read_bootstrap_info(bootstrap)

    def _update_live_fragments(self, bootstrap_url, latest_fragment):
        # For live streams: poll the bootstrap URL until fragments newer than
        # latest_fragment appear, retrying up to 30 times with 5s pauses.
        fragments_list = []
        retries = 30
        while (not fragments_list) and (retries > 0):
            boot_info = self._get_bootstrap_from_url(bootstrap_url)
            fragments_list = build_fragments_list(boot_info)
            fragments_list = [f for f in fragments_list if f[1] > latest_fragment]
            if not fragments_list:
                # Retry after a while
                time.sleep(5.0)
                retries -= 1
        if not fragments_list:
            self.report_error('Failed to update fragments')
        return fragments_list

    def _parse_bootstrap_node(self, node, base_url):
        # Returns (boot_info, bootstrap_url); bootstrap_url is None when the
        # bootstrap info was inline (base64 in the node text).
        # Sometimes non empty inline bootstrap info can be specified along
        # with bootstrap url attribute (e.g. dummy inline bootstrap info
        # contains whitespace characters in [1]). We will prefer bootstrap
        # url over inline bootstrap info when present.
        # 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m
        bootstrap_url = node.get('url')
        if bootstrap_url:
            bootstrap_url = compat_urlparse.urljoin(
                base_url, bootstrap_url)
            boot_info = self._get_bootstrap_from_url(bootstrap_url)
        else:
            bootstrap_url = None
            bootstrap = base64.b64decode(node.text.encode('ascii'))
            boot_info = read_bootstrap_info(bootstrap)
        return boot_info, bootstrap_url

    def real_download(self, filename, info_dict):
        # Download the manifest, pick a rendition, then fetch fragments one
        # by one, appending each fragment's 'mdat' payload to the FLV output.
        man_url = info_dict['url']
        requested_bitrate = info_dict.get('tbr')
        self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
        urlh = self.ydl.urlopen(man_url)
        man_url = urlh.geturl()
        # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
        # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
        # and https://github.com/rg3/youtube-dl/issues/7823)
        manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()
        doc = compat_etree_fromstring(manifest)
        formats = [(int(f.attrib.get('bitrate', -1)), f)
                   for f in self._get_unencrypted_media(doc)]
        if requested_bitrate is None:
            # get the best format
            formats = sorted(formats, key=lambda f: f[0])
            rate, media = formats[-1]
        else:
            rate, media = list(filter(
                lambda f: int(f[0]) == requested_bitrate, formats))[0]

        base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
        boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, base_url)
        live = boot_info['live']
        metadata_node = media.find(_add_ns('metadata'))
        if metadata_node is not None:
            metadata = base64.b64decode(metadata_node.text.encode('ascii'))
        else:
            metadata = None

        fragments_list = build_fragments_list(boot_info)
        test = self.params.get('test', False)
        if test:
            # We only download the first fragment
            fragments_list = fragments_list[:1]
        total_frags = len(fragments_list)
        # For some akamai manifests we'll need to add a query to the fragment url
        akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))

        ctx = {
            'filename': filename,
            'total_frags': total_frags,
            'live': live,
        }

        self._prepare_frag_download(ctx)

        dest_stream = ctx['dest_stream']

        write_flv_header(dest_stream)
        if not live:
            # For live streams the metadata tag is skipped (its duration
            # would be unknown/wrong)
            write_metadata_tag(dest_stream, metadata)

        base_url_parsed = compat_urllib_parse_urlparse(base_url)

        self._start_frag_download(ctx)

        frags_filenames = []
        while fragments_list:
            seg_i, frag_i = fragments_list.pop(0)
            name = 'Seg%d-Frag%d' % (seg_i, frag_i)
            query = []
            if base_url_parsed.query:
                query.append(base_url_parsed.query)
            if akamai_pv:
                query.append(akamai_pv.strip(';'))
            if info_dict.get('extra_param_to_segment_url'):
                query.append(info_dict['extra_param_to_segment_url'])
            url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
            frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
            try:
                success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()})
                if not success:
                    return False
                (down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
                down_data = down.read()
                down.close()
                reader = FlvReader(down_data)
                while True:
                    # Scan boxes until the media-data payload is found
                    _, box_type, box_data = reader.read_box_info()
                    if box_type == b'mdat':
                        dest_stream.write(box_data)
                        break
                if live:
                    os.remove(encodeFilename(frag_sanitized))
                else:
                    frags_filenames.append(frag_sanitized)
            except (compat_urllib_error.HTTPError, ) as err:
                if live and (err.code == 404 or err.code == 410):
                    # We didn't keep up with the live window. Continue
                    # with the next available fragment.
                    msg = 'Fragment %d unavailable' % frag_i
                    self.report_warning(msg)
                    fragments_list = []
                else:
                    raise

            if not fragments_list and not test and live and bootstrap_url:
                # Live stream still running: refresh the fragment list from
                # the bootstrap URL and keep going.
                fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
                total_frags += len(fragments_list)
                if fragments_list and (fragments_list[0][1] > frag_i + 1):
                    msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
                    self.report_warning(msg)

        self._finish_frag_download(ctx)

        for frag_file in frags_filenames:
            os.remove(encodeFilename(frag_file))

        return True
```
#### File: youtube_dl/extractor/cinemassacre.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
from .screenwavemedia import ScreenwaveMediaIE
class CinemassacreIE(InfoExtractor):
    # NOTE: made the pattern a raw string; the previous plain string relied on
    # '\.' passing through unrecognized escapes, which is deprecated syntax
    # (DeprecationWarning since Python 3.6). The string value is unchanged.
    _VALID_URL = r'https?://(?:www\.)?cinemassacre\.com/(?P<date_y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/(?P<display_id>[^?#/]+)'
    _TESTS = [
        {
            'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
            'md5': 'fde81fbafaee331785f58cd6c0d46190',
            'info_dict': {
                'id': 'Cinemassacre-19911',
                'ext': 'mp4',
                'upload_date': '20121110',
                'title': '“Angry Video Game Nerd: The Movie” – Trailer',
                'description': 'md5:fb87405fcb42a331742a0dce2708560b',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
            'md5': 'd72f10cd39eac4215048f62ab477a511',
            'info_dict': {
                'id': 'Cinemassacre-521be8ef82b16',
                'ext': 'mp4',
                'upload_date': '20131002',
                'title': 'The Mummy’s Hand (1940)',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            # Youtube embedded video
            'url': 'http://cinemassacre.com/2006/12/07/chronologically-confused-about-bad-movie-and-video-game-sequel-titles/',
            'md5': 'ec9838a5520ef5409b3e4e42fcb0a3b9',
            'info_dict': {
                'id': 'OEVzPCY2T-g',
                'ext': 'webm',
                'title': 'AVGN: Chronologically Confused about Bad Movie and Video Game Sequel Titles',
                'upload_date': '20061207',
                'uploader': 'Cinemassacre',
                'uploader_id': 'JamesNintendoNerd',
                'description': 'md5:784734696c2b8b7f4b8625cc799e07f6',
            }
        },
        {
            # Youtube embedded video
            'url': 'http://cinemassacre.com/2006/09/01/mckids/',
            'md5': '7393c4e0f54602ad110c793eb7a6513a',
            'info_dict': {
                'id': 'FnxsNhuikpo',
                'ext': 'webm',
                'upload_date': '20060901',
                'uploader': 'Cinemassacre Extra',
                'description': 'md5:de9b751efa9e45fbaafd9c8a1123ed53',
                'uploader_id': 'Cinemassacre',
                'title': 'AVGN: McKids',
            }
        },
        {
            'url': 'http://cinemassacre.com/2015/05/25/mario-kart-64-nintendo-64-james-mike-mondays/',
            'md5': '1376908e49572389e7b06251a53cdd08',
            'info_dict': {
                'id': 'Cinemassacre-555779690c440',
                'ext': 'mp4',
                'description': 'Let’s Play Mario Kart 64 !! Mario Kart 64 is a classic go-kart racing game released for the Nintendo 64 (N64). Today James & Mike do 4 player Battle Mode with Kyle and Bootsy!',
                'title': 'Mario Kart 64 (Nintendo 64) James & Mike Mondays',
                'upload_date': '20150525',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        }
    ]

    def _real_extract(self, url):
        """Extract a cinemassacre.com post.

        The post embeds either a ScreenwaveMedia player or a YouTube iframe;
        either way the embed URL is returned as a transparent url_result so
        the matching extractor finishes the job.
        """
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')
        # Upload date is encoded in the URL path (YYYY/MM/DD).
        video_date = mobj.group('date_y') + mobj.group('date_m') + mobj.group('date_d')
        webpage = self._download_webpage(url, display_id)

        playerdata_url = self._search_regex(
            [
                ScreenwaveMediaIE.EMBED_PATTERN,
                r'<iframe[^>]+src="(?P<url>(?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
            ],
            webpage, 'player data URL', default=None, group='url')
        if not playerdata_url:
            raise ExtractorError('Unable to find player data')

        video_title = self._html_search_regex(
            r'<title>(?P<title>.+?)\|', webpage, 'title')
        video_description = self._html_search_regex(
            r'<div class="entry-content">(?P<description>.+?)</div>',
            webpage, 'description', flags=re.DOTALL, fatal=False)
        video_thumbnail = self._og_search_thumbnail(webpage)

        return {
            '_type': 'url_transparent',
            'display_id': display_id,
            'title': video_title,
            'description': video_description,
            'upload_date': video_date,
            'thumbnail': video_thumbnail,
            'url': playerdata_url,
        }
```
#### File: youtube_dl/extractor/comcarcoff.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_duration,
parse_iso8601,
)
class ComCarCoffIE(InfoExtractor):
    # Extractor for comediansincarsgettingcoffee.com; the actual media is
    # hosted on Crackle, so extraction is delegated via a crackle: URL.
    _VALID_URL = r'http://(?:www\.)?comediansincarsgettingcoffee\.com/(?P<id>[a-z0-9\-]*)'
    _TESTS = [{
        'url': 'http://comediansincarsgettingcoffee.com/miranda-sings-happy-thanksgiving-miranda/',
        'info_dict': {
            'id': '2494164',
            'ext': 'mp4',
            'upload_date': '20141127',
            'timestamp': 1417107600,
            'duration': 1232,
            'title': 'Happy Thanksgiving Miranda',
            'description': '<NAME> and his special guest Miranda Sings cruise around town in search of coffee, complaining and apologizing along the way.',
        },
        'params': {
            'skip_download': 'requires ffmpeg',
        }
    }]

    def _real_extract(self, url):
        """Extract the site's JSON app state and delegate to the Crackle extractor."""
        display_id = self._match_id(url)
        if not display_id:
            # The id group may match an empty string (homepage); fall back
            # to a fixed display id in that case.
            display_id = 'comediansincarsgettingcoffee.com'
        webpage = self._download_webpage(url, display_id)

        full_data = self._parse_json(
            self._search_regex(
                r'window\.app\s*=\s*({.+?});\n', webpage, 'full data json'),
            display_id)['videoData']
        display_id = full_data['activeVideo']['video']
        # Episodes live under 'videos'; one-off clips under 'singleshots'.
        video_data = full_data.get('videos', {}).get(display_id) or full_data['singleshots'][display_id]

        video_id = compat_str(video_data['mediaId'])
        thumbnails = [{
            'url': video_data['images']['thumb'],
        }, {
            'url': video_data['images']['poster'],
        }]

        # The metadata exposes both numeric and textual timestamp/duration
        # fields depending on the page; try the numeric form first.
        timestamp = int_or_none(video_data.get('pubDateTime')) or parse_iso8601(
            video_data.get('pubDate'))
        duration = int_or_none(video_data.get('durationSeconds')) or parse_duration(
            video_data.get('duration'))

        return {
            '_type': 'url_transparent',
            'url': 'crackle:%s' % video_id,
            'id': video_id,
            'display_id': display_id,
            'title': video_data['title'],
            'description': video_data.get('description'),
            'timestamp': timestamp,
            'duration': duration,
            'thumbnails': thumbnails,
            'season_number': int_or_none(video_data.get('season')),
            'episode_number': int_or_none(video_data.get('episode')),
            'webpage_url': 'http://comediansincarsgettingcoffee.com/%s' % (video_data.get('urlSlug', video_data.get('slug'))),
        }
```
#### File: youtube_dl/extractor/discovery.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
)
from ..compat import compat_str
class DiscoveryIE(InfoExtractor):
    # Covers the whole Discovery network family; every site shares the same
    # JSON playlist endpoint (reached by appending ?flat=1 to the page URL).
    _VALID_URL = r'''(?x)http://(?:www\.)?(?:
            discovery|
            investigationdiscovery|
            discoverylife|
            animalplanet|
            ahctv|
            destinationamerica|
            sciencechannel|
            tlc|
            velocity
        )\.com/(?:[^/]+/)*(?P<id>[^./?#]+)'''
    _TESTS = [{
        'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
        'info_dict': {
            'id': '20769',
            'ext': 'mp4',
            'title': 'Mission Impossible Outtakes',
            'description': ('Watch <NAME> and <NAME> practice being'
                            ' each other -- to the point of confusing Jamie\'s dog -- and '
                            'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
                            ' back.'),
            'duration': 156,
            'timestamp': 1302032462,
            'upload_date': '20110405',
        },
        'params': {
            'skip_download': True,  # requires ffmpeg
        }
    }, {
        'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mythbusters-the-simpsons',
        'info_dict': {
            'id': 'mythbusters-the-simpsons',
            'title': 'MythBusters: The Simpsons',
        },
        'playlist_mincount': 10,
    }, {
        'url': 'http://www.animalplanet.com/longfin-eels-maneaters/',
        'info_dict': {
            'id': '78326',
            'ext': 'mp4',
            'title': 'Longfin Eels: Maneaters?',
            'description': '<NAME> tests whether or not New Zealand\'s longfin eels are man-eaters by covering himself in fish guts and getting in the water with them.',
            'upload_date': '20140725',
            'timestamp': 1406246400,
            'duration': 116,
        },
    }]

    def _real_extract(self, url):
        """Fetch the page's flat JSON playlist and return it as a playlist result.

        Single-video pages are just one-entry playlists from the API's
        point of view, so both cases share the same code path.
        """
        display_id = self._match_id(url)
        info = self._download_json(url + '?flat=1', display_id)

        video_title = info.get('playlist_title') or info.get('video_title')

        entries = [{
            'id': compat_str(video_info['id']),
            'formats': self._extract_m3u8_formats(
                video_info['src'], display_id, 'mp4', 'm3u8_native', m3u8_id='hls',
                note='Download m3u8 information for video %d' % (idx + 1)),
            'title': video_info['title'],
            'description': video_info.get('description'),
            'duration': parse_duration(video_info.get('video_length')),
            'webpage_url': video_info.get('href') or video_info.get('url'),
            'thumbnail': video_info.get('thumbnailURL'),
            'alt_title': video_info.get('secondary_title'),
            'timestamp': parse_iso8601(video_info.get('publishedDate')),
        } for idx, video_info in enumerate(info['playlist'])]

        return self.playlist_result(entries, display_id, video_title)
```
#### File: youtube_dl/extractor/fox.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import smuggle_url
class FOXIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?fox\.com/watch/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.fox.com/watch/255180355939/7684182528',
        'md5': 'ebd296fcc41dd4b19f8115d8461a3165',
        'info_dict': {
            'id': '255180355939',
            'ext': 'mp4',
            'title': 'Official Trailer: Gotham',
            'description': 'Tracing the rise of the great DC Comics Super-Villains and vigilantes, Gotham reveals an entirely new chapter that has never been told.',
            'duration': 129,
        },
        'add_ie': ['ThePlatform'],
    }

    def _real_extract(self, url):
        """Resolve a fox.com watch page to its ThePlatform release URL."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page embeds a PDK player config JSON; its release_url points
        # at ThePlatform, which handles the actual format extraction.
        player_config = self._parse_json(self._search_regex(
            r'"fox_pdk_player"\s*:\s*({[^}]+?})', webpage, 'fox_pdk_player'),
            video_id)
        release_url = player_config['release_url'] + '&switch=http'

        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': smuggle_url(release_url, {'force_smil_url': True}),
            'id': video_id,
        }
```
#### File: youtube_dl/extractor/franceinter.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import int_or_none
class FranceInterIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
        'md5': '4764932e466e6f6c79c317d2e74f6884',
        'info_dict': {
            'id': '793962',
            'ext': 'mp3',
            'title': 'L’Histoire dans les jeux vidéo',
            'description': 'md5:7e93ddb4451e7530022792240a3049c7',
            'timestamp': 1387369800,
            'upload_date': '20131218',
        },
    }

    def _real_extract(self, url):
        """Extract a France Inter radio replay (a single direct mp3)."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The player anchor's href is a site-relative path to the mp3.
        audio_path = self._search_regex(
            r'<a id="player".+?href="([^"]+)"', webpage, 'video url')
        video_url = 'http://www.franceinter.fr/' + audio_path

        title = self._html_search_regex(
            r'<span class="title-diffusion">(.+?)</span>', webpage, 'title')
        description = self._html_search_regex(
            r'<span class="description">(.*?)</span>',
            webpage, 'description', fatal=False)
        timestamp = int_or_none(self._search_regex(
            r'data-date="(\d+)"', webpage, 'upload date', fatal=False))

        info = {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
        }
        # Audio-only stream: expose one format with the video codec disabled.
        info['formats'] = [{
            'url': video_url,
            'vcodec': 'none',
        }]
        return info
```
#### File: youtube_dl/extractor/funimation.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
determine_ext,
encode_dict,
int_or_none,
sanitized_Request,
ExtractorError,
urlencode_postdata
)
class FunimationIE(InfoExtractor):
    # Supports optional account login (via .netrc machine 'funimation') and
    # extracts formats by scraping the page with two different User-Agents.
    _VALID_URL = r'https?://(?:www\.)?funimation\.com/shows/[^/]+/videos/(?:official|promotional)/(?P<id>[^/?#&]+)'

    _NETRC_MACHINE = 'funimation'

    _TESTS = [{
        'url': 'http://www.funimation.com/shows/air/videos/official/breeze',
        'info_dict': {
            'id': '658',
            'display_id': 'breeze',
            'ext': 'mp4',
            'title': 'Air - 1 - Breeze',
            'description': 'md5:1769f43cd5fc130ace8fd87232207892',
            'thumbnail': 're:https?://.*\.jpg',
        },
    }, {
        'url': 'http://www.funimation.com/shows/hacksign/videos/official/role-play',
        'info_dict': {
            'id': '31128',
            'display_id': 'role-play',
            'ext': 'mp4',
            'title': '.hack//SIGN - 1 - Role Play',
            'description': 'md5:b602bdc15eef4c9bbb201bb6e6a4a2dd',
            'thumbnail': 're:https?://.*\.jpg',
        },
    }, {
        'url': 'http://www.funimation.com/shows/attack-on-titan-junior-high/videos/promotional/broadcast-dub-preview',
        'info_dict': {
            'id': '9635',
            'display_id': 'broadcast-dub-preview',
            'ext': 'mp4',
            'title': 'Attack on Titan: Junior High - Broadcast Dub Preview',
            'description': 'md5:f8ec49c0aff702<PASSWORD>',
            'thumbnail': 're:https?://.*\.(?:jpg|png)',
        },
    }]

    def _login(self):
        """Log in with the configured credentials, if any.

        Returns silently on success or when no credentials are configured;
        raises ExtractorError when the login form rejects the credentials.
        """
        (username, password) = self._get_login_info()
        if username is None:
            return
        data = urlencode_postdata(encode_dict({
            'email_field': username,
            'password_field': password,
        }))
        login_request = sanitized_Request('http://www.funimation.com/login', data, headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0',
            'Content-Type': 'application/x-www-form-urlencoded'
        })
        login_page = self._download_webpage(
            login_request, None, 'Logging in as %s' % username)
        # A logout link on the response page means we are authenticated.
        if any(p in login_page for p in ('funimation.com/logout', '>Log Out<')):
            return
        error = self._html_search_regex(
            r'(?s)<div[^>]+id=["\']errorMessages["\'][^>]*>(.+?)</div>',
            login_page, 'error messages', default=None)
        if error:
            raise ExtractorError('Unable to login: %s' % error, expected=True)
        raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        """Scrape the episode page (with both PC and mobile UAs) and collect formats."""
        display_id = self._match_id(url)

        errors = []
        formats = []

        # Maps server-side error identifiers to keys of the page's
        # human-readable videoErrorMessages table.
        ERRORS_MAP = {
            'ERROR_MATURE_CONTENT_LOGGED_IN': 'matureContentLoggedIn',
            'ERROR_MATURE_CONTENT_LOGGED_OUT': 'matureContentLoggedOut',
            'ERROR_SUBSCRIPTION_LOGGED_OUT': 'subscriptionLoggedOut',
            'ERROR_VIDEO_EXPIRED': 'videoExpired',
            'ERROR_TERRITORY_UNAVAILABLE': 'territoryUnavailable',
            'SVODBASIC_SUBSCRIPTION_IN_PLAYER': 'basicSubscription',
            'SVODNON_SUBSCRIPTION_IN_PLAYER': 'nonSubscription',
            'ERROR_PLAYER_NOT_RESPONDING': 'playerNotResponding',
            'ERROR_UNABLE_TO_CONNECT_TO_CDN': 'unableToConnectToCDN',
            'ERROR_STREAM_NOT_FOUND': 'streamNotFound',
        }

        USER_AGENTS = (
            # PC UA is served with m3u8 that provides some bonus lower quality formats
            ('pc', 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0'),
            # Mobile UA allows to extract direct links and also does not fail when
            # PC UA fails with hulu error (e.g.
            # http://www.funimation.com/shows/hacksign/videos/official/role-play)
            ('mobile', 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'),
        )

        for kind, user_agent in USER_AGENTS:
            request = sanitized_Request(url)
            request.add_header('User-Agent', user_agent)
            webpage = self._download_webpage(
                request, display_id, 'Downloading %s webpage' % kind)

            playlist = self._parse_json(
                self._search_regex(
                    r'var\s+playersData\s*=\s*(\[.+?\]);\n',
                    webpage, 'players data'),
                display_id)[0]['playlist']

            # Find the playlist entry matching the URL's display id.
            items = next(item['items'] for item in playlist if item.get('items'))
            item = next(item for item in items if item.get('itemAK') == display_id)

            error_messages = {}
            video_error_messages = self._search_regex(
                r'var\s+videoErrorMessages\s*=\s*({.+?});\n',
                webpage, 'error messages', default=None)
            if video_error_messages:
                error_messages_json = self._parse_json(video_error_messages, display_id, fatal=False)
                if error_messages_json:
                    for _, error in error_messages_json.items():
                        type_ = error.get('type')
                        description = error.get('description')
                        content = error.get('content')
                        if type_ == 'text' and description and content:
                            error_message = ERRORS_MAP.get(description)
                            if error_message:
                                error_messages[error_message] = content

            for video in item.get('videoSet', []):
                auth_token = video.get('authToken')
                if not auth_token:
                    continue
                funimation_id = video.get('FUNImationID') or video.get('videoId')
                # Prefer dubbed versions over subbed ones.
                preference = 1 if video.get('languageMode') == 'dub' else 0
                if not auth_token.startswith('?'):
                    auth_token = '?%s' % auth_token
                for quality, height in (('sd', 480), ('hd', 720), ('hd1080', 1080)):
                    format_url = video.get('%sUrl' % quality)
                    if not format_url:
                        continue
                    if not format_url.startswith(('http', '//')):
                        # Non-URL values here are error identifiers; keep them
                        # so a meaningful message can be raised below.
                        errors.append(format_url)
                        continue
                    if determine_ext(format_url) == 'm3u8':
                        formats.extend(self._extract_m3u8_formats(
                            format_url + auth_token, display_id, 'mp4', entry_protocol='m3u8_native',
                            preference=preference, m3u8_id='%s-hls' % funimation_id, fatal=False))
                    else:
                        tbr = int_or_none(self._search_regex(
                            r'-(\d+)[Kk]', format_url, 'tbr', default=None))
                        formats.append({
                            'url': format_url + auth_token,
                            'format_id': '%s-http-%dp' % (funimation_id, height),
                            'height': height,
                            'tbr': tbr,
                            'preference': preference,
                        })

        if not formats and errors:
            raise ExtractorError(
                '%s returned error: %s'
                % (self.IE_NAME, clean_html(error_messages.get(errors[0], errors[0]))),
                expected=True)

        self._sort_formats(formats)

        title = item['title']
        artist = item.get('artist')
        if artist:
            title = '%s - %s' % (artist, title)
        description = self._og_search_description(webpage) or item.get('description')
        thumbnail = self._og_search_thumbnail(webpage) or item.get('posterUrl')
        video_id = item.get('itemId') or display_id

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }
```
#### File: youtube_dl/extractor/noz.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
int_or_none,
xpath_text,
)
class NozIE(InfoExtractor):
    # noz.de videos: the page references a player script whose config URL
    # (URL-encoded) points at an XML document describing all qualities.
    _VALID_URL = r'https?://(?:www\.)?noz\.de/video/(?P<id>[0-9]+)/'
    _TESTS = [{
        'url': 'http://www.noz.de/video/25151/32-Deutschland-gewinnt-Badminton-Lnderspiel-in-Melle',
        'info_dict': {
            'id': '25151',
            'ext': 'mp4',
            'duration': 215,
            'title': '3:2 - Deutschland gewinnt Badminton-Länderspiel in Melle',
            'description': 'Vor rund 370 Zuschauern gewinnt die deutsche Badminton-Nationalmannschaft am Donnerstag ein EM-Vorbereitungsspiel gegen Frankreich in Melle. Video <NAME>.',
            'thumbnail': 're:^http://.*\.jpg',
        },
    }]

    def _real_extract(self, url):
        """Follow the player-script indirection to the XML config and build formats."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        description = self._og_search_description(webpage)

        edge_url = self._html_search_regex(
            r'<script\s+(?:type="text/javascript"\s+)?src="(.*?/videojs_.*?)"',
            webpage, 'edge URL')
        edge_content = self._download_webpage(edge_url, 'meta configuration')

        # The config URL is embedded URL-encoded inside a Flash-style
        # so.addVariable() call.
        config_url_encoded = self._search_regex(
            r'so\.addVariable\("config_url","[^,]*,(.*?)"',
            edge_content, 'config URL'
        )
        config_url = compat_urllib_parse_unquote(config_url_encoded)

        doc = self._download_xml(config_url, 'video configuration')
        title = xpath_text(doc, './/title')
        thumbnail = xpath_text(doc, './/article/thumbnail/url')
        duration = int_or_none(xpath_text(
            doc, './/article/movie/file/duration'))
        formats = []
        for qnode in doc.findall('.//article/movie/file/qualities/qual'):
            video_node = qnode.find('./html_urls/video_url[@format="video/mp4"]')
            if video_node is None:
                continue  # auto
            formats.append({
                'url': video_node.text,
                'format_name': xpath_text(qnode, './name'),
                'format_id': xpath_text(qnode, './id'),
                'height': int_or_none(xpath_text(qnode, './height')),
                'width': int_or_none(xpath_text(qnode, './width')),
                'tbr': int_or_none(xpath_text(qnode, './bitrate'), scale=1000),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'duration': duration,
            'description': description,
            'thumbnail': thumbnail,
        }
```
#### File: youtube_dl/extractor/radiobremen.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import parse_duration
class RadioBremenIE(InfoExtractor):
    """Extractor for the radiobremen.de mediathek.

    Metadata comes from a dedicated metadata endpoint; the direct MP4 URL is
    rebuilt from the arguments of the classic player call on the page.
    """
    _VALID_URL = r'http?://(?:www\.)?radiobremen\.de/mediathek/(?:index\.html)?\?id=(?P<id>[0-9]+)'
    IE_NAME = 'radiobremen'
    _TEST = {
        'url': 'http://www.radiobremen.de/mediathek/index.html?id=114720',
        'info_dict': {
            'id': '114720',
            'ext': 'mp4',
            'duration': 1685,
            'width': 512,
            'title': 'buten un binnen vom 22. Dezember',
            'thumbnail': 're:https?://.*\.jpg$',
            'description': 'Unter anderem mit diesen Themen: 45 Flüchtlinge sind in Worpswede angekommen +++ Freies Internet für alle: Bremer arbeiten an einem flächendeckenden W-Lan-Netzwerk +++ Aktivisten kämpfen für das Unibad +++ So war das Wetter 2014 +++',
        },
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        meta_page = self._download_webpage(
            'http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s' % video_id,
            video_id, 'Downloading metadata')
        title = self._html_search_regex(
            r'<h1.*>(?P<title>.+)</h1>', meta_page, 'title')
        description = self._html_search_regex(
            r'<p>(?P<description>.*)</p>', meta_page, 'description', fatal=False)
        duration = parse_duration(self._html_search_regex(
            r'Länge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>',
            meta_page, 'duration', fatal=False))
        video_page = self._download_webpage(
            url, video_id, 'Downloading video information')
        # NOTE: raises AttributeError when the player invocation is absent
        # (identical to the previous behaviour).
        player = re.search(
            r"ardformatplayerclassic\(\'playerbereich\',\'(?P<width>[0-9]+)\',\'.*\',\'(?P<video_id>[0-9]+)\',\'(?P<secret>[0-9]+)\',\'(?P<thumbnail>.+)\',\'\'\)",
            video_page)
        width = player.group('width')
        video_url = (
            "http://dl-ondemand.radiobremen.de/mediabase/%s/%s_%s_%s.mp4" %
            (video_id, video_id, player.group("secret"), width))
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'formats': [{
                'url': video_url,
                'ext': 'mp4',
                'width': int(width),
            }],
            'thumbnail': player.group('thumbnail'),
        }
```
#### File: youtube_dl/extractor/rai.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
ExtractorError,
determine_ext,
parse_duration,
unified_strdate,
int_or_none,
xpath_text,
)
class RaiTVIE(InfoExtractor):
    """Extractor for media pages on rai.it / rai.tv / rainews.it.

    Media metadata is fetched from the JSON ContentItem endpoint; for videos
    the actual stream URLs are resolved through the "relinker" XML service.
    """
    _VALID_URL = r'http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/(?:[^/]+/)+media/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html'
    _TESTS = [
        {
            'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
            'md5': '96382709b61dd64a6b88e0f791e6df4c',
            'info_dict': {
                'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
                'ext': 'flv',
                'title': 'Report del 07/04/2014',
                'description': 'md5:f27c544694cacb46a078db84ec35d2d9',
                'upload_date': '20140407',
                'duration': 6160,
            }
        },
        {
            'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html',
            'md5': 'd9751b78eac9710d62c2447b224dea39',
            'info_dict': {
                'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9',
                'ext': 'flv',
                'title': 'TG PRIMO TEMPO',
                'upload_date': '20140612',
                'duration': 1758,
            },
        },
        {
            'url': 'http://www.rainews.it/dl/rainews/media/state-of-the-net-Antonella-La-Carpia-regole-virali-7aafdea9-0e5d-49d5-88a6-7e65da67ae13.html',
            'md5': '35cf7c229f22eeef43e48b5cf923bef0',
            'info_dict': {
                'id': '7aafdea9-0e5d-49d5-88a6-7e65da67ae13',
                'ext': 'mp4',
                'title': 'State of the Net, Antonella La Carpia: regole virali',
                'description': 'md5:b0ba04a324126903e3da7763272ae63c',
                'upload_date': '20140613',
            },
            'skip': 'Error 404',
        },
        {
            'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-b4a49761-e0cc-4b14-8736-2729f6f73132-tg2.html',
            'info_dict': {
                'id': 'b4a49761-e0cc-4b14-8736-2729f6f73132',
                'ext': 'mp4',
                'title': 'Alluvione in Sardegna e dissesto idrogeologico',
                'description': 'Edizione delle ore 20:30 ',
            },
            'skip': 'invalid urls',
        },
        {
            'url': 'http://www.ilcandidato.rai.it/dl/ray/media/Il-Candidato---Primo-episodio-Le-Primarie-28e5525a-b495-45e8-a7c3-bc48ba45d2b6.html',
            'md5': '496ab63e420574447f70d02578333437',
            'info_dict': {
                'id': '28e5525a-b495-45e8-a7c3-bc48ba45d2b6',
                'ext': 'flv',
                'title': 'Il Candidato - Primo episodio: "Le Primarie"',
                'description': 'md5:364b604f7db50594678f483353164fb8',
                'upload_date': '20140923',
                'duration': 386,
            }
        },
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        media = self._download_json(
            'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-%s.html?json' % video_id,
            video_id, 'Downloading video JSON')
        thumbnails = []
        for image_type in ('image', 'image_medium', 'image_300'):
            thumbnail_url = media.get(image_type)
            if thumbnail_url:
                thumbnails.append({
                    'url': thumbnail_url,
                })
        # BUG FIX: subtitles was initialized as a list although it is consumed
        # as a dict (language -> entries), which is also what _get_subtitles()
        # returns. Start with an empty dict so both paths agree.
        subtitles = {}
        formats = []
        media_type = media['type']
        if 'Audio' in media_type:
            formats.append({
                'format_id': media.get('formatoAudio'),
                'url': media['audioUrl'],
                'ext': media.get('formatoAudio'),
            })
        elif 'Video' in media_type:
            def fix_xml(xml):
                # The relinker emits malformed XML; patch it up before parsing.
                return xml.replace(' tag elementi', '').replace('>/', '</')
            relinker = self._download_xml(
                media['mediaUri'] + '&output=43',
                video_id, transform_source=fix_xml)
            has_subtitle = False
            for element in relinker.findall('element'):
                media_url = xpath_text(element, 'url')
                ext = determine_ext(media_url)
                content_type = xpath_text(element, 'content-type')
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        media_url, video_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls', fatal=False))
                elif ext == 'f4m':
                    formats.extend(self._extract_f4m_formats(
                        media_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44',
                        video_id, f4m_id='hds', fatal=False))
                elif ext == 'stl':
                    has_subtitle = True
                elif content_type and content_type.startswith('video/'):
                    # BUG FIX: int_or_none() returns None for a missing or
                    # non-numeric <bitrate>; comparing None > 0 raises
                    # TypeError on Python 3, so guard explicitly. The
                    # content_type guard likewise avoids AttributeError when
                    # <content-type> is absent.
                    bitrate = int_or_none(xpath_text(element, 'bitrate'))
                    has_bitrate = bitrate is not None and bitrate > 0
                    formats.append({
                        'url': media_url,
                        'tbr': bitrate if has_bitrate else None,
                        'format_id': 'http-%d' % bitrate if has_bitrate else 'http',
                    })
                elif content_type and content_type.startswith('image/'):
                    thumbnails.append({
                        'url': media_url,
                    })
            self._sort_formats(formats)
            if has_subtitle:
                webpage = self._download_webpage(url, video_id)
                subtitles = self._get_subtitles(video_id, webpage)
        else:
            raise ExtractorError('not a media file')
        return {
            'id': video_id,
            'title': media['name'],
            'description': media.get('desc'),
            'thumbnails': thumbnails,
            'uploader': media.get('author'),
            'upload_date': unified_strdate(media.get('date')),
            'duration': parse_duration(media.get('length')),
            'formats': formats,
            'subtitles': subtitles,
        }
    def _get_subtitles(self, video_id, webpage):
        """Return {'it': [...]} from the page's closedcaption meta tag.

        The advertised .stl name is rewritten to .srt (presumably served
        alongside — the original code relied on this; confirm if it breaks).
        """
        subtitles = {}
        m = re.search(r'<meta name="closedcaption" content="(?P<captions>[^"]+)"', webpage)
        if m:
            captions = m.group('captions')
            STL_EXT = '.stl'
            SRT_EXT = '.srt'
            if captions.endswith(STL_EXT):
                captions = captions[:-len(STL_EXT)] + SRT_EXT
            subtitles['it'] = [{
                'ext': 'srt',
                'url': 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions),
            }]
        return subtitles
class RaiIE(InfoExtractor):
    """Generic Rai page extractor: finds the embedded media iframe/player URL
    and hands it back for re-dispatch (typically to RaiTVIE)."""
    _VALID_URL = r'http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html'
    _TESTS = [
        {
            'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html',
            'md5': 'e0e7a8a131e249d1aa0ebf270d1d8db7',
            'info_dict': {
                'id': '59d69d28-6bb6-409d-a4b5-ed44096560af',
                'ext': 'flv',
                'title': 'Il pacco',
                'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a',
                'upload_date': '20141221',
            },
        }
    ]
    @classmethod
    def suitable(cls, url):
        # Defer to the more specific media extractor whenever it matches.
        if RaiTVIE.suitable(url):
            return False
        return super(RaiIE, cls).suitable(url)
    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)
        player_url = self._search_regex(
            [r'<iframe[^>]+src="([^"]*/dl/[^"]+\?iframe\b[^"]*)"',
             r'drawMediaRaiTV\(["\'](.+?)["\']'],
            page, 'iframe')
        # Relative embed URLs are resolved against the page URL.
        if not player_url.startswith('http'):
            player_url = compat_urlparse.urljoin(url, player_url)
        return self.url_result(player_url)
```
#### File: youtube_dl/extractor/trollvids.py
```python
from __future__ import unicode_literals
import re
from .nuevo import NuevoBaseIE
class TrollvidsIE(NuevoBaseIE):
    """Extractor for trollvids.com; playback data comes from the shared
    Nuevo player config endpoint."""
    _VALID_URL = r'http://(?:www\.)?trollvids\.com/video/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
    IE_NAME = 'trollvids'
    _TEST = {
        'url': 'http://trollvids.com/video/2349002/%E3%80%90MMD-R-18%E3%80%91%E3%82%AC%E3%83%BC%E3%83%AB%E3%83%95%E3%83%AC%E3%83%B3%E3%83%89-carrymeoff',
        'md5': '1d53866b2c514b23ed69e4352fdc9839',
        'info_dict': {
            'id': '2349002',
            'ext': 'mp4',
            'title': '【MMD R-18】ガールフレンド carry_me_off',
            'age_limit': 18,
            'duration': 216.78,
        },
    }
    def _real_extract(self, url):
        video_id, display_id = re.match(self._VALID_URL, url).group('id', 'display_id')
        info = self._extract_nuevo(
            'http://trollvids.com/nuevo/player/config.php?v=%s' % video_id,
            video_id)
        # Attach the slug and the site-wide adult rating on top of the
        # Nuevo-provided fields.
        info['display_id'] = display_id
        info['age_limit'] = 18
        return info
```
#### File: youtube_dl/extractor/ustudio.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
class UstudioIE(InfoExtractor):
    """Extractor for ustudio.com: formats/thumbnails come from the embed
    config XML, remaining metadata from the HTML page."""
    _VALID_URL = r'https?://(?:(?:www|v1)\.)?ustudio\.com/video/(?P<id>[^/]+)/(?P<display_id>[^/?#&]+)'
    _TEST = {
        'url': 'http://ustudio.com/video/Uxu2my9bgSph/san_francisco_golden_gate_bridge',
        'md5': '58bbfca62125378742df01fc2abbdef6',
        'info_dict': {
            'id': 'Uxu2my9bgSph',
            'display_id': 'san_francisco_golden_gate_bridge',
            'ext': 'mp4',
            'title': 'San Francisco: Golden Gate Bridge',
            'description': 'md5:23925500697f2c6d4830e387ba51a9be',
            'thumbnail': 're:^https?://.*\.jpg$',
            'upload_date': '20111107',
            'uploader': '<NAME>',
        }
    }
    def _real_extract(self, url):
        video_id, display_id = re.match(self._VALID_URL, url).group('id', 'display_id')
        config = self._download_xml(
            'http://v1.ustudio.com/embed/%s/ustudio/config.xml' % video_id,
            display_id)
        def media_entries(kind):
            # One entry per <quality> child of the requested kind
            # ('video' or 'image'); entries without a URL are dropped.
            entries = []
            for item in config.findall('./qualities/quality/%s' % kind):
                if not item.get('url'):
                    continue
                entries.append({
                    'url': item.attrib['url'],
                    'width': int_or_none(item.get('width')),
                    'height': int_or_none(item.get('height')),
                })
            return entries
        formats = media_entries('video')
        self._sort_formats(formats)
        webpage = self._download_webpage(url, display_id)
        return {
            'id': video_id,
            'display_id': display_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnails': media_entries('image'),
            'upload_date': unified_strdate(self._search_regex(
                r'(?s)Uploaded by\s*.+?\s*on\s*<span>([^<]+)</span>',
                webpage, 'upload date', fatal=False)),
            'uploader': self._search_regex(
                r'Uploaded by\s*<a[^>]*>([^<]+)<',
                webpage, 'uploader', fatal=False),
            'formats': formats,
        }
```
#### File: youtube_dl/extractor/zippcast.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
str_to_int,
)
class ZippCastIE(InfoExtractor):
    """Extractor for zippcast.com: collects the optional direct <source>
    link plus the HLS/HDS stream referenced by the player config."""
    _VALID_URL = r'https?://(?:www\.)?zippcast\.com/(?:video/|videoview\.php\?.*\bvplay=)(?P<id>[0-9a-zA-Z]+)'
    _TESTS = [{
        # m3u8, hq direct link
        'url': 'http://www.zippcast.com/video/c9cfd5c7e44dbc29c81',
        'md5': '5ea0263b5606866c4d6cda0fc5e8c6b6',
        'info_dict': {
            'id': 'c9cfd5c7e44dbc29c81',
            'ext': 'mp4',
            'title': '[Vinesauce] Vinny - Digital Space Traveler',
            'description': 'Muted on youtube, but now uploaded in it\'s original form.',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'vinesauce',
            'view_count': int,
            'categories': ['Entertainment'],
            'tags': list,
        },
    }, {
        # f4m, lq ipod direct link
        'url': 'http://www.zippcast.com/video/b79c0a233e9c6581775',
        'only_matching': True,
    }, {
        'url': 'http://www.zippcast.com/videoview.php?vplay=c9cfd5c7e44dbc29c81&auto=no',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://www.zippcast.com/video/%s' % video_id, video_id)
        formats = []
        direct_url = self._search_regex(
            r'<source[^>]+src=(["\'])(?P<url>.+?)\1', webpage,
            'video url', default=None, group='url')
        if direct_url:
            formats.append({
                'url': direct_url,
                'format_id': 'http',
                # direct link is almost always of worse quality
                'preference': 0,
            })
        stream_url = self._search_regex(
            r'src\s*:\s*(?:escape\()?(["\'])(?P<url>http://.+?)\1',
            webpage, 'src', default=None, group='url')
        stream_ext = determine_ext(stream_url)
        if stream_ext == 'm3u8':
            formats.extend(self._extract_m3u8_formats(
                stream_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls', fatal=False))
        elif stream_ext == 'f4m':
            formats.extend(self._extract_f4m_formats(
                stream_url, video_id, f4m_id='hds', fatal=False))
        self._sort_formats(formats)
        description = self._og_search_description(webpage) or self._html_search_meta(
            'description', webpage)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader': self._search_regex(
                r'<a[^>]+href="https?://[^/]+/profile/[^>]+>([^<]+)</a>',
                webpage, 'uploader', fatal=False),
            'view_count': str_to_int(self._search_regex(
                r'>([\d,.]+) views!', webpage, 'view count', fatal=False)),
            'categories': re.findall(
                r'<a[^>]+href="https?://[^/]+/categories/[^"]+">([^<]+),?<',
                webpage),
            'tags': re.findall(
                r'<a[^>]+href="https?://[^/]+/search/tags/[^"]+">([^<]+),?<',
                webpage),
            'formats': formats,
        }
``` |
{
"source": "jianglianEin/LanTools",
"score": 3
} |
#### File: LanTools/python/packet.py
```python
from struct import *
import json
import time
def packData(cmd, string, mark=233):
    """Serialize a text payload into the wire format.

    Layout (network byte order): int32 mark, int32 cmd, int32 payload
    length, then the UTF-8 encoded payload bytes.

    :param cmd: int command code placed in the header.
    :param string: text payload (encoded as UTF-8).
    :param mark: int protocol marker, 233 by default.
    :return: bytes ready to send.
    """
    payload = string.encode('utf-8')
    size = len(payload)
    return pack('!iii%ds' % size, mark, cmd, size, payload)
def unpackData(string):
    """Parse a packet built by packData().

    Reads the 12-byte header, then exactly `length` payload bytes (anything
    beyond that is ignored). The mark field is read but not returned.

    :param string: bytes containing at least one full packet.
    :return: (cmd, payload_text) tuple.
    """
    mark, cmd, length = unpack('!iii', string[:12])
    (raw,) = unpack('{length}s'.format(length=length), string[12:12 + length])
    return cmd, raw.decode('utf-8')
def packIpc(d):
    """Encode an IPC message (any JSON-serializable object) as a JSON string."""
    return json.dumps(d)
def unpackIpc(jsonStr):
    """Decode a JSON-encoded IPC message back into a Python object."""
    return json.loads(jsonStr)
``` |
{
"source": "jianglin521/EverydayWechat",
"score": 3
} |
#### File: control/moviebox/maoyan_movie_box.py
```python
import requests
from datetime import datetime
from everyday_wechat.utils.common import SPIDER_HEADERS
def get_maoyan_movie_box(date='', is_expired=False):
    """Fetch Maoyan's box-office ranking for a given date.

    https://piaofang.maoyan.com/second-box?beginDate=20190830

    :param date: str, date in yyyyMMdd form; defaults to today.
    :param is_expired: bool, True labels the report "实时票房" (live),
        False labels it "当日票房" (same day).
    :return: formatted report string, or None on any failure.
    """
    query_date = date or datetime.now().strftime('%Y%m%d')
    print('获取 {} 的票房数据...'.format(query_date))
    url = 'https://piaofang.maoyan.com/second-box?beginDate={}'.format(query_date)
    resp = requests.get(url, headers=SPIDER_HEADERS)
    if resp.status_code != 200:
        print('获取票房失败。')
        return None
    body = resp.json()
    if not body['success']:
        print('获取票房失败:{}'.format(body['msg']))
        return None
    data = body['data']
    # Top 10 films, one line each: rank, title, daily take, running total.
    rows = []
    for rank, movie in enumerate(data['list'][:10], start=1):
        rows.append('{}.《{}》({}万,累积:{})'.format(
            str(rank), movie['movieName'], movie['boxInfo'], movie['sumBoxInfo']))
    pretty_date = datetime.strptime(query_date, '%Y%m%d').strftime(
        '%Y{}%m{}%d{}').format('年', '月', '日')
    return "{cur_date} {box_name}\n当日总票房:{total_box_info}万\n{box_info}".format(
        cur_date=pretty_date,
        box_name="实时票房" if is_expired else "当日票房",
        total_box_info=data['totalBoxInfo'],
        box_info='\n'.join(rows)
    )
# __date = '20190925'
# dd = get_maoyan_movie_box(__date, is_expired=False)
# print(dd)
```
#### File: control/weather/rtweather.py
```python
import requests
__all__ = ['get_rttodayweather']
# {"code":1,"msg":"数据返回成功","data":{"address":"广西壮族自治区 桂林市 全州县",
# "cityCode":"450324","temp":"26℃","weather":"晴","windDirection":"东北","windPower":"≤3级",
# "humidity":"58%","reportTime":"2019-06-14 10:49:37"}}
def get_rttodayweather(cityname, app_token):
    """Fetch today's weather for a city via the RollToolsApi forecast endpoint.

    https://github.com/MZCretin/RollToolsApi#获取特定城市今日天气

    :param cityname: str, city name; pass the full name where possible so
        the server-side fuzzy match does not pick the wrong city.
    :param app_token: dict with 'app_id' and 'app_secret' API credentials.
    :return: str joining address, date, weather and temperature on success,
        otherwise None (implicitly).
    """
    print('获取 {} 的天气...'.format(cityname))
    try:
        resp = requests.get(
            'https://www.mxnzp.com/api/weather/forecast/{}?app_id={}&app_secret={}'.format(
                cityname, app_token['app_id'], app_token['app_secret']))
        if resp.status_code == 200:
            payload = resp.json()
            if payload['code'] == 1:
                data = payload['data']
                today = data['forecasts'][0]
                return ' '.join([
                    data['address'].strip(),
                    today['date'],
                    today['dayWeather'],
                    today['dayTemp'],
                ])
            # API-level failure: report the message, then fall through to the
            # generic failure line (same double print as the original).
            print('获取天气失败:{}'.format(payload['msg']))
        print('获取天气失败。')
    except Exception as exception:
        # Best effort: network/JSON errors are logged; caller gets None.
        print(str(exception))
# Public alias kept for callers importing the generic name.
get_today_weather = get_rttodayweather

if __name__ == '__main__':
    # Manual smoke test. BUG FIX: the original call passed only the city and
    # always raised TypeError, because app_token is a required argument.
    # Fill in real credentials from https://www.mxnzp.com before running.
    cityname = '香港'
    app_token = {'app_id': '', 'app_secret': ''}
    weather = get_today_weather(cityname, app_token)
``` |
{
"source": "Jianglinhe/Spider",
"score": 3
} |
#### File: Spider/remote/qiubai_queue_spider.py
```python
import requests
from lxml import etree
import json
import threading
from queue import Queue
class QiubaiSpider(object):
    """Multi-threaded qiushibaike.com text-section scraper.

    Three queues form a pipeline -- url_queue -> html_queue -> content_queue --
    serviced by daemon worker threads started in run(). Each worker loops
    forever; run() returns once every queue reports all tasks done.
    """
    def __init__(self):
        self.url_temp = "https://www.qiushibaike.com/text/page/{}/"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
        }
        # Pipeline queues: pending URLs, fetched HTML, parsed record batches.
        self.url_queue = Queue()
        self.html_queue = Queue()
        self.content_queue = Queue()
    def get_url_list(self):
        """Seed url_queue with listing pages 1..13."""
        # return [self.url_temp.format(i) for i in range(1, 14)]
        for i in range(1, 14):
            self.url_queue.put(self.url_temp.format(i))
    def parse_url(self):
        """Worker loop: fetch each queued URL and push its HTML downstream."""
        while True:
            url = self.url_queue.get()  # blocks until a URL is available
            print(url)
            response = requests.get(url, headers=self.headers)
            assert response.status_code == 200, "返回异常"
            self.html_queue.put(response.content.decode("utf-8"))
            self.url_queue.task_done()  # pair each get() so queue.join() can unblock
    def get_content_list(self):  # worker loop: extract data from fetched pages
        """Parse queued HTML pages into lists of joke records."""
        while True:
            html_str = self.html_queue.get()
            html = etree.HTML(html_str)
            div_list = html.xpath("//div[@id='content-left']/div")  # one div per joke
            conent_list = []
            for div in div_list:
                item = {}
                item["content"] = [i.replace("\n", "") for i in div.xpath(".//div[@class='content']/span/text()")]
                item["author_gender"] = div.xpath(".//div[contains(@class, 'articleGender')]/@class")
                item["author_gender"] = item["author_gender"][0].split(" ")[-1].replace("Icon", "") if len(
                    item["author_gender"]) > 0 else None
                item["author_age"] = div.xpath(".//div[contains(@class, 'articleGender')]/text()")
                item["author_age"] = item["author_age"][0] if len(item["author_age"]) > 0 else None
                item["author_img"] = div.xpath(".//div[@class='author clearfix']//img/@src")
                item["author_img"] = "https:" + item["author_img"][0] if len(item["author_img"]) > 0 else None
                item["stats_vote"] = div.xpath(".//span[@class='stats-vote']/i/text()")
                item["stats_vote"] = item["stats_vote"][0] if len(item["stats_vote"]) > 0 else None
                conent_list.append(item)
            self.content_queue.put(conent_list)
            self.html_queue.task_done()
    def save_content_list(self):
        """Worker loop: append each parsed batch to the output file as JSON lines."""
        while True:
            content_list = self.content_queue.get()  # every get() needs a matching task_done()
            with open("qiushibaike_queue.txt", 'a', encoding='utf-8') as f:
                for content in content_list:
                    # print(content)
                    f.write(json.dumps(content, ensure_ascii=False))  # None values serialize as JSON null
                    f.write("\n")
                print("保存成功....")
            self.content_queue.task_done()
    def run(self):  # wire up and drive the whole pipeline
        """Start all worker threads, then block until every queue drains."""
        thread_list = []
        # 1. seed the URL queue
        t_url = threading.Thread(target=self.get_url_list)  # pass the function itself, do not call it
        thread_list.append(t_url)
        # 2. fetch pages: several threads service the request stage
        for i in range(5):  # multiple threads for the network-bound fetch stage
            t_parse = threading.Thread(target=self.parse_url)
            thread_list.append(t_parse)
        # 3. parse HTML
        t_html = threading.Thread(target=self.get_content_list)
        thread_list.append(t_html)
        # 4. persist results
        t_save = threading.Thread(target=self.save_content_list)
        thread_list.append(t_save)
        for t in thread_list:
            t.setDaemon(True)  # daemonize so workers die with the main thread; must be set before start()
            t.start()
        for q in [self.url_queue, self.html_queue, self.content_queue]:
            q.join()  # block the main thread until every queued task is marked done
        print("主线程结束")
if __name__ == '__main__':
    # Entry point: run the threaded scrape once.
    qiuba = QiubaiSpider()
    qiuba.run()
```
#### File: Spider/remote/qiushibaike_spider.py
```python
import requests
from lxml import etree
import json
class QiubaiSpider(object):
    """Sequential qiushibaike.com text-section scraper: fetches 13 listing
    pages and appends each joke as one JSON line to qiushibaike.txt."""
    def __init__(self):
        self.url_temp = "https://www.qiushibaike.com/text/page/{}/"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
        }
        self.session = requests.session()
    def get_url_list(self):
        """URLs of listing pages 1..13."""
        return [self.url_temp.format(page) for page in range(1, 14)]
    def parse_url(self, url):
        """GET one listing page and return its decoded HTML."""
        print(url)
        response = self.session.get(url, headers=self.headers)
        assert response.status_code == 200, "返回异常"
        return response.content.decode("utf-8")
    def _parse_item(self, div):
        """Build one joke record from its <div> node; absent fields become None."""
        gender_classes = div.xpath(".//div[contains(@class, 'articleGender')]/@class")
        ages = div.xpath(".//div[contains(@class, 'articleGender')]/text()")
        imgs = div.xpath(".//div[@class='author clearfix']//img/@src")
        votes = div.xpath(".//span[@class='stats-vote']/i/text()")
        return {
            "content": [line.replace("\n", "") for line in div.xpath(".//div[@class='content']/span/text()")],
            "author_gender": gender_classes[0].split(" ")[-1].replace("Icon", "") if gender_classes else None,
            "author_age": ages[0] if ages else None,
            "author_img": "https:" + imgs[0] if imgs else None,
            "stats_vote": votes[0] if votes else None,
        }
    def get_content_list(self, html_str):
        """Parse one page of HTML into a list of joke records."""
        html = etree.HTML(html_str)
        return [self._parse_item(div) for div in html.xpath("//div[@id='content-left']/div")]
    def save_content_list(self, content_list):
        """Append each record as a JSON line (non-ASCII kept verbatim)."""
        with open("qiushibaike.txt", 'a', encoding='utf-8') as f:
            for content in content_list:
                f.write(json.dumps(content, ensure_ascii=False))
                f.write("\n")
            print("保存成功....")
    def run(self):
        """Fetch, parse and persist every listing page in sequence."""
        for url in self.get_url_list():
            page_html = self.parse_url(url)
            self.save_content_list(self.get_content_list(page_html))
if __name__ == '__main__':
    # Entry point: scrape all pages once.
    qiuba = QiubaiSpider()
    qiuba.run()
``` |
{
"source": "jianglong0156/chromium.src",
"score": 3
} |
#### File: android/pylib/apk_info.py
```python
import collections
import os
import re
import cmd_helper
def GetPackageNameForApk(apk_path):
  """Returns the package name of the apk file.

  Shells out to 'aapt dump badging' and scans the output for the package
  declaration line.

  Raises:
    Exception: if no package line appears in the aapt output.
  """
  package_line_re = re.compile(r'package: .*name=\'(\S*)\'')
  dump = cmd_helper.GetCmdOutput(['aapt', 'dump', 'badging', apk_path])
  for line in dump.split('\n'):
    match = package_line_re.match(line)
    if match:
      return match.group(1)
  raise Exception('Failed to determine package name of %s' % apk_path)
class ApkInfo(object):
  """Helper class for inspecting APKs.

  Runs proguard in -dump mode over the companion test jar and parses its
  output into a map of qualified test methods ('Class#method') to their
  annotations, plus a flat list of test methods.
  """
  def __init__(self, apk_path, jar_path):
    # Prefer the SDK's proguard; fall back to the copy in the Android tree.
    self._PROGUARD_PATH = os.path.join(os.environ['ANDROID_SDK_ROOT'],
                                       'tools/proguard/bin/proguard.sh')
    if not os.path.exists(self._PROGUARD_PATH):
      self._PROGUARD_PATH = os.path.join(os.environ['ANDROID_BUILD_TOP'],
                                         'external/proguard/bin/proguard.sh')
    # Patterns matching the relevant lines of 'proguard -dump' output.
    self._PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$')
    self._PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$')
    self._PROGUARD_ANNOTATION_RE = re.compile(r'\s*?- Annotation \[L(\S*);\]:$')
    self._PROGUARD_ANNOTATION_CONST_RE = (
        re.compile(r'\s*?- Constant element value.*$'))
    self._PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'\s*?- \S+? \[(.*)\]$')
    if not os.path.exists(apk_path):
      raise Exception('%s not found, please build it' % apk_path)
    self._apk_path = apk_path
    if not os.path.exists(jar_path):
      raise Exception('%s not found, please build it' % jar_path)
    self._jar_path = jar_path
    # Maps 'Class#method' -> list of annotation names (and 'name:value').
    self._annotation_map = collections.defaultdict(list)
    self._test_methods = []
    self._Initialize()
  def _Initialize(self):
    """Parses proguard -dump output to collect test methods and annotations."""
    proguard_output = cmd_helper.GetCmdOutput([self._PROGUARD_PATH,
                                               '-injars', self._jar_path,
                                               '-dontshrink',
                                               '-dontoptimize',
                                               '-dontobfuscate',
                                               '-dontpreverify',
                                               '-dump',
                                              ]).split('\n')
    # Line-by-line state machine: the dump lists a class, then its methods,
    # then each method's annotations (optionally with a constant value), so
    # the current class/method/annotation must be tracked across lines.
    clazz = None
    method = None
    annotation = None
    has_value = False
    qualified_method = None
    for line in proguard_output:
      m = self._PROGUARD_CLASS_RE.match(line)
      if m:
        clazz = m.group(1).replace('/', '.')  # Change package delim.
        annotation = None
        continue
      m = self._PROGUARD_METHOD_RE.match(line)
      if m:
        method = m.group(1)
        annotation = None
        qualified_method = clazz + '#' + method
        # Track 'test*' methods on '*Test' classes as test methods.
        if method.startswith('test') and clazz.endswith('Test'):
          self._test_methods += [qualified_method]
        continue
      m = self._PROGUARD_ANNOTATION_RE.match(line)
      if m:
        assert qualified_method
        annotation = m.group(1).split('/')[-1]  # Ignore the annotation package.
        self._annotation_map[qualified_method].append(annotation)
        has_value = False
        continue
      if annotation:
        assert qualified_method
        # A 'Constant element value' line announces that the next matching
        # line carries the annotation's value; record it as 'name:value'.
        if not has_value:
          m = self._PROGUARD_ANNOTATION_CONST_RE.match(line)
          if m:
            has_value = True
        else:
          m = self._PROGUARD_ANNOTATION_VALUE_RE.match(line)
          if m:
            value = m.group(1)
            self._annotation_map[qualified_method].append(
                annotation + ':' + value)
            has_value = False
  def _GetAnnotationMap(self):
    # Accessor kept for symmetry/overriding; the map is built in __init__.
    return self._annotation_map
  def _IsTestMethod(self, test):
    # By convention a test is a 'test*' method on a '*Test' class.
    class_name, method = test.split('#')
    return class_name.endswith('Test') and method.startswith('test')
  def GetApkPath(self):
    """Returns the path to the APK under inspection."""
    return self._apk_path
  def GetPackageName(self):
    """Returns the package name of this APK."""
    return GetPackageNameForApk(self._apk_path)
  def GetTestAnnotations(self, test):
    """Returns a list of all annotations for the given |test|. May be empty."""
    if not self._IsTestMethod(test):
      return []
    return self._GetAnnotationMap()[test]
  def _AnnotationsMatchFilters(self, annotation_filter_list, annotations):
    """Checks if annotations match any of the filters."""
    if not annotation_filter_list:
      return True
    for annotation_filter in annotation_filter_list:
      filters = annotation_filter.split('=')
      # 'key=v1,v2' filters match any of the listed values; a bare name
      # matches the annotation name itself.
      if len(filters) == 2:
        key = filters[0]
        value_list = filters[1].split(',')
        for value in value_list:
          if key + ':' + value in annotations:
            return True
      elif annotation_filter in annotations:
        return True
    return False
  def GetAnnotatedTests(self, annotation_filter_list):
    """Returns a list of all tests that match the given annotation filters."""
    return [test for test, annotations in self._GetAnnotationMap().iteritems()
            if self._IsTestMethod(test) and self._AnnotationsMatchFilters(
                annotation_filter_list, annotations)]
  def GetTestMethods(self):
    """Returns a list of all test methods in this apk as Class#testMethod."""
    return self._test_methods
  @staticmethod
  def IsPythonDrivenTest(test):
    # Python-driven tests live under a 'pythonDrivenTests' path component.
    return 'pythonDrivenTests' in test
#### File: sdk_tools/command/info.py
```python
import logging
import manifest_util
def Info(manifest, bundle_names):
  """Prints details of each named bundle from the manifest (Python 2).

  Unknown names are reported once via logging.warn and then skipped.
  For each valid bundle every key except the bundle name is printed; the
  archives entry is summarized as the archive for the current host OS.

  Args:
    manifest: manifest object exposing GetBundles() and GetBundle(name)
        (presumably a manifest_util manifest -- confirm against callers).
    bundle_names: iterable of bundle name strings to describe.
  """
  valid_bundles = [bundle.name for bundle in manifest.GetBundles()]
  # Intersect with the requested names; whatever remains is unknown.
  valid_bundles = set(bundle_names) & set(valid_bundles)
  invalid_bundles = set(bundle_names) - valid_bundles
  if invalid_bundles:
    logging.warn('Unknown bundle(s): %s\n' % (', '.join(invalid_bundles)))
  # Iterate the caller's list (not the set) to preserve requested order.
  for bundle_name in bundle_names:
    if bundle_name not in valid_bundles:
      continue
    bundle = manifest.GetBundle(bundle_name)
    print bundle.name
    for key in sorted(bundle.iterkeys()):
      value = bundle[key]
      if key == manifest_util.ARCHIVES_KEY:
        # Only the archive matching the host OS is shown.
        archive = bundle.GetHostOSArchive()
        print '  Archive:'
        if archive:
          for archive_key in sorted(archive.iterkeys()):
            print '    %s: %s' % (archive_key, archive[archive_key])
        else:
          print '    No archives for this host.'
      elif key not in (manifest_util.ARCHIVES_KEY, manifest_util.NAME_KEY):
        print '  %s: %s' % (key, value)
    print
```
#### File: chrome_remote_control/chrome_remote_control/multi_page_benchmark.py
```python
from collections import defaultdict
import os
import sys
from chrome_remote_control import page_test
# Get build/android/pylib scripts into our path.
# TODO(tonyg): Move perf_tests_helper.py to a common location.
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../../build/android/pylib')))
# pylint: disable=F0401
from perf_tests_helper import GeomMeanAndStdDevFromHistogram
from perf_tests_helper import PrintPerfResult # pylint: disable=F0401
def _Mean(l):
return float(sum(l)) / len(l) if len(l) > 0 else 0.0
class MeasurementFailure(page_test.Failure):
  """Exception that can be thrown from MeasurePage to indicate an undesired but
  designed-for problem."""
  # Marker subclass only; it carries no behavior beyond its type.
  pass
class BenchmarkResults(page_test.PageTestResults):
  """Accumulates per-page metric values and summarizes them across pages.

  Expected call sequence per page: WillMeasurePage(page), one Add() call
  per metric, then DidMeasurePage(). Metric names and units must be
  identical for every page; the first page fixes the set of field names.
  """
  def __init__(self):
    super(BenchmarkResults, self).__init__()
    # (name, units, data_type) -> list of values, one per measured page.
    self.results_summary = defaultdict(list)
    self.page_results = []
    self.field_names = None  # fixed lazily from the first page's metrics
    self.field_units = {}
    self.field_types = {}
    self._page = None
    self._page_values = {}
  def WillMeasurePage(self, page):
    """Begins collecting metric values for |page|."""
    self._page = page
    self._page_values = {}
  def Add(self, name, units, value, data_type='default'):
    """Records one metric value for the page currently being measured."""
    assert name not in self._page_values, 'Result names must be unique'
    assert name != 'url', 'The name url cannot be used'
    if self.field_names:
      assert name in self.field_names, """MeasurePage returned inconsistent
results! You must return the same dict keys every time."""
    else:
      # First page: learn this metric's units and data type.
      self.field_units[name] = units
      self.field_types[name] = data_type
    self._page_values[name] = value
  def DidMeasurePage(self):
    """Finalizes the current page and folds its values into the summary."""
    assert self._page, 'Failed to call WillMeasurePage'
    if not self.field_names:
      self.field_names = self._page_values.keys()
      self.field_names.sort()
    self.page_results.append(self._page_values)
    for name in self.field_names:
      units = self.field_units[name]
      data_type = self.field_types[name]
      value = self._page_values[name]
      self.results_summary[(name, units, data_type)].append(value)
  def PrintSummary(self, trace_tag):
    """Prints one perf-result line per (metric, units, data_type) triple."""
    for measurement_units_type, values in sorted(
        self.results_summary.iteritems()):
      measurement, units, data_type = measurement_units_type
      trace = measurement + (trace_tag or '')
      PrintPerfResult(measurement, trace, values, units, data_type)
class CsvBenchmarkResults(BenchmarkResults):
  """BenchmarkResults that also streams one CSV row per measured page."""
  def __init__(self, results_writer):
    super(CsvBenchmarkResults, self).__init__()
    # csv.writer-like object; only writerow() is used.
    self._results_writer = results_writer
    self._did_write_header = False
  def DidMeasurePage(self):
    """Writes the page's values as a CSV row (header row on first call)."""
    super(CsvBenchmarkResults, self).DidMeasurePage()
    if not self._did_write_header:
      self._did_write_header = True
      row = ['url']
      for name in self.field_names:
        row.append('%s (%s)' % (name, self.field_units[name]))
      self._results_writer.writerow(row)
    row = [self._page.url]
    for name in self.field_names:
      value = self._page_values[name]
      if self.field_types[name] == 'histogram':
        # Histograms are reduced to their geometric mean for the CSV.
        avg, _ = GeomMeanAndStdDevFromHistogram(value)
        row.append(avg)
      elif isinstance(value, list):
        row.append(_Mean(value))
      else:
        row.append(value)
    self._results_writer.writerow(row)
# TODO(nduca): Rename to page_benchmark
class MultiPageBenchmark(page_test.PageTest):
  """Glue code for running a benchmark across a set of pages.

  To use this, subclass from the benchmark and override MeasurePage. For
  example:

     class BodyChildElementBenchmark(MultiPageBenchmark):
        def MeasurePage(self, page, tab, results):
           body_child_count = tab.runtime.Evaluate(
              'document.body.children.length')
           results.Add('body_children', 'count', body_child_count)

     if __name__ == '__main__':
        multi_page_benchmark.Main(BodyChildElementBenchmark())

  All benchmarks should include a unit test!

  TODO(nduca): Add explanation of how to write the unit test.

  To add test-specific options:

     class BodyChildElementBenchmark(MultiPageBenchmark):
        def AddOptions(parser):
           parser.add_option('--element', action='store', default='body')

        def MeasurePage(self, page, tab, results):
           body_child_count = tab.runtime.Evaluate(
              "document.querySelector('%s').children.length")
           results.Add('children', 'count', child_count)
  """
  def __init__(self):
    super(MultiPageBenchmark, self).__init__('_RunTest')
  def _RunTest(self, page, tab, results):
    # Bracket the measurement so results can group values per page.
    results.WillMeasurePage(page)
    self.MeasurePage(page, tab, results)
    results.DidMeasurePage()
  def MeasurePage(self, page, tab, results):
    """Override to actually measure the page's performance.

    page is a page_set.Page
    tab is an instance of chrome_remote_control.Tab

    Should call results.Add(name, units, value) for each result, or raise an
    exception on failure. The name and units of each Add() call must be
    the same across all iterations. The name 'url' must not be used.

    Prefer field names that are in accordance with python variable style. E.g.
    field_name.

    Put together:
       def MeasurePage(self, page, tab, results):
          res = tab.runtime.Evaluate('2+2')
          if res != 4:
             raise Exception('Oh, wow.')
          results.Add('two_plus_two', 'count', res)
    """
    raise NotImplementedError()
#### File: perf/perf_tools/robohornetpro.py
```python
from chrome_remote_control import multi_page_benchmark
from chrome_remote_control import util
class RobohornetPro(multi_page_benchmark.MultiPageBenchmark):
  """Runs the RoboHornet Pro page benchmark and reports total time in ms."""

  def CustomizeBrowserOptions(self, options):
    # The benchmark requires the real Date.now() for its measurements.
    options.wpr_make_javascript_deterministic = False

  def MeasurePage(self, _, tab, results):
    # Kick off the benchmark inside the page.
    tab.runtime.Execute('ToggleRoboHornet()')
    done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
    # Poll until the results element reports the total, up to 60 seconds.
    util.WaitFor(lambda: tab.runtime.Evaluate(done), 60)
    elapsed_ms = int(tab.runtime.Evaluate('stopTime - startTime'))
    results.Add('Total', 'ms', elapsed_ms)
``` |
{
"source": "jiangmin-antivairus/cuckoo-docker",
"score": 2
} |
#### File: jiangmin-antivairus/cuckoo-docker/sysinit.py
```python
import json
from configparser import ConfigParser
import xml.etree.ElementTree as ET
import os
import time
# Create each VM's disk as an incremental overlay of the base image.
def generate_vm_disk(base_disk_path, vm_disk_path):
    """Create a qcow2 overlay disk at vm_disk_path backed by base_disk_path,
    replacing any existing file at that location."""
    command = 'qemu-img create -f qcow2 -b ' + base_disk_path + ' ' + vm_disk_path
    print(command)
    if os.path.exists(vm_disk_path):
        os.remove(vm_disk_path)
    os.system(command)
# Render one VM's libvirt XML from the template file.
def generate_vm_xml(vm_name, vm_template_path, vm_disk_path, vm_xml_path, mac, vcpu=2, vmem=2):
    """Load the domain template, substitute name, NIC MAC, vCPU count,
    memory size and disk image path, then write the result to vm_xml_path.
    """
    tree = ET.parse(vm_template_path)
    domain = tree.getroot()
    # Domain name.
    domain.find('./name').text = vm_name
    # NIC MAC address.
    domain.find('./devices/interface/mac').set('address', mac)
    # vCPU count.
    domain.find('./vcpu').text = str(vcpu)
    # Memory size. NOTE(review): written as a bare number — libvirt reads it
    # in whatever unit the template declares; confirm the template's unit.
    domain.find('./memory').text = str(vmem)
    # Disk image path.
    domain.find('./devices/disk/source').set('file', vm_disk_path)
    tree.write(vm_xml_path)
# Define a libvirt domain from its XML description.
def create_vm_by_xml(vm_xml_path):
    """Register (virsh define) the domain described by vm_xml_path."""
    command = 'virsh define ' + vm_xml_path
    os.system(command)
# Create `num` virtual machines cloned from the `vm_os` base image.
def create_vm(num, vm_os, vm_template_path, vm_xml_dir='./vm_xml', vm_disk_dir='./images'):
    """Provision `num` analysis VMs: for each one, generate a domain XML and
    overlay disk, define and boot it, pin a static DHCP lease
    (192.168.122.(i+2) for VM index i), wait until the guest answers ping,
    snapshot it as 'default', then shut it down.
    """
    if num > 255:
        # The guest network only has one /24 of addresses.
        print('虚拟机数量不能超过255个')
        return
    vm_xml_dir = os.path.abspath(vm_xml_dir)
    vm_disk_dir = os.path.abspath(vm_disk_dir)
    print(vm_xml_dir)
    print(vm_disk_dir)
    # Create the directory for domain XML files.
    if not os.path.exists(vm_xml_dir):
        os.mkdir(vm_xml_dir)
    # Create the directory for VM disk images.
    if not os.path.exists(vm_disk_dir):
        os.mkdir(vm_disk_dir)
    # Create the VMs one by one.
    for i in range(num):
        vm_name = vm_os + '_' + str(i + 1)
        vm_disk_path = vm_disk_dir + '/' + vm_name + '.qcow2'
        vm_xml_path = vm_xml_dir + '/' + vm_name + '.xml'
        generate_vm_xml(vm_name=vm_name, vm_template_path=vm_template_path, vm_disk_path=vm_disk_path,
                        vm_xml_path=vm_xml_path, mac='52:54:00:00:00:' + "%02x" % (i + 1))
        generate_vm_disk(vm_disk_dir + '/' + vm_os + '.qcow2', vm_disk_path)
        create_vm_by_xml(vm_xml_path)
        # Register a static DHCP lease so the guest IP is predictable.
        os.system('virsh net-update default add ip-dhcp-host --xml \'<host mac="52:54:00:00:00:' + "%02x" % (
                i + 1) + '" name="' + vm_name + '" ip="192.168.122.' + str(i + 2) + '" />\'')
        # Boot the VM.
        os.system('virsh start ' + vm_name)
        # Poll until the guest (Windows) responds to ping, i.e. has booted.
        while True:
            if os.system('ping -c 1 192.168.122.' + str(i + 2)) == 0:
                break
            else:
                print('等待虚拟机' + vm_name + '启动')
                time.sleep(1)
        # Give the guest extra time to finish first-boot setup (rough estimate).
        time.sleep(30)
        # Take the 'default' snapshot (referenced later in kvm.conf).
        os.system('virsh snapshot-create-as ' + vm_name + ' ' + 'default')
        # Power the VM off again.
        os.system('virsh shutdown ' + vm_name)
# First-time cuckoo initialization.
def cuckoo_init():
    """Run cuckoo's one-off initialization command."""
    command = 'cuckoo init'
    os.system(command)
# Rewrite cuckoo's kvm machinery config (uses the default /root/.cuckoo path).
def modify_cuckoo_kvm_conf(num, vm_os, resultserver_ip):
    """Generate /root/.cuckoo/conf/kvm.conf declaring `num` machines named
    '<vm_os>_1'..'<vm_os>_<num>'. Machine i gets ip 192.168.122.(i+1),
    matching the static DHCP leases registered by create_vm().
    """
    # Map our image names onto cuckoo platform identifiers.
    platform = {
        'win7x64': 'windows',
        'win7x86': 'windows',
        'centos': 'linux',
    }
    config = ConfigParser()
    if os.path.exists('/root/.cuckoo/conf/kvm.conf'):
        config.read('/root/.cuckoo/conf/kvm.conf')
    # add_section() raises DuplicateSectionError when the freshly-read file
    # already declares the section, so drop any stale copy first.
    if config.has_section('kvm'):
        config.remove_section('kvm')
    config.add_section('kvm')
    config.set('kvm', 'dsn', 'qemu:///system')
    config.set('kvm', 'machines', ','.join([vm_os + '_' + str(i) for i in range(1, num + 1)]))
    config.set('kvm', 'interface', 'virbr0')
    for i in range(1, num + 1):
        lable = vm_os + '_' + str(i)
        if config.has_section(lable):
            config.remove_section(lable)
        config.add_section(lable)
        config.set(lable, 'label', lable)
        config.set(lable, 'platform', platform[vm_os])
        config.set(lable, 'ip', '192.168.122.' + str(i + 1))
        config.set(lable, 'snapshot', 'default')
        config.set(lable, 'resultserver_ip', resultserver_ip)
        config.set(lable, 'resultserver_port', '2042')
        config.set(lable, 'tags', '')
        config.set(lable, 'osprofile', '')
        config.set(lable, 'interface', '')
    if not os.path.exists('/root/.cuckoo/conf'):
        os.mkdir('/root/.cuckoo/conf')
    if os.path.exists('/root/.cuckoo/conf/kvm.conf'):
        os.remove('/root/.cuckoo/conf/kvm.conf')
    # Close the handle deterministically instead of leaking it.
    with open('/root/.cuckoo/conf/kvm.conf', 'w') as conf_file:
        config.write(conf_file)
# Adjust cuckoo's remaining config files under /root/.cuckoo/conf.
def modify_cuckoo_conf():
    """Tweak cuckoo.conf (machinery/db/resultserver), auxiliary.conf
    (tcpdump path) and reporting.conf (enable mongodb)."""
    config = ConfigParser()
    config.read('/root/.cuckoo/conf/cuckoo.conf')
    config.set('cuckoo', 'version_check', 'no')
    config.set('cuckoo', 'api_token', '<PASSWORD>')
    config.set('cuckoo', 'machinery', 'kvm')
    config.set('database', 'connection', 'mysql://root:root@localhost:3306/cuckoo')
    config.set('resultserver', 'ip', '0.0.0.0')
    # Use `with` so each handle is closed (the old code leaked open() handles).
    with open('/root/.cuckoo/conf/cuckoo.conf', 'w') as conf_file:
        config.write(conf_file)
    config = ConfigParser()
    config.read('/root/.cuckoo/conf/auxiliary.conf')
    config.set('sniffer', 'tcpdump', '/usr/bin/tcpdump')
    with open('/root/.cuckoo/conf/auxiliary.conf', 'w') as conf_file:
        config.write(conf_file)
    config = ConfigParser()
    config.read('/root/.cuckoo/conf/reporting.conf')
    config.set('mongodb', 'enabled', 'yes')
    with open('/root/.cuckoo/conf/reporting.conf', 'w') as conf_file:
        config.write(conf_file)
# Import the cuckoo community signatures bundle.
def import_cuckoo_community():
    """Load the bundled community signatures archive into cuckoo."""
    command = 'cuckoo community --file /root/community-master.tar.gz'
    os.system(command)
if __name__ == '__main__':
    # Number of win7x64 analysis VMs, taken from the
    # avas_cuckoo_win7x64_vm_num environment variable (defaults to 2).
    vm_num_env = os.getenv('avas_cuckoo_win7x64_vm_num')
    if vm_num_env is None:
        win7x64_vm_num = 2
    else:
        win7x64_vm_num = int(vm_num_env)
    print('win7x64虚拟机数量:' + str(win7x64_vm_num))
    create_vm(win7x64_vm_num, 'win7x64', vm_template_path='win7x64.xml')
    cuckoo_init()
    import_cuckoo_community()
    # Drop any pre-existing kvm.conf so it is regenerated from scratch.
    # Guarded: an unconditional os.remove raises FileNotFoundError when
    # `cuckoo init` did not create the file.
    if os.path.exists('/root/.cuckoo/conf/kvm.conf'):
        os.remove('/root/.cuckoo/conf/kvm.conf')
    modify_cuckoo_kvm_conf(win7x64_vm_num, 'win7x64', '192.168.122.1')
    modify_cuckoo_conf()
    os.system('cp supervisord.conf /root/.cuckoo/supervisord.conf')
``` |
{
"source": "jiangming1/archery",
"score": 2
} |
#### File: archery/sql/query.py
```python
import datetime
import logging
import re
import time
import traceback
import simplejson as json
from django.contrib.auth.decorators import permission_required
from django.core import serializers
from django.db import connection
from django.db import transaction
from django.db.models import Q, Min
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from common.config import SysConfig
from common.utils.const import WorkflowDict
from common.utils.extend_json_encoder import ExtendJSONEncoder
from sql.utils.dao import Dao
from sql.utils.data_masking import Masking
from sql.utils.group import user_instances, user_groups
from sql.utils.workflow import Workflow
from .models import QueryPrivilegesApply, QueryPrivileges, QueryLog, SqlGroup
# Module-level singletons shared by all views in this module.
logger = logging.getLogger('default')
datamasking = Masking()
workflowOb = Workflow()
# Workflow audit callback for query-privilege applications.
def query_audit_call_back(workflow_id, workflow_status):
    """Sync the application row with the audit result; on approval,
    materialize the granted privileges in bulk (one row per database for
    priv_type 1, one per table for priv_type 2)."""
    # Update the business-table status.
    apply_info = QueryPrivilegesApply()
    apply_info.apply_id = workflow_id
    apply_info.status = workflow_status
    apply_info.save(update_fields=['status'])
    # On approval, insert the privilege rows in one batch to keep it cheap.
    if workflow_status == WorkflowDict.workflow_status['audit_success']:
        apply_queryset = QueryPrivilegesApply.objects.get(apply_id=workflow_id)
        # Guard: leave empty for unexpected priv_type values instead of
        # hitting a NameError at bulk_create below.
        insertlist = []
        # Database-level privileges.
        if apply_queryset.priv_type == 1:
            insertlist = [QueryPrivileges(
                user_name=apply_queryset.user_name,
                user_display=apply_queryset.user_display,
                instance_name=apply_queryset.instance_name, db_name=db_name,
                table_name=apply_queryset.table_list, valid_date=apply_queryset.valid_date,
                limit_num=apply_queryset.limit_num, priv_type=apply_queryset.priv_type) for db_name in
                apply_queryset.db_list.split(',')]
        # Table-level privileges.
        elif apply_queryset.priv_type == 2:
            insertlist = [QueryPrivileges(
                user_name=apply_queryset.user_name,
                user_display=apply_queryset.user_display,
                instance_name=apply_queryset.instance_name, db_name=apply_queryset.db_list,
                table_name=table_name, valid_date=apply_queryset.valid_date,
                limit_num=apply_queryset.limit_num, priv_type=apply_queryset.priv_type) for table_name in
                apply_queryset.table_list.split(',')]
        QueryPrivileges.objects.bulk_create(insertlist)
# Query-privilege validation.
def query_priv_check(user, instance_name, db_name, sql_content, limit_num):
    """Check whether `user` may run `sql_content` on instance/db and compute
    the effective row limit.

    Returns {'status', 'msg', 'data': {'priv_check', 'limit_num'}};
    status != 0 means the check failed and msg explains why. priv_check is
    1 when table-level validation was possible, 2 when only the db-level
    fallback check could be made.
    """
    result = {'status': 0, 'msg': 'ok', 'data': {'priv_check': 1, 'limit_num': 0}}
    # Check the user's privilege on this database/table.
    if user.is_superuser:
        # Superusers skip privilege checks; only the admin query limit applies.
        if SysConfig().sys_config.get('admin_query_limit'):
            user_limit_num = int(SysConfig().sys_config.get('admin_query_limit'))
        else:
            user_limit_num = 0
        limit_num = int(user_limit_num) if int(limit_num) == 0 else min(int(limit_num), int(user_limit_num))
    # `show create table` makes inception error out, so it is handled
    # separately; explain is skipped entirely (falls through unvalidated).
    elif re.match(r"^show\s+create\s+table", sql_content.lower()):
        tb_name = re.sub('^show\s+create\s+table', '', sql_content, count=1, flags=0).strip()
        # First check for a whole-database privilege.
        db_privileges = QueryPrivileges.objects.filter(user_name=user.username, instance_name=instance_name,
                                                       db_name=db_name, priv_type=1,
                                                       valid_date__gte=datetime.datetime.now(), is_deleted=0)
        # Without a whole-db privilege, check the table-level one.
        if len(db_privileges) == 0:
            tb_privileges = QueryPrivileges.objects.filter(user_name=user.username, instance_name=instance_name,
                                                           db_name=db_name, table_name=tb_name, priv_type=2,
                                                           valid_date__gte=datetime.datetime.now(), is_deleted=0)
            if len(tb_privileges) == 0:
                result['status'] = 1
                result['msg'] = '你无' + db_name + '.' + tb_name + '表的查询权限!请先到查询权限管理进行申请'
                return result
    # Regular SQL query: table-level validation is possible.
    else:
        # Use inception's syntax-tree print to discover the referenced tables.
        table_ref_result = datamasking.query_table_ref(sql_content + ';', instance_name, db_name)
        # Parsing succeeded — validate per-table privileges.
        if table_ref_result['status'] == 0:
            table_ref = table_ref_result['data']
            # Check that the user holds privileges on every referenced table.
            QueryPrivilegesOb = QueryPrivileges.objects.filter(user_name=user.username, instance_name=instance_name)
            # First check for a whole-database privilege.
            for table in table_ref:
                db_privileges = QueryPrivilegesOb.filter(db_name=table['db'], priv_type=1,
                                                         valid_date__gte=datetime.datetime.now(),
                                                         is_deleted=0)
                # Without a whole-db privilege, check the table-level one.
                if len(db_privileges) == 0:
                    tb_privileges = QueryPrivilegesOb.filter(db_name=table['db'], table_name=table['table'],
                                                             valid_date__gte=datetime.datetime.now(), is_deleted=0)
                    if len(tb_privileges) == 0:
                        result['status'] = 1
                        result['msg'] = '你无' + table['db'] + '.' + table['table'] + '表的查询权限!请先到查询权限管理进行申请'
                        return result
        # Table discovery failed — fall back to a db-level check, and let the
        # config decide whether execution may continue.
        else:
            table_ref = None
            # Validate the database privilege so a parse failure does not
            # skip privilege checking altogether.
            privileges = QueryPrivileges.objects.filter(user_name=user.username, instance_name=instance_name,
                                                        db_name=db_name,
                                                        valid_date__gte=datetime.datetime.now(),
                                                        is_deleted=0)
            if len(privileges) == 0:
                result['status'] = 1
                result['msg'] = '你无' + db_name + '数据库的查询权限!请先到查询权限管理进行申请'
                return result
            if SysConfig().sys_config.get('query_check'):
                return table_ref_result
            else:
                result['data']['priv_check'] = 2
        # Take the smallest limit among the privileges covering the query.
        if table_ref:
            db_list = [table_info['db'] for table_info in table_ref]
            table_list = [table_info['table'] for table_info in table_ref]
            user_limit_num = QueryPrivileges.objects.filter(user_name=user.username,
                                                            instance_name=instance_name,
                                                            db_name__in=db_list,
                                                            table_name__in=table_list,
                                                            valid_date__gte=datetime.datetime.now(),
                                                            is_deleted=0).aggregate(Min('limit_num'))['limit_num__min']
            if user_limit_num is None:
                # No table-level rows matched — use the db-level minimum.
                user_limit_num = QueryPrivileges.objects.filter(user_name=user.username,
                                                                instance_name=instance_name,
                                                                db_name=db_name,
                                                                valid_date__gte=datetime.datetime.now(), is_deleted=0
                                                                ).aggregate(Min('limit_num'))['limit_num__min']
        else:
            # No table list available — use the db-level minimum.
            user_limit_num = QueryPrivileges.objects.filter(user_name=user.username,
                                                            instance_name=instance_name,
                                                            db_name=db_name,
                                                            valid_date__gte=datetime.datetime.now(),
                                                            is_deleted=0).aggregate(Min('limit_num'))['limit_num__min']
        limit_num = int(user_limit_num) if int(limit_num) == 0 else min(int(limit_num), int(user_limit_num))
    result['data']['limit_num'] = limit_num
    return result
# List query-privilege applications.
@permission_required('sql.menu_queryapplylist', raise_exception=True)
def getqueryapplylist(request):
    """Return a paginated JSON list of privilege applications: applicants see
    their own, reviewers see their resource groups', superusers see all."""
    # Current user info.
    user = request.user
    limit = int(request.POST.get('limit'))
    offset = int(request.POST.get('offset'))
    limit = offset + limit
    search = request.POST.get('search', '')
    # Applicants only see their own rows; superusers see everything;
    # reviewers see the rows of the groups they audit.
    if user.is_superuser:
        lists = QueryPrivilegesApply.objects.all().filter(
            Q(title__contains=search) | Q(user_display__contains=search)).order_by('-apply_id')[
                offset:limit].values(
            'apply_id', 'title', 'instance_name', 'db_list', 'priv_type', 'table_list', 'limit_num', 'valid_date',
            'user_display', 'status', 'create_time', 'group_name'
        )
        # BUGFIX: the count must use the same Q filter as the page query;
        # filtering on title alone made the pagination total disagree with
        # rows matched via user_display.
        count = QueryPrivilegesApply.objects.all().filter(
            Q(title__contains=search) | Q(user_display__contains=search)).count()
    elif user.has_perm('sql.query_review'):
        # Resolve the resource groups the reviewer belongs to first.
        group_list = user_groups(user)
        group_ids = [group.group_id for group in group_list]
        lists = QueryPrivilegesApply.objects.filter(group_id__in=group_ids).filter(
            Q(title__contains=search) | Q(user_display__contains=search)).order_by('-apply_id')[offset:limit].values(
            'apply_id', 'title', 'instance_name', 'db_list', 'priv_type', 'table_list', 'limit_num', 'valid_date',
            'user_display', 'status', 'create_time', 'group_name'
        )
        count = QueryPrivilegesApply.objects.filter(group_id__in=group_ids).filter(
            Q(title__contains=search) | Q(user_display__contains=search)).count()
    else:
        lists = QueryPrivilegesApply.objects.filter(user_name=user.username).filter(
            Q(title__contains=search) | Q(user_display__contains=search)).order_by('-apply_id')[offset:limit].values(
            'apply_id', 'title', 'instance_name', 'db_list', 'priv_type', 'table_list', 'limit_num', 'valid_date',
            'user_display', 'status', 'create_time', 'group_name'
        )
        count = QueryPrivilegesApply.objects.filter(user_name=user.username).filter(
            Q(title__contains=search) | Q(user_display__contains=search)).count()
    # Serialize the QuerySet.
    rows = [row for row in lists]
    result = {"total": count, "rows": rows}
    # Return the result as JSON.
    return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')
# Apply for query privileges.
@permission_required('sql.query_applypriv', raise_exception=True)
def applyforprivileges(request):
    """Create a query-privilege application (db-level priv_type=1 or
    table-level priv_type=2), reject duplicates of privileges already held,
    and kick off the audit workflow inside a transaction.

    NOTE(review): a priv_type other than 1/2 leaves db_list/table_list
    undefined and would raise NameError below — confirm the frontend only
    ever submits 1 or 2.
    """
    title = request.POST['title']
    instance_name = request.POST['instance_name']
    group_name = request.POST['group_name']
    group_id = SqlGroup.objects.get(group_name=group_name).group_id
    priv_type = request.POST['priv_type']
    db_name = request.POST['db_name']
    valid_date = request.POST['valid_date']
    limit_num = request.POST['limit_num']
    try:
        workflow_remark = request.POST['apply_remark']
    except Exception:
        workflow_remark = ''
    # Current user info.
    user = request.user
    # Server-side parameter validation.
    result = {'status': 0, 'msg': 'ok', 'data': []}
    if int(priv_type) == 1:
        db_list = request.POST['db_list']
        if title is None or instance_name is None or db_list is None or valid_date is None or limit_num is None:
            result['status'] = 1
            result['msg'] = '请填写完整'
            return HttpResponse(json.dumps(result), content_type='application/json')
    elif int(priv_type) == 2:
        table_list = request.POST['table_list']
        if title is None or instance_name is None or db_name is None or valid_date is None or table_list is None or limit_num is None:
            result['status'] = 1
            result['msg'] = '请填写完整'
            return HttpResponse(json.dumps(result), content_type='application/json')
    try:
        user_instances(request.user, 'slave').get(instance_name=instance_name)
    except Exception:
        context = {'errMsg': '你所在组未关联该从库!'}
        return render(request, 'error.html', context)
    # Decide whether the application is db- or table-scoped.
    # Database-level privilege.
    if int(priv_type) == 1:
        db_list = db_list.split(',')
        # Reject databases the user already holds whole-db privileges on.
        own_dbs = QueryPrivileges.objects.filter(instance_name=instance_name, user_name=user.username,
                                                 db_name__in=db_list,
                                                 valid_date__gte=datetime.datetime.now(), priv_type=1,
                                                 is_deleted=0).values('db_name')
        own_db_list = [table_info['db_name'] for table_info in own_dbs]
        if own_db_list is None:
            pass
        else:
            for db_name in db_list:
                if db_name in own_db_list:
                    result['status'] = 1
                    result['msg'] = '你已拥有' + instance_name + '实例' + db_name + '库的全部查询权限,不能重复申请'
                    return HttpResponse(json.dumps(result), content_type='application/json')
    # Table-level privilege.
    elif int(priv_type) == 2:
        table_list = table_list.split(',')
        # Reject tables the user already holds privileges on.
        own_tables = QueryPrivileges.objects.filter(instance_name=instance_name, user_name=user.username,
                                                    db_name=db_name,
                                                    table_name__in=table_list, valid_date__gte=datetime.datetime.now(),
                                                    priv_type=2, is_deleted=0).values('table_name')
        own_table_list = [table_info['table_name'] for table_info in own_tables]
        if own_table_list is None:
            pass
        else:
            for table_name in table_list:
                if table_name in own_table_list:
                    result['status'] = 1
                    result['msg'] = '你已拥有' + instance_name + '实例' + db_name + '.' + table_name + '表的查询权限,不能重复申请'
                    return HttpResponse(json.dumps(result), content_type='application/json')
    # Use a transaction to keep the data consistent.
    try:
        with transaction.atomic():
            # Persist the application.
            applyinfo = QueryPrivilegesApply()
            applyinfo.title = title
            applyinfo.group_id = group_id
            applyinfo.group_name = group_name
            applyinfo.audit_auth_groups = Workflow.auditsettings(group_id, WorkflowDict.workflow_type['query'])
            applyinfo.user_name = user.username
            applyinfo.user_display = user.display
            applyinfo.instance_name = instance_name
            if int(priv_type) == 1:
                applyinfo.db_list = ','.join(db_list)
                applyinfo.table_list = ''
            elif int(priv_type) == 2:
                applyinfo.db_list = db_name
                applyinfo.table_list = ','.join(table_list)
            applyinfo.priv_type = int(priv_type)
            applyinfo.valid_date = valid_date
            applyinfo.status = WorkflowDict.workflow_status['audit_wait']  # pending audit
            applyinfo.limit_num = limit_num
            applyinfo.create_user = user.username
            applyinfo.save()
            apply_id = applyinfo.apply_id
            # Register the audit with the workflow engine (workflow_type=1
            # is the query-privilege application type).
            auditresult = workflowOb.addworkflowaudit(request, WorkflowDict.workflow_type['query'], apply_id)
            if auditresult['status'] == 0:
                # Update the audit status and, if auto-approved, insert the
                # privilege rows.
                query_audit_call_back(apply_id, auditresult['data']['workflow_status'])
    except Exception as msg:
        logger.error(traceback.format_exc())
        result['status'] = 1
        result['msg'] = str(msg)
    else:
        result = auditresult
    return HttpResponse(json.dumps(result), content_type='application/json')
# Manage the user's query privileges.
def getuserprivileges(request):
    """Return a paginated JSON list of active (non-deleted, unexpired)
    query privileges — every user's for superusers (filterable by
    user_name, 'all' disables the filter), otherwise only the caller's."""
    user_name = request.POST.get('user_name')
    limit = int(request.POST.get('limit'))
    offset = int(request.POST.get('offset'))
    limit = offset + limit
    search = request.POST.get('search', '')
    # Permission: everyone except superusers sees only their own rows.
    user = request.user
    # Fetch the privilege rows.
    if user.is_superuser:
        if user_name != 'all':
            privilegeslist = QueryPrivileges.objects.all().filter(user_name=user_name,
                                                                  is_deleted=0,
                                                                  table_name__contains=search,
                                                                  valid_date__gte=datetime.datetime.now()
                                                                  ).order_by('-privilege_id')[offset:limit]
            privilegeslistCount = QueryPrivileges.objects.all().filter(user_name=user_name,
                                                                       is_deleted=0,
                                                                       table_name__contains=search,
                                                                       valid_date__gte=datetime.datetime.now()).count()
        else:
            privilegeslist = QueryPrivileges.objects.all().filter(is_deleted=0,
                                                                  table_name__contains=search,
                                                                  valid_date__gte=datetime.datetime.now()
                                                                  ).order_by('-privilege_id')[offset:limit]
            privilegeslistCount = QueryPrivileges.objects.all().filter(is_deleted=0,
                                                                       table_name__contains=search,
                                                                       valid_date__gte=datetime.datetime.now()
                                                                       ).count()
    else:
        privilegeslist = QueryPrivileges.objects.filter(user_name=user.username,
                                                        table_name__contains=search,
                                                        is_deleted=0,
                                                        valid_date__gte=datetime.datetime.now()
                                                        ).order_by('-privilege_id')[offset:limit]
        privilegeslistCount = QueryPrivileges.objects.filter(user_name=user.username,
                                                             table_name__contains=search,
                                                             is_deleted=0,
                                                             valid_date__gte=datetime.datetime.now()
                                                             ).count()
    # Serialize the QuerySet, exposing the pk as 'id' for the frontend.
    privilegeslist = serializers.serialize("json", privilegeslist)
    privilegeslist = json.loads(privilegeslist)
    privilegeslist_result = []
    for i in range(len(privilegeslist)):
        privilegeslist[i]['fields']['id'] = privilegeslist[i]['pk']
        privilegeslist_result.append(privilegeslist[i]['fields'])
    result = {"total": privilegeslistCount, "rows": privilegeslist_result}
    # Return the result as JSON.
    return HttpResponse(json.dumps(result), content_type='application/json')
# Modify a privilege row.
@permission_required('sql.query_mgtpriv', raise_exception=True)
def modifyqueryprivileges(request):
    """Soft-delete (type=1) or update valid_date/limit_num (type=2) of a
    single query-privilege row, returning a JSON status."""
    privilege_id = request.POST.get('privilege_id')
    # Renamed from `type` to avoid shadowing the builtin.
    op_type = request.POST.get('type')
    result = {'status': 0, 'msg': 'ok', 'data': []}
    # type=1 deletes the privilege, type=2 modifies it.
    privileges = QueryPrivileges()
    if int(op_type) == 1:
        # Soft-delete the privilege.
        privileges.privilege_id = int(privilege_id)
        privileges.is_deleted = 1
        privileges.save(update_fields=['is_deleted'])
        return HttpResponse(json.dumps(result), content_type='application/json')
    elif int(op_type) == 2:
        # Update validity and row limit.
        valid_date = request.POST.get('valid_date')
        limit_num = request.POST.get('limit_num')
        privileges.privilege_id = int(privilege_id)
        privileges.valid_date = valid_date
        privileges.limit_num = limit_num
        privileges.save(update_fields=['valid_date', 'limit_num'])
        return HttpResponse(json.dumps(result), content_type='application/json')
    # BUGFIX: previously fell through and returned None (a Django 500);
    # report the invalid parameter instead.
    result['status'] = 1
    result['msg'] = '无效的操作类型'
    return HttpResponse(json.dumps(result), content_type='application/json')
# Audit a query-privilege application.
@permission_required('sql.query_review', raise_exception=True)
def queryprivaudit(request):
    """Approve or reject a privilege application through the workflow
    engine, then redirect to the application's detail page."""
    # Current user info.
    user = request.user
    apply_id = int(request.POST['apply_id'])
    audit_status = int(request.POST['audit_status'])
    audit_remark = request.POST.get('audit_remark')
    if audit_remark is None:
        audit_remark = ''
    if Workflow.can_review(request.user, apply_id, 1) is False:
        context = {'errMsg': '你无权操作当前工单!'}
        return render(request, 'error.html', context)
    # Use a transaction to keep the data consistent.
    try:
        with transaction.atomic():
            # Look up the audit_id.
            audit_id = Workflow.auditinfobyworkflow_id(workflow_id=apply_id,
                                                       workflow_type=WorkflowDict.workflow_type['query']).audit_id
            # Delegate the audit to the workflow engine.
            auditresult = workflowOb.auditworkflow(request, audit_id, audit_status, user.username, audit_remark)
            # Propagate the audit result back to the business table.
            auditInfo = Workflow.auditinfo(audit_id)
            if auditInfo.workflow_type == WorkflowDict.workflow_type['query']:
                # Update status and, on approval, insert the privileges.
                query_audit_call_back(auditInfo.workflow_id, auditresult['data']['workflow_status'])
    except Exception as msg:
        logger.error(traceback.format_exc())
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    return HttpResponseRedirect(reverse('sql:queryapplydetail', args=(apply_id,)))
# Run a SQL query and return its result.
@permission_required('sql.query_submit', raise_exception=True)
def query(request):
    """Validate, privilege-check, limit, execute and (optionally) mask a
    read-only SQL statement, log successful queries, and return JSON."""
    instance_name = request.POST.get('instance_name')
    sql_content = request.POST.get('sql_content')
    db_name = request.POST.get('db_name')
    limit_num = request.POST.get('limit_num')
    result = {'status': 0, 'msg': 'ok', 'data': {}}
    # Server-side parameter validation.
    if sql_content is None or db_name is None or instance_name is None or limit_num is None:
        result['status'] = 1
        result['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(result), content_type='application/json')
    sql_content = sql_content.strip()
    # Current user info.
    user = request.user
    # Strip comment lines and non-query statements.
    sql_content = ''.join(
        map(lambda x: re.compile(r'(^--\s+.*|^/\*.*\*/;\s*$)').sub('', x, count=1),
            sql_content.splitlines(1))).strip()
    # Collapse blank lines.
    sql_content = re.sub('[\r\n\f]{2,}', '\n', sql_content)
    sql_list = sql_content.strip().split('\n')
    for sql in sql_list:
        if re.match(r"^select|^show|^explain", sql.lower()):
            break
        else:
            result['status'] = 1
            result['msg'] = '仅支持^select|^show|^explain语法,请联系管理员!'
            return HttpResponse(json.dumps(result), content_type='application/json')
    # Execute only the first statement (up to the first semicolon).
    sql_content = sql_content.strip().split(';')[0]
    try:
        # Privilege check.
        priv_check_info = query_priv_check(user, instance_name, db_name, sql_content, limit_num)
        if priv_check_info['status'] == 0:
            limit_num = priv_check_info['data']['limit_num']
            priv_check = priv_check_info['data']['priv_check']
        else:
            return HttpResponse(json.dumps(priv_check_info), content_type='application/json')
        if re.match(r"^explain", sql_content.lower()):
            limit_num = 0
        # Append a LIMIT clause to select statements that lack one.
        if re.match(r"^select", sql_content.lower()):
            if re.search(r"limit\s+(\d+)$", sql_content.lower()) is None:
                if re.search(r"limit\s+\d+\s*,\s*(\d+)$", sql_content.lower()) is None:
                    sql_content = sql_content + ' limit ' + str(limit_num)
        sql_content = sql_content + ';'
        # Execute the query and record the wall-clock time.
        t_start = time.time()
        sql_result = Dao(instance_name=instance_name).mysql_query(str(db_name), sql_content, limit_num)
        t_end = time.time()
        cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
        sql_result['cost_time'] = cost_time
        # Data masking — only if enabled, and only for select statements.
        hit_rule = 0 if re.match(r"^select", sql_content.lower()) else 2  # 0 unknown, 1 hit, 2 miss
        masking = 2  # was the result actually masked: 1 yes, 2 no
        t_start = time.time()
        # Mask select results only.
        if SysConfig().sys_config.get('data_masking') and re.match(r"^select", sql_content.lower()):
            try:
                masking_result = datamasking.data_masking(instance_name, db_name, sql_content, sql_result)
                if masking_result['status'] != 0 and SysConfig().sys_config.get('query_check'):
                    return HttpResponse(json.dumps(masking_result), content_type='application/json')
                else:
                    hit_rule = masking_result['data']['hit_rule']
                    masking = 1 if hit_rule == 1 else 2
            except Exception:
                logger.error(traceback.format_exc())
                hit_rule = 0
                masking = 2
                if SysConfig().sys_config.get('query_check'):
                    result['status'] = 1
                    result['msg'] = '脱敏数据报错,请联系管理员'
                    return HttpResponse(json.dumps(result), content_type='application/json')
        t_end = time.time()
        masking_cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
        sql_result['masking_cost_time'] = masking_cost_time
        result['data'] = sql_result
        # Persist a log entry for successful queries only.
        if sql_result.get('Error'):
            pass
        else:
            query_log = QueryLog()
            query_log.username = user.username
            query_log.user_display = user.display
            query_log.db_name = db_name
            query_log.instance_name = instance_name
            query_log.sqllog = sql_content
            if int(limit_num) == 0:
                limit_num = int(sql_result['effect_row'])
            else:
                limit_num = min(int(limit_num), int(sql_result['effect_row']))
            query_log.effect_row = limit_num
            query_log.cost_time = cost_time
            query_log.priv_check = priv_check
            query_log.hit_rule = hit_rule
            query_log.masking = masking
            # Retry once after reconnecting, in case the connection timed out.
            try:
                query_log.save()
            except:
                connection.close()
                query_log.save()
    except Exception as e:
        logger.error(traceback.format_exc())
        result['status'] = 1
        result['msg'] = str(e)
    # Return the result as JSON; fall back to default=str if the custom
    # encoder cannot serialize the payload.
    try:
        return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
                            content_type='application/json')
    except Exception:
        return HttpResponse(json.dumps(result, default=str, bigint_as_string=True),
                            content_type='application/json')
# SQL query history.
@permission_required('sql.menu_sqlquery', raise_exception=True)
def querylog(request):
    """Return the caller's SQL query history (all users' for superusers),
    paginated and optionally filtered, as JSON for the frontend table."""
    user = request.user
    page_size = int(request.POST.get('limit'))
    offset = int(request.POST.get('offset'))
    end = offset + page_size
    search = request.POST.get('search', '')
    # Superusers see every log entry; others only their own.
    if user.is_superuser:
        base_qs = QueryLog.objects.all().filter(
            Q(sqllog__contains=search) | Q(user_display__contains=search))
    else:
        base_qs = QueryLog.objects.filter(username=user.username).filter(
            sqllog__contains=search)
    sql_log_count = base_qs.count()
    page = base_qs.order_by('-id')[offset:end]
    # Serialize the QuerySet and keep only the model fields per row.
    sql_log = [entry['fields']
               for entry in json.loads(serializers.serialize("json", page))]
    result = {"total": sql_log_count, "rows": sql_log}
    # Return the result as JSON.
    return HttpResponse(json.dumps(result), content_type='application/json')
# SQL execution plan.
@permission_required('sql.optimize_sqladvisor', raise_exception=True)
def explain(request):
    """Run an 'explain ...' statement on the chosen instance/db and return
    the plan as JSON."""
    sql_content = request.POST.get('sql_content')
    instance_name = request.POST.get('instance_name')
    db_name = request.POST.get('db_name')
    result = {'status': 0, 'msg': 'ok', 'data': []}
    # Server-side parameter validation.
    if sql_content is None or instance_name is None:
        result['status'] = 1
        result['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(result), content_type='application/json')
    sql_content = sql_content.strip()
    # Only explain statements are accepted.
    if not re.match(r"^explain", sql_content.lower()):
        result['status'] = 1
        result['msg'] = '仅支持explain开头的语句,请检查'
        return HttpResponse(json.dumps(result), content_type='application/json')
    # Run only the first statement (up to the first semicolon).
    sql_content = sql_content.split(';')[0]
    # Fetch the execution plan.
    result['data'] = Dao(instance_name=instance_name).mysql_query(str(db_name), sql_content)
    # Return the result as JSON.
    return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')
```
#### File: sql/utils/execute_sql.py
```python
import re
import traceback
import simplejson as json
import time
from threading import Thread
from django.db import connection
from django.utils import timezone
from sql.utils.group import auth_group_users
from common.config import SysConfig
from sql.utils.dao import Dao
from common.utils.const import Const, WorkflowDict
from common.utils.sendmsg import MailSender
from sql.utils.inception import InceptionDao
from sql.models import Users, SqlWorkflow, SqlGroup
from sql.utils.workflow import Workflow
import logging
logger = logging.getLogger('default')
# Execution callback that bypasses inception and runs the SQL directly.
def execute_skipinc_call_back(workflowId, instance_name, db_name, sql_content, url):
    """Execute the workflow's SQL straight against the target instance,
    record the outcome on the SqlWorkflow row, add a workflow log entry,
    and send the result notification."""
    workflowDetail = SqlWorkflow.objects.get(id=workflowId)
    try:
        # Execute the SQL and time it.
        t_start = time.time()
        execute_result = Dao(instance_name=instance_name).mysql_execute(db_name, sql_content)
        t_end = time.time()
        execute_time = "%5s" % "{:.4f}".format(t_end - t_start)
        execute_result['execute_time'] = execute_time + 'sec'
        workflowDetail = SqlWorkflow.objects.get(id=workflowId)
        if execute_result.get('Warning'):
            workflowDetail.status = Const.workflowStatus['exception']
        elif execute_result.get('Error'):
            workflowDetail.status = Const.workflowStatus['exception']
        else:
            workflowDetail.status = Const.workflowStatus['finish']
        workflowDetail.finish_time = timezone.now()
        workflowDetail.execute_result = json.dumps(execute_result)
        workflowDetail.is_manual = 1
        workflowDetail.audit_remark = ''
        workflowDetail.is_backup = '否'
        # Reconnect after closing, to avoid a timed-out connection.
        connection.close()
        workflowDetail.save()
    except Exception:
        # NOTE(review): failures are only logged; the workflow row keeps its
        # previous status in that case.
        logger.error(traceback.format_exc())
    # Append a workflow log entry.
    # Look up the audit_id.
    audit_id = Workflow.auditinfobyworkflow_id(workflow_id=workflowId,
                                               workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
    Workflow.add_workflow_log(audit_id=audit_id,
                              operation_type=6,
                              operation_type_desc='执行结束',
                              operation_info='执行结果:{}'.format(workflowDetail.status),
                              operator='',
                              operator_display='系统'
                              )
    # Send the result notification.
    send_msg(workflowDetail, url)
# Execution callback that runs the SQL through inception.
def execute_call_back(workflowId, instance_name, url):
    """Have inception split and execute the workflow's SQL, record the
    outcome on the SqlWorkflow row, add a workflow log entry, and send the
    result notification."""
    workflowDetail = SqlWorkflow.objects.get(id=workflowId)
    try:
        # Let inception split the statements first, then execute them.
        (finalStatus, finalList) = InceptionDao(instance_name=instance_name).executeFinal(workflowDetail)
        # Store the result in the DB field as JSON.
        strJsonResult = json.dumps(finalList)
        workflowDetail = SqlWorkflow.objects.get(id=workflowId)
        workflowDetail.execute_result = strJsonResult
        workflowDetail.finish_time = timezone.now()
        workflowDetail.status = finalStatus
        workflowDetail.is_manual = 0
        workflowDetail.audit_remark = ''
        # Reconnect after closing, to avoid a timed-out connection.
        connection.close()
        workflowDetail.save()
    except Exception:
        # NOTE(review): failures are only logged; the workflow row keeps its
        # previous status in that case.
        logger.error(traceback.format_exc())
    # Append a workflow log entry.
    # Look up the audit_id.
    audit_id = Workflow.auditinfobyworkflow_id(workflow_id=workflowId,
                                               workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
    Workflow.add_workflow_log(audit_id=audit_id,
                              operation_type=6,
                              operation_type_desc='执行结束',
                              operation_info='执行结果:{}'.format(workflowDetail.status),
                              operator='',
                              operator_display='系统'
                              )
    # Send the result notification.
    send_msg(workflowDetail, url)
# Execute SQL for a scheduled (timed) task.
def execute_job(workflowId, url):
    """Entry point for scheduled execution of an approved SQL workflow:
    re-validate state, refresh the split/review cache, then execute
    asynchronously via execute_call_back."""
    job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)
    logger.debug('execute_job:' + job_id + ' start')
    workflowDetail = SqlWorkflow.objects.get(id=workflowId)
    instance_name = workflowDetail.instance_name
    db_name = workflowDetail.db_name
    # Server-side re-check: the workflow must still be in the
    # scheduled-execution state.
    if workflowDetail.status != Const.workflowStatus['timingtask']:
        raise Exception('工单不是定时执行状态')
    # Move the workflow to 'executing' and stamp reviewok_time.
    workflowDetail.status = Const.workflowStatus['executing']
    workflowDetail.reviewok_time = timezone.now()
    try:
        workflowDetail.save()
    except Exception:
        # Reconnect after closing, to avoid a timed-out connection.
        connection.close()
        workflowDetail.save()
    logger.debug('execute_job:' + job_id + ' executing')
    # Re-split and re-check before executing to refresh the SHA1 cache;
    # if another process performed this step mid-execution, inception
    # could core-dump.
    splitReviewResult = InceptionDao(instance_name=instance_name).sqlautoReview(workflowDetail.sql_content,
                                                                                db_name,
                                                                                isSplit='yes')
    workflowDetail.review_content = json.dumps(splitReviewResult)
    try:
        workflowDetail.save()
    except Exception:
        # Reconnect after closing, to avoid a timed-out connection.
        connection.close()
        workflowDetail.save()
    # Execute via an asynchronous callback so the workflow cannot get stuck
    # in the 'executing' state.
    t = Thread(target=execute_call_back, args=(workflowId, instance_name, url))
    t.start()
    # Append a workflow log entry.
    # Look up the audit_id.
    audit_id = Workflow.auditinfobyworkflow_id(workflow_id=workflowId,
                                               workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
    Workflow.add_workflow_log(audit_id=audit_id,
                              operation_type=5,
                              operation_type_desc='执行工单',
                              operation_info='系统定时执行',
                              operator='',
                              operator_display='系统'
                              )
# Execution-result notification (mail / DingTalk), with optional DDL alerting.
def _contains_ddl(sql_content):
    """Return True when *sql_content* contains at least one DDL statement.

    Comment-only lines are stripped and blank lines collapsed first; each
    ';'-separated statement is then matched against ALTER TABLE (excluding
    pure index/key changes), CREATE database/schema/table, DROP, RENAME and
    TRUNCATE.
    """
    # Compile once and reuse for every line (hoisted out of the map call).
    comment_pattern = re.compile(r'(^--\s+.*|^/\*.*\*/;\s*$)')
    # Drop single-line comments ("-- ..." and "/* ... */;" lines).
    sql_content = ''.join(
        map(lambda line: comment_pattern.sub('', line, count=1),
            sql_content.splitlines(1))).strip()
    # Collapse runs of blank lines.
    sql_content = re.sub('[\r\n\f]{2,}', '\n', sql_content)
    ddl_patterns = (
        # ALTER that really changes structure (index/key-only changes excluded)
        r"^alter\s+table\s+\S+\s+(add|alter|change|drop|rename|modify)\s+(?!.*(index|key|unique))",
        # CREATE database / schema / table
        r"^create\s+(temporary\s+)?(database|schema|table)",
        r"^drop",
        r"^rename",
        r"^truncate",
    )
    for row in sql_content.strip(';').split(';'):
        statement = row.strip().lower()
        if any(re.match(pattern, statement) for pattern in ddl_patterns):
            return True
    return False


def send_msg(workflowDetail, url):
    """Send execution-result notifications for a SQL workflow.

    Depending on system configuration this mails the applicant and reviewers
    (CC the DBA group), pushes a DingTalk message to the group webhook, and —
    when the workflow finished normally and contained DDL — additionally
    alerts the configured ddl_notify_auth_group.

    :param workflowDetail: the SqlWorkflow row that was executed
    :param url: workflow detail URL embedded in the message body
    """
    mailSender = MailSender()
    sys_config = SysConfig().sys_config
    # Current approver and the full approval chain.
    audit_auth_group, current_audit_auth_group = Workflow.review_info(workflowDetail.id, 2)
    audit_id = Workflow.auditinfobyworkflow_id(workflowDetail.id, 2).audit_id
    # Result summary sent to submitter/reviewers; DBAs are kept in the loop.
    msg_title = "[{}]工单{}#{}".format(WorkflowDict.workflow_type['sqlreview_display'], workflowDetail.status, audit_id)
    msg_content = '''发起人:{}\n审批流程:{}\n工单名称:{}\n工单地址:{}\n工单详情预览:{}\n'''.format(
        workflowDetail.engineer_display, audit_auth_group, workflowDetail.workflow_name, url,
        re.sub('[\r\n\f]{2,}', '\n', workflowDetail.sql_content[0:500].replace('\r', '')))
    if sys_config.get('mail'):
        # Mail the applicant and reviewers, CC the DBA group.
        notify_users = workflowDetail.audit_auth_groups.split(',')
        notify_users.append(workflowDetail.engineer)
        listToAddr = [email['email'] for email in Users.objects.filter(username__in=notify_users).values('email')]
        listCcAddr = [email['email'] for email in
                      auth_group_users(auth_group_names=['DBA'], group_id=workflowDetail.group_id).values('email')]
        mailSender.send_email(msg_title, msg_content, listToAddr, listCcAddr=listCcAddr)
    if sys_config.get('ding'):
        # DingTalk notification through the group's webhook.
        webhook_url = SqlGroup.objects.get(group_id=workflowDetail.group_id).ding_webhook
        MailSender.send_ding(webhook_url, msg_title + '\n' + msg_content)
    if sys_config.get('mail') and sys_config.get('ddl_notify_auth_group', None) \
            and workflowDetail.status == '已正常结束':
        # Alert the configured group when the executed SQL contained DDL.
        if _contains_ddl(workflowDetail.sql_content):
            msg_title = '[archery]有新的DDL语句执行完成#{}'.format(audit_id)
            msg_content = '''发起人:{}\n变更组:{}\n变更实例:{}\n变更数据库:{}\n工单名称:{}\n工单地址:{}\n工单预览:{}\n'''.format(
                Users.objects.get(username=workflowDetail.engineer).display,
                workflowDetail.group_name,
                workflowDetail.instance_name,
                workflowDetail.db_name,
                workflowDetail.workflow_name,
                url,
                workflowDetail.sql_content[0:500])
            # Recipients: members of the configured notification auth group.
            msg_to = [email['email'] for email in
                      Users.objects.filter(groups__name=sys_config.get('ddl_notify_auth_group')).values('email')]
            mailSender.send_email(msg_title, msg_content, msg_to)
``` |
{
"source": "jiangming1/qq2100803",
"score": 2
} |
#### File: components/bk_token/backends.py
```python
import logging
import traceback
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.db import IntegrityError
from blueapps.account import get_user_model
from blueapps.account.conf import ConfFixture
from blueapps.account.utils.http import send
from blueapps.utils import client
logger = logging.getLogger('component')
# Platform role value that marks a user as admin (maps to is_superuser/is_staff).
ROLE_TYPE_ADMIN = '1'
class TokenBackend(ModelBackend):
    """Authentication backend that verifies a bk_token against the login
    platform and syncs the local user record with the platform user info."""

    def authenticate(self, request=None, bk_token=None):
        """Verify *bk_token* and get-or-create the matching local user.

        Returns the user on success, or None when the token is missing,
        fails verification, or the platform user info cannot be fetched.
        """
        logger.debug(u"Enter in TokenBackend")
        # No token supplied: nothing to verify.
        if not bk_token:
            return None
        verify_result, username = self.verify_bk_token(bk_token)
        # Token failed platform verification.
        if not verify_result:
            return None
        user_model = get_user_model()
        try:
            user, _ = user_model.objects.get_or_create(username=username)
            get_user_info_result, user_info = self.get_user_info(bk_token)
            # Could not fetch user info from the platform.
            if not get_user_info_result:
                return None
            user.set_property(key='qq', value=user_info.get('qq', ''))
            user.set_property(key='language',
                              value=user_info.get('language', ''))
            user.set_property(key='time_zone',
                              value=user_info.get('time_zone', ''))
            user.set_property(key='role', value=user_info.get('role', ''))
            user.set_property(key='phone', value=user_info.get('phone', ''))
            user.set_property(key='email', value=user_info.get('email', ''))
            user.set_property(key='wx_userid',
                              value=user_info.get('wx_userid', ''))
            user.set_property(key='chname', value=user_info.get('chname', ''))
            # Keep local admin flags in sync with the platform role.
            role = user_info.get('role', '')
            is_admin = str(role) == ROLE_TYPE_ADMIN
            user.is_superuser = is_admin
            user.is_staff = is_admin
            user.save()
            return user
        except IntegrityError:
            # logger.exception already records the traceback; no need to pass
            # traceback.format_exc() as the message (that logged it twice).
            logger.exception(
                u"get_or_create UserModel fail or update_or_create UserProperty"
            )
            return None
        except Exception:
            logger.exception(u"Auto create & update UserModel fail")
            return None

    @staticmethod
    def get_user_info(bk_token):
        """Fetch user info from the platform ESB API.

        @param bk_token: bk_token
        @type bk_token: str
        @return: (True, user_info_dict) on success, (False, {}) on failure.
            The returned dict always carries the same keys regardless of the
            configured API version, e.g.::

                {
                    'qq': '', 'wx_userid': '', 'language': 'zh-cn',
                    'username': 'test', 'time_zone': 'Asia/Shanghai',
                    'role': 2, 'phone': '11111111111', 'email': 'test',
                    'chname': 'test',
                }
        @rtype: bool, dict
        """
        api_params = {
            'bk_token': bk_token
        }
        try:
            response = client.bk_login.get_user(api_params)
        except Exception as e:
            logger.exception(u"Abnormal error in get_user_info...:%s" % e)
            return False, {}
        if response.get('result') is True:
            # v1 and v2 of get_user differ in field names; normalise them
            # here so callers always see the same dict shape.
            origin_user_info = response.get('data', '')
            user_info = dict()
            # Fields shared by v1 and v2.
            user_info['wx_userid'] = origin_user_info.get('wx_userid', '')
            user_info['language'] = origin_user_info.get('language', '')
            user_info['time_zone'] = origin_user_info.get('time_zone', '')
            user_info['phone'] = origin_user_info.get('phone', '')
            user_info['chname'] = origin_user_info.get('chname', '')
            user_info['email'] = origin_user_info.get('email', '')
            user_info['qq'] = origin_user_info.get('qq', '')
            # v2-specific field names.
            if settings.DEFAULT_BK_API_VER == 'v2':
                user_info['username'] = origin_user_info.get('bk_username', '')
                user_info['role'] = origin_user_info.get('bk_role', '')
            # v1-specific field names.
            elif settings.DEFAULT_BK_API_VER == '':
                user_info['username'] = origin_user_info.get('username', '')
                user_info['role'] = origin_user_info.get('role', '')
            return True, user_info
        else:
            error_msg = response.get('message', '')
            error_data = response.get('data', '')
            logger.error(u"Failed to Get User Info: error=%(err)s, ret=%(ret)s"
                         % {
                             u'err': error_msg,
                             u'ret': error_data,
                         })
            return False, {}

    @staticmethod
    def verify_bk_token(bk_token):
        """Call VERIFY_URL to check whether *bk_token* is valid.

        @param bk_token: <KEY>"
        @type bk_token: str
        @return: (False, None) on failure, (True, username) on success
        @rtype: bool, None/str
        """
        api_params = {
            'bk_token': bk_token
        }
        try:
            response = send(ConfFixture.VERIFY_URL, 'GET', api_params,
                            verify=False)
        except Exception:
            logger.exception(u"Abnormal error in verify_bk_token...")
            return False, None
        if response.get('result'):
            data = response.get('data')
            username = data.get('username')
            return True, username
        else:
            error_msg = response.get('message', '')
            error_data = response.get('data', '')
            logger.error(u"Fail to verify bk_token, error=%s, ret=%s" % (
                error_msg, error_data))
            return False, None
```
#### File: blueapps/account/middlewares.py
```python
from django.utils.module_loading import import_string
from blueapps.account.conf import ConfFixture
def load_middleware(middleware):
    """Import and return the middleware class named *middleware* from the
    blueapps.account.components package."""
    dotted_path = 'blueapps.account.components.{middleware}'.format(
        middleware=middleware)
    return import_string(dotted_path)
# Resolve the concrete middleware classes declared by the current run-mode
# configuration; an absent entry simply leaves the corresponding name undefined.
if hasattr(ConfFixture, 'LOGIN_REQUIRED_MIDDLEWARE'):
    LoginRequiredMiddleware = load_middleware(
        ConfFixture.LOGIN_REQUIRED_MIDDLEWARE)
if hasattr(ConfFixture, 'WEIXIN_MIDDLEWARE'):
    WeixinLoginRequiredMiddleware = load_middleware(
        ConfFixture.WEIXIN_MIDDLEWARE)
```
#### File: core/exceptions/base.py
```python
from __future__ import unicode_literals
class BlueException(Exception):
    """Root of the application's exception hierarchy.

    Subclasses override MESSAGE / ERROR_CODE; both can also be overridden
    per-instance through the constructor.
    """

    MESSAGE = "app异常"
    ERROR_CODE = 500

    def __init__(self, message=None, error_code=None, *args):
        super(BlueException, self).__init__(*args)
        # Fall back to the class-level defaults when not supplied.
        self.error_code = error_code if error_code is not None else self.ERROR_CODE
        self.message = message if message is not None else self.MESSAGE
class ClientBlueException(BlueException):
    """Base for client-side (4xx-style) request errors."""
    MESSAGE = "客户端请求异常"
    ERROR_CODE = 40000
class ServerBlueException(BlueException):
    """Base for server-side (5xx-style) service errors."""
    MESSAGE = "服务端服务异常"
    ERROR_CODE = 50000
class ResourceNotFound(ClientBlueException):
    """The requested resource does not exist."""
    MESSAGE = "找不到请求的资源"
    ERROR_CODE = 40400
class ParamValidationError(ClientBlueException):
    """Request parameter validation failed."""
    MESSAGE = "参数验证失败"
    ERROR_CODE = 40000
class ParamRequired(ClientBlueException):
    """A required parameter is missing."""
    MESSAGE = "关键参数缺失"
    ERROR_CODE = 40001
class AccessForbidden(ClientBlueException):
    """Login / authentication failed."""
    MESSAGE = "登陆失败"
    ERROR_CODE = 40301
class RequestForbidden(ClientBlueException):
    """The request was rejected."""
    MESSAGE = "请求拒绝"
    ERROR_CODE = 40320
class ResourceLock(ClientBlueException):
    """The requested resource is locked."""
    MESSAGE = "请求资源被锁定"
    ERROR_CODE = 40330
class MethodError(ClientBlueException):
    """The request method is not supported."""
    MESSAGE = "请求方法不支持"
    ERROR_CODE = 40501
class DatabaseError(ServerBlueException):
    """A database operation failed."""
    MESSAGE = "数据库异常"
    ERROR_CODE = 50110
class ApiNetworkError(ServerBlueException):
    """A network failure made the remote service unavailable."""
    MESSAGE = "网络异常导致远程服务失效"
    ERROR_CODE = 50301
class ApiResultError(ServerBlueException):
    """The remote service returned an abnormal result."""
    MESSAGE = "远程服务请求结果异常"
    ERROR_CODE = 50302
class ApiNotAcceptable(ServerBlueException):
    """The remote service returned a result in an unexpected format."""
    MESSAGE = "远程服务返回结果格式异常"
    ERROR_CODE = 50303
``` |
{
"source": "Jiang-Muyun/SqueezeSegV2",
"score": 3
} |
#### File: src/imdb/kitti.py
```python
import os
import numpy as np
import subprocess
from .imdb import imdb
class kitti(imdb):
    """KITTI LiDAR dataset wrapper that reads pre-projected 2D lidar .npy frames."""

    def __init__(self, image_set, data_path, mc):
        # image_set: split name (e.g. 'train'/'val'); data_path: dataset root; mc: model config.
        imdb.__init__(self, 'kitti_'+image_set, mc)
        self._image_set = image_set
        self._data_root_path = data_path
        # Directory holding the projected 2D lidar frames as .npy files.
        self._lidar_2d_path = os.path.join(self._data_root_path, 'lidar_2d')
        # a list of string indices of images in the directory
        self._image_idx = self._load_image_set_idx()
        # a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
        # the image width and height
        ## batch reader ##
        self._perm_idx = None
        self._cur_idx = 0
        # TODO(bichen): add a random seed as parameter
        self._shuffle_image_idx()

    def _load_image_set_idx(self):
        """Read the split's index file (ImageSet/<split>.txt) into a list of string ids."""
        image_set_file = os.path.join(
            self._data_root_path, 'ImageSet', self._image_set+'.txt')
        assert os.path.exists(image_set_file), \
            'File does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_idx = [x.strip() for x in f.readlines()]
        return image_idx

    def _lidar_2d_path_at(self, idx):
        """Resolve the .npy file path for the frame id *idx*."""
        if idx[:4] == 'gta_':
            # NOTE(review): self._gta_2d_path is never assigned in __init__,
            # so 'gta_' indices will raise AttributeError here — confirm
            # where this attribute is supposed to be set.
            lidar_2d_path = os.path.join(self._gta_2d_path, idx+'.npy')
        else:
            lidar_2d_path = os.path.join(self._lidar_2d_path, idx+'.npy')
        assert os.path.exists(lidar_2d_path), \
            'File does not exist: {}'.format(lidar_2d_path)
        return lidar_2d_path
``` |
{
"source": "jiangnanboy/CNN4IE",
"score": 3
} |
#### File: cnn4ie/lcnn/lightweight_conv.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
def unfold1d(x, kernel_size, padding_l, pad_value=0):
    """Unfold a T x B x C tensor into overlapping time windows of width K.

    :param x: input of shape [T, B, C] (time, batch, channels)
    :param kernel_size: window width K
    :param padding_l: padding added on the left of the time axis; the right
        side receives kernel_size - 1 - padding_l
    :param pad_value: fill value used for the padding
    :return: a view of shape [T, B, C, K] with out[t, b, c, k] == padded[t + k, b, c]
    """
    if kernel_size <= 1:
        # A width-1 window is just the input with a trailing singleton axis.
        return x.unsqueeze(3)
    T, B, C = x.size()
    # F.pad takes (last-dim pairs first): only the time dimension is padded.
    padded = F.pad(
        x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value
    )
    # Stride trick: the windows share storage with `padded`, so no copy is made.
    return padded.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C))
class LightweightConv1dTBC(nn.Module):
    '''Lightweight Convolution assuming the input is TxBxC

    Args:
        input_size: # of channels of the input
        kernel_size: convolution channels
        padding_l: padding to the left when using "same" padding
        num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
        weight_dropout: the drop rate of the DropConnect to drop the weight
        weight_softmax: normalize the weight with softmax before the convolution
        bias: use bias
        with_linear: wrap the convolution with input/output linear projections
        out_dim: output dim of the trailing projection (defaults to input_size)

    Shape:
        Input: TxBxC, i.e. (timesteps, batch_size, input_size) -> [src_len, batch_size, hid_dim]
        Output: TxBxC, i.e. (timesteps, batch_size, input_size) -> [src_len, batch_size, hid_dim]

    Attributes:
        weight: the learnable weights of the module of shape
            `(num_heads, 1, kernel_size)`
        bias: the learnable bias of the module of shape `(input_size)`
    '''

    def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1,
                 weight_dropout=0., weight_softmax=True, bias=True, with_linear=True, out_dim=None):
        super().__init__()
        self.embed_dim = input_size
        out_dim = input_size if out_dim is None else out_dim
        self.input_size = input_size
        self.kernel_size = kernel_size
        self.padding_l = padding_l
        self.num_heads = num_heads
        self.weight_dropout = weight_dropout
        self.weight_softmax = weight_softmax
        # One kernel per head, shared across all channels of that head.
        self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))  # [num_heads, 1, kernel_size]
        if bias:
            self.bias = nn.Parameter(torch.Tensor(input_size))
        else:
            self.bias = None
        self.linear1 = self.linear2 = None
        if with_linear:
            self.linear1 = Linear(input_size, input_size)
            self.linear2 = Linear(input_size, out_dim)
        self.reset_parameters()  # initialize weight (Xavier) and zero the bias

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0.)

    def forward(self, x, unfold=False):
        '''
        Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
        args:
            x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
            unfold: unfold the input or not. If not, we use the matrix trick instead
        :param x: [src_len, batch_size, hid_dim]
        :param unfold: use the explicit unfolding implementation when True
        :return: [src_len, batch_size, hid_dim] (or out_dim with linear2)
        '''
        if self.linear1 is not None:
            x = self.linear1(x)
        if unfold:
            # [src_len, batch_size, hid_dim]
            output = self._forward_unfolded(x)
        else:
            # [src_len, batch_size, hid_dim]
            output = self._forward_expanded(x)
        if self.bias is not None:
            output = output + self.bias.view(1, 1, -1)  # bias: [1, 1, hid_dim]
        if self.linear2 is not None:
            output = self.linear2(output)  # [src_len, batch_size, out_dim]
        return output

    def _forward_unfolded(self, x):
        '''The conventional implementation of convolutions.
        Unfolding the input by having a window shifting to the right.'''
        T, B, C = x.size()  # [src_len, batch_size, hid_dim]
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size
        weight = self.weight.view(H, K)  # [num_heads, 1, kernel_size] -> [num_heads, kernel_size]
        # unfold the input: T x B x C --> T' x B x C x K [src_len, batch_size, hid_dim, kernel_size]
        x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0)
        x_unfold = x_unfold.view(T * B * H, R, K)  # [src_len * batch_size * num_heads, hid_dim//num_heads, kernel_size]
        if self.weight_softmax:
            # [num_heads, kernel_size]
            weight = F.softmax(weight, dim=1, dtype=torch.float32).type_as(weight)
        # [src_len * batch_size * num_heads, kernel_size, 1]
        weight = weight.view(1, H, K).expand(T * B, H, K).contiguous().view(T * B * H, K, 1)
        # BUGFIX: pass training=self.training so DropConnect is disabled at
        # eval time, consistent with _forward_expanded below (F.dropout
        # defaults to training=True otherwise).
        weight = F.dropout(weight, self.weight_dropout, training=self.training)
        # [src_len * batch_size * num_heads, hid_dim//num_heads, kernel_size] * [src_len * batch_size * num_heads, kernel_size, 1]
        output = torch.bmm(x_unfold, weight)  # T*B*H x R x 1 [src_len * batch_size * num_heads, hid_dim//num_heads, 1]
        output = output.view(T, B, C)  # [src_len, batch_size, hid_dim]
        return output

    def _forward_expanded(self, x):
        '''Turn the convolution filters into band matrices and do matrix multiplication.
        This is faster when the sequence is short, but less memory efficient.
        This is not used in the decoder during inference.
        '''
        T, B, C = x.size()  # [src_len, batch_size, hid_dim]
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size
        weight = self.weight.view(H, K)  # [num_heads, 1, kernel_size] -> [num_heads, kernel_size]
        if self.weight_softmax:
            weight = F.softmax(weight, dim=1, dtype=torch.float32).type_as(weight)
        weight = weight.view(1, H, K).expand(T * B, H, K).contiguous()  # [src_len * batch_size, num_heads, kernel_size]
        weight = weight.view(T, B * H, K).transpose(0, 1)  # [batch_size * num_heads, src_len, kernel_size]
        x = x.view(T, B * H, R).transpose(0, 1)  # [batch_size * num_heads, src_len, hid_dim//num_heads]
        P = self.padding_l
        if K > T and P == K - 1:
            # Kernel wider than the sequence: keep only the last T taps.
            weight = weight.narrow(2, K - T, T)
            K, P = T, T - 1
        # turn the convolution filters into band matrices
        weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)
        weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_(weight)
        weight_expanded = weight_expanded.narrow(2, P, T)
        weight_expanded = F.dropout(weight_expanded, self.weight_dropout, training=self.training)
        output = torch.bmm(weight_expanded, x)
        output = output.transpose(0, 1).contiguous().view(T, B, C)  # [src_len, batch_size, hid_dim]
        return output
def Linear(in_features, out_features, bias=True):
    """Build an nn.Linear with Xavier-uniform weights and a zeroed bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.)
    return layer
```
#### File: cnn4ie/mixed_attention_cnn/mixed_attention.py
```python
import torch
import torch.nn as nn
import math
class SeparableConv1D(nn.Module):
    """Depthwise-separable 1-D convolution: a per-channel (depthwise) conv
    followed by a 1x1 (pointwise) conv, plus a learned additive bias."""

    def __init__(self, input_filters, output_filters, kernel_size):
        super(SeparableConv1D, self).__init__()
        # Depthwise stage: groups == channels; 'same' padding for odd kernels.
        self.depthwise = nn.Conv1d(
            input_filters,
            input_filters,
            kernel_size=kernel_size,
            groups=input_filters,
            padding=kernel_size // 2,
            bias=False,
        )
        # Pointwise stage mixes channels with a width-1 kernel.
        self.pointwise = nn.Conv1d(input_filters, output_filters, kernel_size=1, bias=False)
        # Single shared bias applied after both convolutions.
        self.bias = nn.Parameter(torch.zeros(output_filters, 1))
        # Same initialisation (and RNG consumption order) as the reference.
        self.depthwise.weight.data.normal_(mean=0.0, std=0.01)
        self.pointwise.weight.data.normal_(mean=0.0, std=0.01)

    def forward(self, hidden_states):
        """hidden_states: (batch, input_filters, length) -> (batch, output_filters, length)."""
        features = self.pointwise(self.depthwise(hidden_states))
        return features + self.bias
class MixedAttention(nn.Module):
    """ConvBERT-style mixed attention: standard self-attention heads combined
    with span-based dynamic-convolution heads (split controlled by head_ratio)."""

    def __init__(self, hidden_size, num_attention_heads, head_ratio, conv_kernel_size, dropout):
        super(MixedAttention, self).__init__()
        assert hidden_size % num_attention_heads == 0, f'The hidden size ({hidden_size}) is not a multiple of the number of attention'
        # Reduce the attention head count by head_ratio; the removed heads
        # are replaced by convolution heads of the same size.
        new_num_attention_heads = num_attention_heads // head_ratio
        if new_num_attention_heads < 1:
            self.head_ratio = num_attention_heads
            self.num_attention_heads = 1
        else:
            self.num_attention_heads = new_num_attention_heads
            self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        # Head size is deliberately derived from the ORIGINAL head count, so
        # attention + convolution heads together span hidden_size.
        self.attention_head_size = hidden_size // num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        # Produces the key features that gate the dynamic convolution kernels.
        self.key_conv_attn_layer = SeparableConv1D(hidden_size, self.all_head_size, self.conv_kernel_size)
        self.conv_kernel_layer = nn.Linear(self.all_head_size, self.num_attention_heads * self.conv_kernel_size)
        self.conv_out_layer = nn.Linear(hidden_size, self.all_head_size)
        # NOTE(review): this module is not used by forward(), which calls
        # nn.functional.unfold directly; kept for attribute compatibility.
        self.unfold = nn.Unfold(kernel_size=[self.conv_kernel_size, 1], padding=[int((self.conv_kernel_size - 1) / 2), 0])
        self.dropout = nn.Dropout(dropout)

    def transpose_for_scores(self, x):
        """Reshape (B, T, all_head) -> (B, heads, T, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, x):
        '''
        :param x: [batch_size, src_len, hid_dim]
        :return: [batch_size, src_len, hid_dim]
        '''
        mixed_query_layer = self.query(x)
        batch_size = x.size(0)
        mixed_key_layer = self.key(x)
        mixed_value_layer = self.value(x)
        # Convolutional key features (channels-last in/out of the conv).
        mixed_key_conv_attn_layer = self.key_conv_attn_layer(x.transpose(1, 2))
        mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Per-position dynamic convolution kernels, gated by query * conv-key.
        conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
        conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
        conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
        conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1)
        # Gather a sliding window of value features for each position.
        conv_out_layer = self.conv_out_layer(x)
        conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
        conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1)
        conv_out_layer = nn.functional.unfold(
            conv_out_layer,
            kernel_size=[self.conv_kernel_size, 1],
            dilation=1,
            padding=[(self.conv_kernel_size - 1) // 2, 0],
            stride=1,
        )
        conv_out_layer = conv_out_layer.transpose(1, 2).reshape(
            batch_size, -1, self.all_head_size, self.conv_kernel_size
        )
        conv_out_layer = torch.reshape(conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
        # Apply the dynamic kernels to the windows.
        conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer)
        conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size])
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        # Concatenate attention heads and convolution heads along the head axis.
        conv_out = torch.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
        context_layer = torch.cat([context_layer, conv_out], 2)
        new_context_layer_shape = context_layer.size()[:-2] + (self.head_ratio * self.all_head_size,)
        outputs = context_layer.view(*new_context_layer_shape)  # [batch_size, src_len, hid_dim]
        return outputs
```
#### File: cnn4ie/mlrescnn/model.py
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from cnn4ie.util import crf
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Encoder(nn.Module):
    """Stack of gated (GLU) 1-D convolutions with scaled residual connections.

    Maps [batch, src_len, emb_dim] -> (conved, combined), both of the same
    shape as the input.
    """

    def __init__(self, emb_dim, hid_dim, n_layers, kernel_size, dropout):
        '''
        :param emb_dim: embedding size at the block boundary
        :param hid_dim: hidden size used inside the convolution stack
        :param n_layers: number of convolution layers
        :param kernel_size: convolution kernel width (must be odd)
        :param dropout: dropout probability
        '''
        super(Encoder, self).__init__()
        # An odd kernel lets both sides be padded evenly, keeping src_len fixed.
        assert kernel_size % 2 == 1, 'kernel size must be odd!'
        # Scale factor keeps the variance of residual sums roughly constant.
        self.scale = torch.sqrt(torch.FloatTensor([0.5])).to(DEVICE)
        self.emb2hid = nn.Linear(emb_dim, hid_dim)
        self.hid2emb = nn.Linear(hid_dim, emb_dim)
        # Each conv doubles the channel count so GLU can halve it again.
        self.convs = nn.ModuleList([nn.Conv1d(in_channels=hid_dim,
                                              out_channels=2 * hid_dim,
                                              kernel_size=kernel_size,
                                              padding=(kernel_size - 1) // 2)
                                    for _ in range(n_layers)])
        self.dropout = nn.Dropout(dropout)

    def forward(self, encoder_output):
        """encoder_output: [batch_size, src_len, emb_dim]."""
        # Project to the hidden size and move channels to dim 1 for Conv1d.
        hidden = self.emb2hid(encoder_output).permute(0, 2, 1)
        for conv in self.convs:
            # conv -> GLU gate -> scaled residual, feeding the next layer.
            gated = F.glu(conv(self.dropout(hidden)), dim=1)
            hidden = (gated + hidden) * self.scale
        # Back to [batch_size, src_len, emb_dim].
        conved = self.hid2emb(hidden.permute(0, 2, 1))
        # Joint feature: scaled residual with the block input.
        combined = (conved + encoder_output) * self.scale
        return conved, combined
class MultiLayerResCNN(nn.Module):
    """Multi-layer residual CNN sequence labeller with an optional CRF head."""

    def __init__(self, input_dim, output_dim, emb_dim, hid_dim, cnn_layers, encoder_layers, kernel_size, dropout, PAD_IDX, max_length=100, use_crf=True):
        '''
        :param input_dim: vocabulary size
        :param output_dim: number of output labels
        :param emb_dim: embedding size
        :param hid_dim: hidden size inside each CNN encoder
        :param cnn_layers: convolution layers per encoder
        :param encoder_layers: number of stacked encoders
        :param kernel_size: convolution kernel width (odd)
        :param dropout: dropout probability
        :param PAD_IDX: padding token index
        :param max_length: maximum sequence length for position embeddings
        :param use_crf: decode with the CRF in forward() when True
        '''
        super(MultiLayerResCNN, self).__init__()
        self.tok_embedding = nn.Embedding(input_dim, emb_dim, padding_idx=PAD_IDX)  # token embedding
        self.pos_embedding = nn.Embedding(max_length, emb_dim, padding_idx=PAD_IDX)  # position embedding
        self.encoder = nn.ModuleList([Encoder(emb_dim, hid_dim, cnn_layers, kernel_size, dropout)
                                      for _ in range(encoder_layers)])
        self.dropout = nn.Dropout(dropout)
        self.fc_out = nn.Linear(emb_dim, output_dim)
        self.crf = crf.CRF(output_dim, batch_first=True)
        self.use_crf = use_crf

    def _encode(self, tokens):
        """Shared embed + encode pipeline used by forward() and log_likelihood().

        :param tokens: [batch_size, src_len]
        :return: [batch_size, src_len, emb_dim]
        """
        tok_embedded = self.tok_embedding(tokens)  # [batch_size, src_len, emb_dim]
        # Position ids 0 .. src_len-1, repeated for every sequence in the batch.
        position = torch.arange(0, tokens.shape[1]).unsqueeze(0).repeat(tokens.shape[0], 1).to(DEVICE)
        pos_embedded = self.pos_embedding(position.long())  # [batch_size, src_len, emb_dim]
        encoder_output = self.dropout(tok_embedded + pos_embedded)
        # Stacked residual CNN encoders.
        for encoder in self.encoder:
            conved, encoder_output = encoder(self.dropout(encoder_output))
        return encoder_output

    def forward(self, token_tensor):
        """Return CRF-decoded tag sequences (use_crf=True) or raw emission scores.

        :param token_tensor: [batch_size, src_len]
        """
        output = self.fc_out(self._encode(token_tensor))  # [batch_size, src_len, output_dim]
        if self.use_crf:
            output = self.crf.decode(output)
        return output

    def log_likelihood(self, source, target):
        """Negative CRF log-likelihood of *target* given *source*.

        :param source: [batch_size, src_len]
        :param target: [batch_size, src_len]
        """
        outputs = self.fc_out(self._encode(source))  # [batch_size, src_len, output_dim]
        return -self.crf(outputs, target)
``` |
{
"source": "jiangnanboy/gcn_for_prediction_of_protein_interactions",
"score": 2
} |
#### File: src/graph_nheads_att_gae/predict.py
```python
import os
from configparser import ConfigParser
import numpy as np
import scipy.sparse as sp
from src.util.load_data import load_data
class Predict():
    """Reconstruct a link-prediction adjacency matrix from saved node embeddings."""

    def __init__(self):
        self.hidden_emb = None  # np.ndarray of node embeddings, loaded from disk
        self.adj_orig = None    # original adjacency matrix with the diagonal removed

    def load_model_adj(self, config_path):
        '''
        Load the hidden embeddings and the original adjacency matrix.

        *config_path* must point to a 'config.cfg' file whose first section
        provides data_catalog, train_file_name and model_path.

        :param config_path: path to the config.cfg file
        :raises FileNotFoundError: when the config file, the saved model or
            the training file cannot be found
        '''
        if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):
            # load config file
            config = ConfigParser()
            config.read(config_path)
            section = config.sections()[0]
            # data catalog path
            data_catalog = config.get(section, "data_catalog")
            # train file path
            train_file_name = config.get(section, "train_file_name")
            # model save/load path
            model_path = config.get(section, "model_path")
            # BUGFIX: require BOTH files to exist. The previous
            # `not exists(model) and exists(train)` only raised when the
            # model was missing while the training file was present.
            if not os.path.exists(model_path) or not os.path.exists(os.path.join(data_catalog, train_file_name)):
                raise FileNotFoundError('Not found file!')
            self.hidden_emb = np.load(model_path)
            # Original adjacency with the diagonal (self-loops) removed.
            adj = load_data(os.path.join(data_catalog, train_file_name))
            self.adj_orig = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
            self.adj_orig.eliminate_zeros()
        else:
            raise FileNotFoundError('File config.cfg not found : ' + config_path)

    def predict(self):
        """Return (adj_orig, adj_rec) where adj_rec = sigmoid(Z @ Z.T)."""
        def sigmoid(x):
            return 1 / (1 + np.exp(-x))
        # Inner product of embeddings, squashed to edge probabilities.
        adj_rec = np.dot(self.hidden_emb, self.hidden_emb.T)
        adj_rec = sigmoid(adj_rec)
        return self.adj_orig, adj_rec
if __name__ == '__main__':
    # Expect config.cfg in the current working directory.
    config_path = os.path.join(os.getcwd(), 'config.cfg')
    predict = Predict()
    predict.load_model_adj(config_path)
    adj_orig, adj_rec = predict.predict()
    # Optional post-processing, kept for reference:
    # adj_rec = (adj_rec > 0.5) + 0
    # adj_rec = (adj_rec > 0.5).nonzero() # indices of predicted edges
    print('adj_orig: {}, \n adj_rec: {}'.format(adj_orig, adj_rec))
``` |
{
"source": "jiangnanboy/gnn4lp",
"score": 2
} |
#### File: src/arga/model.py
```python
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class GraphConvolution(nn.Module):
    """A single GCN layer: output = adj @ (dropout(input) @ W) (+ b)."""

    def __init__(self, input_dim, output_dim, dropout, bias=False):
        super(GraphConvolution, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.weight = Parameter(torch.FloatTensor(input_dim, output_dim))
        self.reset_parameters()
        if bias:
            self.bias = Parameter(torch.FloatTensor(output_dim))
            nn.init.zeros_(self.bias)
        else:
            # Register the attribute so `self.bias` always exists.
            self.register_parameter('bias', None)
        self.dropout = nn.Dropout(dropout)

    def reset_parameters(self):
        """Re-initialise the weight matrix (Xavier uniform)."""
        nn.init.xavier_uniform_(self.weight)

    def forward(self, input, adj):
        """input: dense (N, input_dim); adj: sparse (N, N) adjacency matrix."""
        dropped = self.dropout(input)
        projected = torch.mm(dropped, self.weight)
        # Sparse-dense product aggregates neighbour features.
        aggregated = torch.spmm(adj, projected)
        return aggregated if self.bias is None else aggregated + self.bias
class GCNModelARGA(nn.Module):
    # The Adversarially Regularized Graph Auto-Encoder model.
    # Adversarially regularized graph auto-encoder: a GAE/VGAE acts as the
    # generator while a three-layer feed-forward network is the discriminator.
    def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout, vae_bool=True):
        """Build encoder (gc1 shared, gc2 mean head, gc3 log-variance head),
        inner-product decoder and discriminator.

        :param input_feat_dim: node feature size
        :param hidden_dim1: shared hidden layer size
        :param hidden_dim2: latent dimension
        :param dropout: dropout probability used throughout
        :param vae_bool: variational sampling when True, plain GAE otherwise
        """
        super(GCNModelARGA, self).__init__()
        self.vae_bool = vae_bool
        self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout)
        self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout)
        self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout)
        self.ip = InnerProductDecoder(dropout)
        self.relu = nn.ReLU()
        self.discriminator = Discriminator(hidden_dim2, hidden_dim1)

    def encode(self, input, adj):
        """Shared GCN layer, then the two Gaussian-parameter heads."""
        hidden1 = self.relu(self.gc1(input, adj))
        return self.gc2(hidden1, adj), self.gc3(hidden1, adj)

    def reparameterize(self, mu, logvar):
        """Sample z = mu + eps * exp(logvar); identity when not variational.

        NOTE(review): exp(logvar) treats `logvar` as log-std; the common VAE
        convention is exp(0.5 * logvar) — confirm which the loss assumes.
        """
        if self.vae_bool:
            std = torch.exp(logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)  # eps * std + mu
        else:
            return mu

    def forward(self, input, adj):
        # Two GCN heads produce mean and (log-)std of the posterior.
        mu, logvar = self.encode(input, adj)
        z_fake = self.reparameterize(mu, logvar)  # generated latent code
        z_real = torch.randn(z_fake.shape).to(DEVICE)  # standard-normal prior sample
        # Discriminator scores prior samples vs. encoder samples.
        dis_real = self.discriminator(z_real)
        dis_fake = self.discriminator(z_fake)
        return self.ip(z_fake), dis_real, dis_fake, mu, logvar  # ip(z_fake) reconstructs the adjacency
class InnerProductDecoder(nn.Module):
    """Decode latent node embeddings into an adjacency matrix.

    The decoder is a plain inner product: score(i, j) = z_i . z_j.
    """

    def __init__(self, dropout):
        super(InnerProductDecoder, self).__init__()
        self.dropout = nn.Dropout(dropout)

    def forward(self, z):
        """Return the (N, N) matrix of raw link scores Z @ Z^T."""
        latent = self.dropout(z)
        return torch.mm(latent, latent.t())
class Discriminator(nn.Module):
    """Three-layer MLP scoring whether a latent code came from the prior.

    Maps hidden_dim2 -> hidden_dim1 -> hidden_dim2 -> 1 (raw logit).
    """

    def __init__(self, hidden_dim2, hidden_dim1):
        super(Discriminator, self).__init__()
        layers = [
            nn.Linear(hidden_dim2, hidden_dim1),
            nn.ReLU(),
            nn.Linear(hidden_dim1, hidden_dim2),
            nn.ReLU(),
            nn.Linear(hidden_dim2, 1),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, z):
        """Return one unbounded score per row of *z*."""
        return self.fc(z)
```
#### File: src/vgae/model.py
```python
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class GraphConvolution(nn.Module):
    """Graph convolution: optional dropout, linear projection, then sparse
    neighbourhood aggregation A @ (X @ W), with an optional bias."""

    def __init__(self, input_dim, output_dim, dropout, bias=False):
        super(GraphConvolution, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.weight = Parameter(torch.FloatTensor(input_dim, output_dim))
        self.reset_parameters()
        if not bias:
            # No bias requested: register the slot so self.bias still exists.
            self.register_parameter('bias', None)
        else:
            self.bias = Parameter(torch.FloatTensor(output_dim))
            nn.init.zeros_(self.bias)
        self.dropout = nn.Dropout(dropout)

    def reset_parameters(self):
        """Xavier-uniform initialisation of the projection weight."""
        nn.init.xavier_uniform_(self.weight)

    def forward(self, input, adj):
        # input: (N, n_channels); adj: sparse (N, N).
        features = self.dropout(input)
        support = torch.mm(features, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            output = output + self.bias
        return output
class GCNModelVAE(nn.Module):
    """(Variational) graph auto-encoder.

    A shared GCN layer feeds two head layers that produce the posterior mean
    and log-variance; the inner-product decoder reconstructs the adjacency
    matrix from the (sampled) latent code.  With ``vae_bool=False`` the model
    degenerates to a plain GAE that uses the mean directly.
    """

    def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout, vae_bool=True):
        super(GCNModelVAE, self).__init__()
        self.vae_bool = vae_bool
        self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout)
        self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout)  # mean head
        self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout)  # log-variance head
        self.ip = InnerProductDecoder(dropout)
        self.relu = nn.ReLU()

    def encode(self, input, adj):
        """Run the shared layer then the two Gaussian-parameter heads."""
        shared = self.relu(self.gc1(input, adj))
        return self.gc2(shared, adj), self.gc3(shared, adj)

    def reparameterize(self, mu, logvar):
        """Sample z = mu + eps * exp(logvar); identity when not variational."""
        if not self.vae_bool:
            return mu
        std = torch.exp(logvar)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def forward(self, input, adj):
        mu, logvar = self.encode(input, adj)
        z = self.reparameterize(mu, logvar)
        return self.ip(z), mu, logvar
class InnerProductDecoder(nn.Module):
    """Inner-product decoder: reconstruct the adjacency matrix as Z @ Z^T."""

    def __init__(self, dropout):
        super(InnerProductDecoder, self).__init__()
        self.dropout = nn.Dropout(dropout)

    def forward(self, z):
        # z: (N, latent_dim) -> (N, N) raw link-score matrix.
        code = self.dropout(z)
        adj = torch.mm(code, code.t())
        return adj
``` |
{
"source": "jiangnanboy/RNN4IE",
"score": 3
} |
#### File: data/train/process_train.py
```python
import os
import csv
def process_train(train_path, source_path, target_path):
    """Split a CoNLL-style file into parallel token/tag files.

    The input has one ``token<whitespace>tag`` pair per line with blank lines
    separating sentences.  Each sentence becomes one space-joined line in
    *source_path* (tokens) and one in *target_path* (tags).
    """
    with open(train_path, 'r', encoding='utf-8') as train_read, \
            open(source_path, 'w', encoding='utf-8') as source_write, \
            open(target_path, 'w', encoding='utf-8') as target_write:

        def flush(words, tags):
            # Emit one sentence per line in each output file.
            source_write.write(' '.join(words))
            source_write.write('\n')
            target_write.write(' '.join(tags))
            target_write.write('\n')

        words, tags = [], []
        for line in train_read:
            if not line.strip():
                # Blank line terminates the current sentence.
                flush(words, tags)
                words, tags = [], []
            else:
                parts = line.split()
                words.append(parts[0].strip())
                tags.append(parts[1].strip())
        if words and tags:
            # The file ended without a trailing blank line.
            flush(words, tags)
    print('process train done!')
def train2csv(train_source, train_target, save_path):
    """Merge aligned source/target line files into one CSV.

    The CSV has the header ``label,source,target``; the label column is left
    empty.  NOTE(review): each line keeps its trailing newline, so the csv
    module quotes the fields — downstream readers must tolerate that.
    """
    with open(train_source, 'r', encoding='utf-8') as source_read, \
            open(train_target, 'r', encoding='utf-8') as target_read, \
            open(save_path, 'w', encoding='utf-8', newline='') as csv_write:
        writer = csv.writer(csv_write)
        writer.writerow(['label', 'source', 'target'])
        writer.writerows([None, src, tgt] for src, tgt in zip(source_read, target_read))
    print("done!")
if __name__ == '__main__':
    # Step 2 of the pipeline: pack the already-split parallel files into
    # train.csv for the downstream dataset loader.
    source_path = os.path.join(os.getcwd(), 'train_source.txt')
    target_path = os.path.join(os.getcwd(), 'train_target.txt')
    save_path = os.path.join(os.getcwd(), 'train.csv')
    train2csv(source_path, target_path, save_path)
    # Step 1 (run once beforehand) produced the parallel files from the corpus:
    '''
    source_path = os.path.join(os.getcwd(), 'train_source.txt')
    target_path = os.path.join(os.getcwd(), 'train_target.txt')
    train_path = os.path.join(os.getcwd(), 'train.conll')
    process_train(train_path, source_path, target_path)
    '''
```
#### File: rnn4ie/gru_mhsa/train.py
```python
import torch
import torch.nn.functional as F
import time
import math
import os
from configparser import ConfigParser
import tqdm
import numpy as np
from rnn4ie.gru_mhsa.model import GRUMHSAIE
from dset.get_dataset import build_data_iter
from rnn4ie.util.model_util import init_weights, epoch_time
from rnn4ie.util import define_optimizer
from rnn4ie.util import define_loss
from rnn4ie.util import metrics, crf_util
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Train():
    """Driver for training/validating the GRU + multi-head self-attention
    information-extraction model (GRUMHSAIE), configured from config.cfg."""
    def __init__(self):
        # Stateless driver: everything is passed through method arguments.
        pass
    def _define_model(self,
                      input_dim,
                      output_dim,
                      emb_dim,
                      hid_dim,
                      n_layers,
                      dropout,
                      n_heads,
                      PAD_IDX,
                      bidirectional,
                      pretrained_embedding_vocab=None,
                      init=True,
                      use_crf=True):
        '''
        Build a GRUMHSAIE model, optionally initialising its weights and
        loading pre-trained token embeddings, and move it to DEVICE.
        :param input_dim: source vocabulary size
        :param output_dim: number of target labels
        :param emb_dim: token embedding size
        :param hid_dim: GRU hidden size
        :param n_layers: number of GRU layers
        :param dropout: dropout probability
        :param n_heads: number of self-attention heads
        :param PAD_IDX: padding token index
        :param bidirectional: whether the GRU is bidirectional
        :param pretrained_embedding_vocab: torchtext vocab carrying .vectors
        :param init: apply init_weights to all modules when True
        :param use_crf: add a CRF decoding head when True
        :return: model on DEVICE
        '''
        model = GRUMHSAIE(input_dim,
                          emb_dim,
                          hid_dim,
                          n_layers,
                          bidirectional,
                          output_dim,
                          dropout,
                          n_heads,
                          PAD_IDX,
                          use_crf=use_crf)
        # init model weights
        if init:
            model.apply(init_weights)
        # init model token embedding
        if pretrained_embedding_vocab:
            model.embedding.weight.data.copy_(pretrained_embedding_vocab.vectors)
            UNK_IDX = pretrained_embedding_vocab['<unk>']
            # pre-trained weights of the unk and pad word vectors are not trained on our dataset corpus, it is best to set them to zero
            model.embedding.weight.data[UNK_IDX] = torch.zeros(emb_dim)
            model.embedding.weight.data[PAD_IDX] = torch.zeros(emb_dim)
        return model.to(DEVICE)
    @staticmethod
    def load_model(input_dim,
                   output_dim,
                   emb_dim,
                   hid_dim,
                   n_layers,
                   dropout,
                   n_heads,
                   PAD_IDX,
                   bidirectional,
                   model_path,
                   use_crf=True):
        '''
        Rebuild a GRUMHSAIE model and load trained weights from *model_path*.
        :param input_dim: source vocabulary size
        :param output_dim: number of target labels
        :param emb_dim: token embedding size
        :param hid_dim: GRU hidden size
        :param n_layers: number of GRU layers
        :param dropout: dropout probability
        :param n_heads: number of self-attention heads
        :param PAD_IDX: padding token index
        :param bidirectional: whether the GRU is bidirectional
        :param model_path: path of the saved state_dict
        :param use_crf: whether the saved model has a CRF head
        :return: model on DEVICE
        :raises FileNotFoundError: if *model_path* does not exist
        '''
        model = GRUMHSAIE(input_dim,
                          emb_dim,
                          hid_dim,
                          n_layers,
                          bidirectional,
                          output_dim,
                          dropout,
                          n_heads,
                          PAD_IDX,
                          use_crf=use_crf)
        # load model
        if os.path.exists(model_path):
            model.load_state_dict(torch.load(model_path, map_location=DEVICE))
        else:
            raise FileNotFoundError('Not found model file!')
        return model.to(DEVICE)
def _train(self, model, train_iter, optimizer, criterion, clip):
'''
trainning module
:param model:
:param iterator:
:param optimizer:
:param criterion:
:param clip:
:return:
'''
model.train()
epoch_loss = 0
if model.use_crf:
for batch in train_iter:
source, input_lengths = batch.source
target, _ = batch.target
source = source.to(DEVICE)
target = target.to(DEVICE)
optimizer.zero_grad()
loss = model.log_likelihood(source, target, input_lengths)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(train_iter)
else:
for i, batch in enumerate(train_iter):
source, input_lengths = batch.source
target, _ = batch.target
source = source.to(DEVICE)
target = target.to(DEVICE)
optimizer.zero_grad()
out = model(source, input_lengths) # [batch_size, src_len, output_dim]
out = out.view(-1, out.shape[-1]) # [batch_size * src_len, output_dim]
out = out.contiguous().view(-1, out.shape[-1]) # [batch_size * src_len, output_dim]
target = target.contiguous().view(-1) # [batch_size * src_len]
# loss
loss = criterion(out, target)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(train_iter)
def _validate(self, model, val_iter, criterion):
'''
validation module
:param model:
:param iterator:
:param criterion:
:return:
'''
model.eval()
epoch_loss = 0
if model.use_crf:
with torch.no_grad():
preds, labels = [], []
for batch in val_iter:
source, input_lengths = batch.source
target, _ = batch.target
source = source.to(DEVICE)
target = target.to(DEVICE)
out = model(source, input_lengths) # [batch_size, src_len, output_dim]
# the length of non-zero true labels
non_zero = []
for i in target.cpu():
tmp = []
for j in i:
if j.item() > 0:
tmp.append(j.item())
non_zero.append(tmp)
for index, i in enumerate(out):
preds += i[:len(non_zero[index])]
for index, i in enumerate(target.tolist()):
labels += i[:len(non_zero[index])]
# loss
loss = model.log_likelihood(source, target, input_lengths)
epoch_loss += loss.item()
# p,r,f1 metrics
report = metrics.classification_report_f_r_f1(labels, preds)
return epoch_loss / len(val_iter), report
else:
with torch.no_grad():
labels = np.array([])
predicts = np.array([])
for batch in tqdm.tqdm(val_iter):
source, input_lengths = batch.source
target, _ = batch.target
source = source.to(DEVICE)
target = target.to(DEVICE)
out = model(source, input_lengths) # [batch_size, src_len, output_dim]
out = out.view(-1, out.shape[-1]) # [batch_size * src_len, output_dim]
out = out.contiguous().view(-1, out.shape[-1]) # [batch_size * src_len, output_dim]
target = target.contiguous().view(-1) # [batch_size * src_len]
# p,r,f1 metrics
prediction = torch.max(F.softmax(out, dim=1), dim=1)[1]
pred_y = prediction.cpu().data.numpy().squeeze()
target_y = target.cpu().data.numpy()
labels = np.append(labels, target_y)
predicts = np.append(predicts, pred_y)
# loss
loss = criterion(out, target)
epoch_loss += loss.item()
report = metrics.classification_report_f_r_f1(labels, predicts)
return epoch_loss / len(val_iter), report
    def _validate_2(self, model, val_iter, criterion, tags, tags_map):
        '''
        Per-tag validation (PER, ORG, LOC, T): prints an F1 report for each
        requested tag via crf_util.f1_score and returns the average loss.
        :param model: IE model; ``model.use_crf`` selects the loss path
        :param val_iter: iterator of validation batches
        :param criterion: loss for the non-CRF path (unused with CRF)
        :param tags: tag names to score individually
        :param tags_map: tag-name -> id mapping
        :return: average validation loss
        '''
        model.eval()
        epoch_loss = 0
        if model.use_crf:
            with torch.no_grad():
                for batch in val_iter:
                    source, input_lengths = batch.source
                    target, _ = batch.target
                    source = source.to(DEVICE)
                    target = target.to(DEVICE)
                    out = model(source, input_lengths)  # [batch_size, src_len, output_dim]
                    print('\treport:')
                    for tag in tags:
                        crf_util.f1_score(target, out, tag, tags_map)
                    # loss
                    loss = model.log_likelihood(source, target, input_lengths)
                    epoch_loss += loss.item()
                return epoch_loss / len(val_iter)
        else:
            with torch.no_grad():
                for batch in tqdm.tqdm(val_iter):
                    source, input_lengths = batch.source
                    target, _ = batch.target
                    source = source.to(DEVICE)
                    target = target.to(DEVICE)
                    out = model(source, input_lengths)  # [batch_size, src_len, output_dim]
                    print('\treport')
                    for tag in tags:
                        crf_util.f1_score(target, out, tag, tags_map)
                    # loss
                    # NOTE(review): `out` is still 3-D here while _validate
                    # flattens before calling criterion — confirm criterion
                    # accepts [batch, src_len, output_dim] input.
                    loss = criterion(out, target)
                    epoch_loss += loss.item()
                return epoch_loss / len(val_iter)
def _train_val_main(self, model, optimizer, criterion, clip, n_epochs, train_iter, val_iter, model_path):
'''
trainning and validation
:param model:
:param optimizer:
:param criterion:
:param clip:
:param n_epochs:
:param train_iter:
:param val_iter:
:param model_path:
:return:
'''
best_valid_loss = float('inf')
# use crf
if model.use_crf:
for epoch in range(n_epochs):
start_time = time.time()
train_loss = self._train(model, train_iter, optimizer, criterion, clip)
valid_loss, report = self._validate(model, val_iter, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), model_path)
try:
train_ppl = math.exp(train_loss)
except OverflowError:
train_ppl = float('inf')
try:
val_ppl = math.exp(valid_loss)
except OverflowError:
val_ppl = float('inf')
print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {train_ppl}')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {val_ppl}')
print(f'\t Val. report: {report}')
else:
for epoch in range(n_epochs):
start_time = time.time()
train_loss = self._train(model, train_iter, optimizer, criterion, clip)
valid_loss, report = self._validate(model, val_iter, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), model_path)
try:
train_ppl = math.exp(train_loss)
except OverflowError:
train_ppl = float('inf')
try:
val_ppl = math.exp(valid_loss)
except OverflowError:
val_ppl = float('inf')
print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {train_ppl}')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {val_ppl}')
print(f'\t Val. report: {report}')
    def train_model(self, config_path):
        """Read config.cfg, build data iterators / model / loss / optimizer,
        and run the full train-validate loop.

        :param config_path: path to a file literally named ``config.cfg``
        :raises FileNotFoundError: if *config_path* is missing or misnamed
        :raises NameError: on an unrecognised loss or optimizer name
        """
        if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (
                os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):
            # load config file (all settings come from its first section)
            config = ConfigParser()
            config.read(config_path)
            section = config.sections()[0]
            # train and val file
            data_catalog = config.get(section, "data_catalog")
            # data_catalog = os.path.join(os.path.dirname(os.path.abspath('..')), data_catalog)
            train_file_name = config.get(section, "train_file_name")
            validation_file_name = config.get(section, "validation_file_name")
            # save vocabs of source, target, label
            source_vocab_path = config.get(section, "source_vocab_path")
            # source_vocab_path = os.path.join(os.path.dirname(os.path.abspath('..')), 'data', source_vocab_path)
            target_vocab_path = config.get(section, "target_vocab_path")
            # target_vocab_path = os.path.join(os.path.dirname(os.path.abspath('..')), 'data', target_vocab_path)
            label_vocab_path = config.get(section, "label_vocab_path")
            # label_vocab_path = os.path.join(os.path.dirname(os.path.abspath('..')), 'data', label_vocab_path)
            pretrained_embedding_path = config.get(section, "pretrained_embedding_path")
            # pretrained_embedding_path = os.path.join(os.path.dirname(os.path.abspath('..')), 'data', pretrained_embedding_path)
            # model save/load path
            model_path = config.get(section, "model_path")
            # model_path = os.path.join(os.path.dirname(os.path.abspath('..')), "model", model_path)
            # model param config
            input_dim = config.getint(section, "input_dim")
            output_dim = config.getint(section, "output_dim")
            emb_dim = config.getint(section, "emb_dim")
            hid_dim = config.getint(section, "hid_dim")
            n_layers = config.getint(section, "n_layers")
            dropout = config.getfloat(section, "dropout")
            n_heads = config.getint(section, 'n_heads')
            bidirectional = config.getboolean(section, 'bidirectional')
            max_length = config.getint(section, "max_length")
            lr = config.getfloat(section, "lr")
            lr_decay = config.getfloat(section, 'lr_decay')
            weight_decay = config.getfloat(section, "weight_decay")
            gamma = config.getfloat(section, "gamma")
            momentum = config.getfloat(section, "momentum")
            eps = config.getfloat(section, "eps")
            batch_size = config.getint(section, "batch_size")
            clip = config.getfloat(section, "clip")
            n_epochs = config.getint(section, "n_epochs")
            optimizer_name = config.get(section, "optimizer")
            loss_name = config.get(section, "loss")
            pretrained_embedding_vocab = None
            # load pretrained embedding from file
            if os.path.exists(pretrained_embedding_path):
                # get train and val data, source_dict_size_embedding, target dict size, padding_idx
                train_iter, val_iter, pretrained_embedding_vocab, output_dim, PAD_IDX = build_data_iter(data_catalog,
                                                                                                        train_file_name,
                                                                                                        validation_file_name,
                                                                                                        source_vocab_path,
                                                                                                        target_vocab_path,
                                                                                                        label_vocab_path,
                                                                                                        batch_size,
                                                                                                        max_length,
                                                                                                        pretrained_embedding_path)
                # pretrained vectors override the configured vocab/emb sizes
                input_dim = pretrained_embedding_vocab.vectors.shape[0]
                emb_dim = pretrained_embedding_vocab.vectors.shape[1]
            else:
                # get train and val data, source dict size, target dict size size, padding_idx
                train_iter, val_iter, input_dim, output_dim, PAD_IDX = build_data_iter(data_catalog,
                                                                                      train_file_name,
                                                                                      validation_file_name,
                                                                                      source_vocab_path,
                                                                                      target_vocab_path,
                                                                                      label_vocab_path,
                                                                                      batch_size,
                                                                                      max_length)
            # define loss ('crf' means the model computes its own loss)
            if loss_name == 'crf':
                criterion = None
                use_crf = True
            elif loss_name == 'ce':
                criterion = define_loss.define_loss_ce(PAD_IDX)
                use_crf = False
            elif loss_name == 'bce':
                criterion = define_loss.define_loss_bce()
                use_crf = False
            elif loss_name == 'bcelogits':
                criterion = define_loss.define_loss_bcelogits()
                use_crf = False
            else:
                raise NameError('No define loss function name!')
            print('input_dim:{}'.format(input_dim))
            print('emb_dim:{}'.format(emb_dim))
            # define model
            model = self._define_model(input_dim,
                                       output_dim,
                                       emb_dim,
                                       hid_dim,
                                       n_layers,
                                       dropout,
                                       n_heads,
                                       PAD_IDX,
                                       bidirectional,
                                       pretrained_embedding_vocab,
                                       True,
                                       use_crf)
            # define optimizer
            if optimizer_name == 'adam':
                optimizer = define_optimizer.define_optimizer_adam(model, lr=lr, weight_decay=weight_decay)
            elif optimizer_name == 'adamw':
                optimizer = define_optimizer.define_optimizer_adamw(model, lr=lr, weight_decay=weight_decay)
            elif optimizer_name == 'sgd':
                optimizer = define_optimizer.define_optimizer_sgd(model, lr=lr, momentum=momentum,
                                                                  weight_decay=weight_decay)
            elif optimizer_name == 'adagrad':
                optimizer = define_optimizer.define_optimizer_adagrad(model, lr=lr, lr_decay=lr_decay,
                                                                      weight_decay=weight_decay)
            elif optimizer_name == 'rmsprop':
                optimizer = define_optimizer.define_optimizer_rmsprop(model, lr=lr, weight_decay=weight_decay,
                                                                      momentum=momentum)
            elif optimizer_name == 'adadelta':
                optimizer = define_optimizer.define_optimizer_adadelta(model, lr=lr, weight_decay=weight_decay)
            else:
                raise NameError('No define optimization function name!')
            # train and validate
            self._train_val_main(model, optimizer, criterion, clip, n_epochs, train_iter, val_iter, model_path)
        else:
            raise FileNotFoundError('File config.cfg not found : ' + config_path)
```
#### File: rnn4ie/util/define_loss.py
```python
import torch
import torch.nn.functional as F
from torch import nn
class FocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017) built on top of cross-entropy.

    Down-weights easy examples by the factor ``alpha * (1 - p_t) ** gamma``
    so that training concentrates on hard, misclassified tokens.
    ``gamma=0`` recovers plain cross-entropy.
    """

    def __init__(self, alpha=1, gamma=2, size_average=True, ignore_index=-100):
        # gamma is typically chosen in [0, 5].
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.ignore_index = ignore_index
        self.size_average = size_average

    def forward(self, inputs, targets):
        per_token_ce = F.cross_entropy(inputs, targets, reduction='none',
                                       ignore_index=self.ignore_index)
        prob_true = torch.exp(-per_token_ce)  # p_t: probability of the gold class
        focal = self.alpha * (1 - prob_true) ** self.gamma * per_token_ce
        return focal.mean() if self.size_average else focal.sum()
def define_loss_fce(PAD_IDX=None):
    '''
    Build a FocalLoss that skips padding positions.
    :param PAD_IDX: label id excluded from the loss
    :return: configured FocalLoss instance
    '''
    return FocalLoss(ignore_index=PAD_IDX)
def define_loss_ce(PAD_IDX=None):
    '''
    Cross-entropy loss that skips padding targets.
    :param PAD_IDX: label id treated as padding
    :return: nn.CrossEntropyLoss instance
    '''
    return nn.CrossEntropyLoss(ignore_index=PAD_IDX)
def define_loss_bce():
    '''
    Binary cross-entropy loss (expects probabilities as input).
    :return: nn.BCELoss instance
    '''
    return nn.BCELoss()
def define_loss_bcelogits():
    '''
    Binary cross-entropy on raw logits (sigmoid folded into the loss).
    :return: nn.BCEWithLogitsLoss instance
    '''
    return nn.BCEWithLogitsLoss()
``` |
{
"source": "JiangNanMax/mysite",
"score": 2
} |
#### File: mysite/about/views.py
```python
from django.shortcuts import render
# Create your views here.
def aboutme(request):
    """Render the static 'about me' page."""
    template_name = 'about/aboutme.html'
    return render(request, template_name)
```
#### File: mysite/blog/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericRelation
from ckeditor.fields import RichTextField
from read_statistics.models import ReadNumExpandMethod, ReadDetail
##
from mdeditor.fields import MDTextField
# Create your models here.
class BlogType(models.Model):
    """Category a blog post belongs to."""

    # Short human-readable category name, also used as the display string.
    type_name = models.CharField(max_length=20)

    def __str__(self):
        return self.type_name
class Blog(models.Model, ReadNumExpandMethod):
    """A markdown blog post with read-statistics support."""

    title = models.CharField(max_length=50)
    blog_type = models.ForeignKey(BlogType, on_delete=models.CASCADE)
    #content = RichTextField()
    content = MDTextField()
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    read_details = GenericRelation(ReadDetail)
    created_time = models.DateTimeField(auto_now_add=True)
    # BUG FIX: this field used auto_now_add=True, which only sets the value
    # on creation, so the "last updated" timestamp never changed; auto_now
    # refreshes it on every save().
    last_updated_time = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "<Blog: %s>" % self.title

    class Meta:
        # Newest posts first by default.
        ordering = ['-created_time']
```
#### File: mysite/mysite/views.py
```python
from django.shortcuts import render, redirect
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
import datetime
from django.db.models import Sum
from django.core.cache import cache
from django.urls import reverse
from read_statistics.utils import get_week_read_data
from blog.models import Blog
def get_week_hot_data():
    """Top blogs by total read count over the last 7 days (at most 8 rows)."""
    today = timezone.now().date()
    date = today - datetime.timedelta(days=7)
    # NOTE(review): the window is [today-7, today) — today's reads are
    # excluded by __lt; confirm whether __lte (inclusive) was intended.
    blogs = Blog.objects.filter(read_details__date__lt=today, read_details__date__gte=date).values('id', 'title').annotate(read_num_sum=Sum('read_details__read_num')).order_by('-read_num_sum')
    return blogs[:8]
def home(request):
    """Home page: 7-day read chart data plus a cached weekly hot-blog list."""
    blog_content_type = ContentType.objects.get_for_model(Blog)
    dates, read_nums = get_week_read_data(blog_content_type)
    # The hot list is expensive to compute, so keep it in cache for an hour.
    week_hot_data = cache.get('week_hot_data')
    if week_hot_data is None:
        week_hot_data = get_week_hot_data()
        cache.set('week_hot_data', week_hot_data, 3600)
    context = {
        'dates': dates,
        'read_nums': read_nums,
        'week_hot_data': week_hot_data,
    }
    return render(request, 'home.html', context)
``` |
{
"source": "JiangNanMax/ProxyPool",
"score": 2
} |
#### File: ProxyPool/proxypool/errors.py
```python
class ProxyPoolEmptyError(Exception):
    """Raised when a proxy is requested but the pool holds no proxies."""

    def __init__(self):
        # BUG FIX: the original called ``super.__init__(self)`` — invoking
        # the slot wrapper on the ``super`` *type* itself — which raised
        # TypeError the moment the exception was instantiated.
        super().__init__()

    def __str__(self):
        return repr("The Proxy Pool Is Empty!")
# Raised when every proxy-listing site has been fully crawled.
class ProxyResourceDepletionError(Exception):
    """Raised when the crawlable proxy-site resources are exhausted."""

    def __init__(self):
        # BUG FIX: the original called ``super.__init__(self)`` (on the
        # ``super`` type itself), which raised TypeError on instantiation.
        super().__init__()

    def __str__(self):
        return repr("The Proxy Resource Is Exhausted!")
``` |
{
"source": "JiangNanMax/qp12306",
"score": 2
} |
#### File: JiangNanMax/qp12306/main.py
```python
from PyQt5.Qt import *
from Login_Pane import LoginPane
from Query_Pane import QueryPane

if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    # Show the login window first; the query window is created up front but
    # stays hidden until login succeeds.
    login_pane = LoginPane()
    login_pane.show()
    query_pane = QueryPane()

    def success_login_slot(content):
        # Slot fired by LoginPane's success_login signal; *content* becomes
        # the query window's title.
        print(content)
        login_pane.hide()
        query_pane.setWindowTitle(content)
        query_pane.show()

    login_pane.success_login.connect(success_login_slot)
    sys.exit(app.exec())
```
#### File: JiangNanMax/qp12306/Query_Pane.py
```python
from PyQt5.Qt import *
from resource.query_pane_ui import Ui_Form
from API.API_Tool import APITool
class QueryPane(QWidget, Ui_Form):
    """Ticket-query window: station pickers with autocompletion, a date
    picker, and a table of per-train seat availability."""

    def __init__(self, parent = None, *args, **kwargs):
        super().__init__(parent, *args, **kwargs)
        self.setupUi(self)
        self.setupData()

    def setupData(self):
        """Populate the widgets with station data and configure the table."""
        # station name -> station code mapping from the 12306 API helper
        station_dic = APITool.get_all_stations()
        self.from_station_cb.addItems(station_dic.keys())
        self.to_station_cb.addItems(station_dic.keys())
        from_completer = QCompleter(station_dic.keys())
        self.from_station_cb.setCompleter(from_completer)

        def check_data(cb):
            # Clear the editor when the typed text is not a known station.
            current_station = cb.currentText()
            result = station_dic.keys().__contains__(current_station)
            #print(result)
            if not result:
                cb.clearEditText()
        self.from_station_cb.lineEdit().editingFinished.connect(lambda : check_data(self.from_station_cb))
        to_completer = QCompleter(station_dic.keys())
        self.to_station_cb.setCompleter(to_completer)
        self.to_station_cb.lineEdit().editingFinished.connect(lambda : check_data(self.to_station_cb))
        # Departure date defaults to today and cannot be set in the past.
        self.start_date_edit.setDate(QDate.currentDate())
        self.start_date_edit.setMinimumDate(QDate.currentDate())
        # Configure the result table's header row.
        model = QStandardItemModel(self.tickets_tv)
        # Header labels: train number, route, times, duration, seat classes.
        headers = ["车次", "出发站->到达站", "出发时间->到达时间", "历时", "商务座-特等座",
                   "一等座","二等座", "高级软卧", "软卧-一等卧", "动卧","硬卧-二等卧",
                   "硬座", "无座", "其他"]
        model.setColumnCount(len(headers))
        for idx, header in enumerate(headers):
            model.setHeaderData(idx, Qt.Horizontal, header)
        self.tickets_tv.setModel(model)

    def query_tickets(self):
        """Slot: query tickets for the current form values and fill the table."""
        print("查询票")
        start_date = self.start_date_edit.text()
        print(start_date)
        station_dic = APITool.get_all_stations()
        from_station_code = station_dic[self.from_station_cb.currentText()]
        to_station_code = station_dic[self.to_station_cb.currentText()]
        print(from_station_code, to_station_code)
        # Ticket type (adult/student) stored as a dynamic property on the
        # checked radio button.
        purpose_codes = self.buttonGroup.checkedButton().property("q_value")
        print(purpose_codes)
        result, result_len = APITool.query_tickts(start_date, from_station_code, to_station_code, purpose_codes)
        #print(result_len)
        #for i in result:
        #    print(i)
        model = self.tickets_tv.model()
        model.setRowCount(result_len)
        # Column spec: a str pulls one field from the train dict; a tuple
        # joins two fields with '->'.
        cols = ["train_name",("from_station_name","to_station_name"),("start_time","arrive_time"),"total_time","business_seat","first_seat",
                "second_seat", "vip_soft_bed", "soft_bed", "move_bed", "hard_bed", "hard_seat", "no_seat", "other_seat"]
        for row, train_dic in enumerate(result):
            #print(train_dic)
            for col, col_name in enumerate(cols):
                if type(col_name) == str:
                    model.setItem(row, col, QStandardItem(train_dic[col_name]))
                else:
                    tmp = "->".join([train_dic[key] for key in col_name])
                    model.setItem(row, col, QStandardItem(tmp))
        self.tickets_tv.setModel(model)
if __name__ == '__main__':
    # Manual test entry point: launch the query window standalone.
    import sys
    app = QApplication(sys.argv)
    window = QueryPane()
    window.show()
    sys.exit(app.exec())
```
#### File: qp12306/resource/login_ui.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """UI definition for the login window (account/password fields, captcha
    label with refresh button, login button).

    NOTE(review): this looks machine-generated (pyuic5 style) from a .ui
    file; prefer editing the .ui source and regenerating over hand edits.
    """
    def setupUi(self, Form):
        # Fixed-size 600x500 window.
        Form.setObjectName("Form")
        Form.resize(600, 500)
        Form.setMinimumSize(QtCore.QSize(600, 500))
        Form.setMaximumSize(QtCore.QSize(600, 500))
        self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.widget = QtWidgets.QWidget(Form)
        self.widget.setMinimumSize(QtCore.QSize(450, 400))
        self.widget.setMaximumSize(QtCore.QSize(450, 400))
        self.widget.setStyleSheet("")
        self.widget.setObjectName("widget")
        self.gridLayout = QtWidgets.QGridLayout(self.widget)
        self.gridLayout.setObjectName("gridLayout")
        # Account line edit.
        self.account_le = QtWidgets.QLineEdit(self.widget)
        self.account_le.setMinimumSize(QtCore.QSize(0, 45))
        self.account_le.setClearButtonEnabled(True)
        self.account_le.setObjectName("account_le")
        self.gridLayout.addWidget(self.account_le, 0, 0, 1, 2)
        # Password line edit (masked input).
        self.pwd_le = QtWidgets.QLineEdit(self.widget)
        self.pwd_le.setMinimumSize(QtCore.QSize(0, 45))
        self.pwd_le.setEchoMode(QtWidgets.QLineEdit.Password)
        self.pwd_le.setClearButtonEnabled(True)
        self.pwd_le.setObjectName("pwd_le")
        self.gridLayout.addWidget(self.pwd_le, 1, 0, 1, 2)
        # Captcha refresh button.
        self.pushButton = QtWidgets.QPushButton(self.widget)
        self.pushButton.setMinimumSize(QtCore.QSize(80, 80))
        self.pushButton.setMaximumSize(QtCore.QSize(80, 80))
        self.pushButton.setObjectName("pushButton")
        self.gridLayout.addWidget(self.pushButton, 2, 0, 1, 1)
        # Captcha image label (custom clickable SzLabel).
        self.yzm_label = SzLabel(self.widget)
        self.yzm_label.setMinimumSize(QtCore.QSize(293, 190))
        self.yzm_label.setMaximumSize(QtCore.QSize(293, 190))
        self.yzm_label.setStyleSheet("background-color: rgb(85, 255, 255);")
        self.yzm_label.setTextFormat(QtCore.Qt.PlainText)
        self.yzm_label.setObjectName("yzm_label")
        self.gridLayout.addWidget(self.yzm_label, 2, 1, 1, 1)
        # Login button (disabled until both fields are filled).
        self.login_btn = QtWidgets.QPushButton(self.widget)
        self.login_btn.setEnabled(False)
        self.login_btn.setMinimumSize(QtCore.QSize(0, 50))
        self.login_btn.setObjectName("login_btn")
        self.gridLayout.addWidget(self.login_btn, 3, 0, 1, 2)
        self.horizontalLayout.addWidget(self.widget)
        self.retranslateUi(Form)
        # Signal wiring: Form must implement these slots.
        self.pushButton.clicked.connect(Form.refresh_yzm)
        self.login_btn.clicked.connect(Form.check_login)
        self.account_le.textChanged['QString'].connect(Form.auto_enable_login_btn)
        self.pwd_le.textChanged['QString'].connect(Form.auto_enable_login_btn)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Install the (translatable) display strings for every widget."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "12306抢票"))
        self.account_le.setPlaceholderText(_translate("Form", "请输入你的12306账号"))
        self.pwd_le.setPlaceholderText(_translate("Form", "请输入密码"))
        self.pushButton.setText(_translate("Form", "刷新"))
        self.yzm_label.setText(_translate("Form", " 正在下载验证码..."))
        self.login_btn.setText(_translate("Form", "登录"))
from Sz_Label import SzLabel
``` |
{
"source": "jiangnanyida/pykg2vec",
"score": 3
} |
#### File: pykg2vec/core/Rescal.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pykg2vec.core.KGMeta import ModelMeta
class Rescal(ModelMeta):
"""`A Three-Way Model for Collective Learning on Multi-Relational Data`_
RESCAL is a tensor factorization approach to knowledge representation learning,
which is able to perform collective learning via the latent components of the factorization.
Args:
config (object): Model configuration parameters.
Attributes:
config (object): Model configuration.
model_name (str): Name of the model.
data_stats (object): Class object with knowlege graph statistics.
Examples:
>>> from pykg2vec.core.Rescal import Rescal
>>> from pykg2vec.utils.trainer import Trainer
>>> model = Rescal()
>>> trainer = Trainer(model=model, debug=False)
>>> trainer.build_model()
>>> trainer.train_model()
Portion of the code based on mnick_ and `OpenKE_Rescal`_.
.. _mnick: https://github.com/mnick/rescal.py/blob/master/rescal/rescal.py
.. _OpenKE_Rescal: https://github.com/thunlp/OpenKE/blob/master/models/RESCAL.py
.. _A Three-Way Model for Collective Learning on Multi-Relational Data : http://www.icml-2011.org/papers/438_icmlpaper.pdf
"""
    def __init__(self, config):
        """Store the configuration and knowledge-graph statistics.

        Args:
            config (object): model configuration; must expose ``kg_meta``.
        """
        self.config = config
        self.data_stats = self.config.kg_meta  # entity/relation counts
        self.model_name = 'Rescal'
    def def_inputs(self):
        """Defines the inputs to the model.

        TF1-style graph construction: each attribute is an int32 placeholder
        holding a 1-D batch of ids, fed at session run time.

        Attributes:
            pos_h (Tensor): Positive Head entities ids.
            pos_r (Tensor): Positive Relation ids of the triple.
            pos_t (Tensor): Positive Tail entity ids of the triple.
            neg_h (Tensor): Negative Head entities ids.
            neg_r (Tensor): Negative Relation ids of the triple.
            neg_t (Tensor): Negative Tail entity ids of the triple.
            test_h_batch (Tensor): Batch of head ids for testing.
            test_r_batch (Tensor): Batch of relation ids for testing.
            test_t_batch (Tensor): Batch of tail ids for testing.
        """
        with tf.name_scope("read_inputs"):
            self.pos_h = tf.placeholder(tf.int32, [None])
            self.pos_t = tf.placeholder(tf.int32, [None])
            self.pos_r = tf.placeholder(tf.int32, [None])
            self.neg_h = tf.placeholder(tf.int32, [None])
            self.neg_t = tf.placeholder(tf.int32, [None])
            self.neg_r = tf.placeholder(tf.int32, [None])
            self.test_h_batch = tf.placeholder(tf.int32, [None])
            self.test_t_batch = tf.placeholder(tf.int32, [None])
            self.test_r_batch = tf.placeholder(tf.int32, [None])
def def_parameters(self):
"""Defines the model parameters.
Attributes:
num_total_ent (int): Total number of entities.
num_total_rel (int): Total number of relations.
k (Tensor): Size of the latent dimesnion for entities and relations.
ent_embeddings (Tensor Variable): Lookup variable containing embedding of the entities.
rel_matrices (Tensor Variable): Transformation matrices for entities into relation space.
parameter_list (list): List of Tensor parameters.
"""
num_total_ent = self.data_stats.tot_entity
num_total_rel = self.data_stats.tot_relation
k = self.config.hidden_size
with tf.name_scope("embedding"):
# A: per each entity, store its embedding representation.
self.ent_embeddings = tf.get_variable(name="ent_embedding",
shape=[num_total_ent, k],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
# M: per each relation, store a matrix that models the interactions between entity embeddings.
self.rel_matrices = tf.get_variable(name="rel_matrices",
shape=[num_total_rel, k * k],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
self.parameter_list = [self.ent_embeddings, self.rel_matrices]
def cal_truth_val(self, h, r, t):
"""Function to calculate truth value.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
Returns:
Tensors: Returns Tensors.
"""
# dim of h: [m, k, 1]
# r: [m, k, k]
# t: [m, k, 1]
return tf.reduce_sum(h * tf.matmul(r, t), [1, 2])
    def def_loss(self):
        """Defines the pairwise margin-based ranking loss.

        NOTE(review): ``self.ent_embeddings`` / ``self.rel_matrices`` are
        re-bound below to their l2-normalized tensors, so any attribute read
        performed after this method (e.g. in ``test_batch``) presumably sees
        the normalized tensors rather than the raw variables — confirm this
        ordering is intended.
        """
        k = self.config.hidden_size
        with tf.name_scope('normalization'):
            # Row-normalize entity embeddings and flattened relation matrices.
            self.ent_embeddings = tf.nn.l2_normalize(self.ent_embeddings, axis=1)
            self.rel_matrices = tf.nn.l2_normalize(self.rel_matrices, axis=1)
        with tf.name_scope('lookup_embeddings'):
            pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_h)
            pos_r_e = tf.nn.embedding_lookup(self.rel_matrices, self.pos_r)
            pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_t)
            neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_h)
            neg_r_e = tf.nn.embedding_lookup(self.rel_matrices, self.neg_r)
            neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_t)
        with tf.name_scope('reshaping'):
            # Entities become column vectors, relations become k x k matrices.
            pos_h_e = tf.reshape(pos_h_e, [-1, k, 1])
            pos_r_e = tf.reshape(pos_r_e, [-1, k, k])
            pos_t_e = tf.reshape(pos_t_e, [-1, k, 1])
            neg_h_e = tf.reshape(neg_h_e, [-1, k, 1])
            neg_r_e = tf.reshape(neg_r_e, [-1, k, k])
            neg_t_e = tf.reshape(neg_t_e, [-1, k, 1])
        pos_score = self.cal_truth_val(pos_h_e, pos_r_e, pos_t_e)
        neg_score = self.cal_truth_val(neg_h_e, neg_r_e, neg_t_e)
        # Hinge loss: penalize negatives scoring within `margin` of positives.
        self.loss = tf.reduce_sum(tf.maximum(neg_score + self.config.margin - pos_score, 0))
    def test_batch(self):
        """Function that performs batch testing for the algorithm.

        Scores every entity as a candidate head (resp. tail) for each test
        triple, then ranks the candidates.

        Returns:
            Tensors: Returns ranks of head and tail.
        """
        num_entity = self.data_stats.tot_entity
        k = self.config.hidden_size
        h_vec, r_vec, t_vec = self.embed(self.test_h_batch, self.test_r_batch, self.test_t_batch)
        # Head candidates: (M_r t) dotted against every entity embedding row.
        h_sim = tf.tensordot(tf.squeeze(tf.matmul(r_vec, t_vec), axis=-1), self.ent_embeddings, axes=((-1), (-1)))
        # Tail candidates: (h^T M_r) dotted against every entity embedding row.
        t_sim = tf.squeeze(tf.tensordot(tf.matmul(tf.reshape(h_vec, [-1, 1, k]), r_vec),
                                        self.ent_embeddings, axes=((-1), (-1))), axis=1)
        # NOTE(review): tf.negative turns the similarity into a distance-like
        # score, so top_k lists candidates in ascending similarity — confirm
        # the evaluator expects that ordering.
        _, head_rank = tf.nn.top_k(tf.negative(h_sim), k=num_entity)
        _, tail_rank = tf.nn.top_k(tf.negative(t_sim), k=num_entity)
        return head_rank, tail_rank
def embed(self, h, r, t):
"""Function to get the embedding value.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
Returns:
Tensors: Returns head, relation and tail embedding Tensors.
"""
k = self.config.hidden_size
emb_h = tf.nn.embedding_lookup(tf.nn.l2_normalize(self.ent_embeddings, axis=1), h)
emb_r = tf.nn.embedding_lookup(tf.nn.l2_normalize(self.rel_matrices, axis=1), r)
emb_t = tf.nn.embedding_lookup(tf.nn.l2_normalize(self.ent_embeddings, axis=1), t)
#
emb_h = tf.reshape(emb_h, [-1, k, 1])
emb_r = tf.reshape(emb_r, [-1, k, k])
emb_t = tf.reshape(emb_t, [-1, k, 1])
return emb_h, emb_r, emb_t
def get_embed(self, h, r, t, sess):
"""Function to get the embedding value in numpy.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
sess (object): Tensorflow Session object.
Returns:
Tensors: Returns head, relation and tail embedding Tensors.
"""
emb_h, emb_r, emb_t = self.embed(h, r, t)
h, r, t = sess.run([emb_h, emb_r, emb_t])
return h, r, t
    def get_proj_embed(self, h, r, t, sess):
        """Function to get the projected embedding value in numpy.

        Delegates directly to :meth:`get_embed` (no separate projection is
        applied for this model).

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.
            sess (object): Tensorflow Session object.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        return self.get_embed(h, r, t, sess)
```
#### File: pykg2vec/core/TransD.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pykg2vec.core.KGMeta import ModelMeta
class TransD(ModelMeta):
    """ `Knowledge Graph Embedding via Dynamic Mapping Matrix`_

    TransD constructs a dynamic mapping matrix for each entity-relation pair by considering the diversity of entities and relations simultaneously.
    Compared with TransR/CTransR, TransD has fewer parameters and has no matrix vector multiplication.

    Args:
        config (object): Model configuration parameters.

    Attributes:
        config (object): Model configuration.
        model_name (str): Name of the model.
        data_stats (object): Class object with knowledge graph statistics.

    Examples:
        >>> from pykg2vec.core.TransD import TransD
        >>> from pykg2vec.utils.trainer import Trainer
        >>> model = TransD()
        >>> trainer = Trainer(model=model, debug=False)
        >>> trainer.build_model()
        >>> trainer.train_model()

    Portion of the code based on `OpenKE_TransD`_.

    .. _OpenKE_TransD:
        https://github.com/thunlp/OpenKE/blob/master/models/TransD.py

    .. _Knowledge Graph Embedding via Dynamic Mapping Matrix:
        https://www.aclweb.org/anthology/P15-1067
    """

    def __init__(self, config):
        # Model configuration (hyper-parameters, dataset handles, ...).
        self.config = config
        # Knowledge-graph statistics (entity / relation counts).
        self.data_stats = self.config.kg_meta
        self.model_name = 'TransD'

    def def_inputs(self):
        """Defines the inputs to the model.

        Attributes:
            pos_h (Tensor): Positive Head entities ids.
            pos_r (Tensor): Positive Relation ids of the triple.
            pos_t (Tensor): Positive Tail entity ids of the triple.
            neg_h (Tensor): Negative Head entities ids.
            neg_r (Tensor): Negative Relation ids of the triple.
            neg_t (Tensor): Negative Tail entity ids of the triple.
            test_h_batch (Tensor): Batch of head ids for testing.
            test_r_batch (Tensor): Batch of relation ids for testing.
            test_t_batch (Tensor): Batch of tail ids for testing.
        """
        self.pos_h = tf.placeholder(tf.int32, [None])
        self.pos_t = tf.placeholder(tf.int32, [None])
        self.pos_r = tf.placeholder(tf.int32, [None])
        self.neg_h = tf.placeholder(tf.int32, [None])
        self.neg_t = tf.placeholder(tf.int32, [None])
        self.neg_r = tf.placeholder(tf.int32, [None])
        self.test_h_batch = tf.placeholder(tf.int32, [None])
        self.test_t_batch = tf.placeholder(tf.int32, [None])
        self.test_r_batch = tf.placeholder(tf.int32, [None])

    def def_parameters(self):
        """Defines the model parameters.

        Attributes:
            num_total_ent (int): Total number of entities.
            num_total_rel (int): Total number of relations.
            k (Tensor): Size of the latent dimension for relations.
            d (Tensor): Size of the latent dimension for entities.
            ent_embeddings (Tensor Variable): Lookup variable containing embedding of the entities.
            rel_embeddings (Tensor Variable): Lookup variable containing embedding of the relations.
            ent_mappings (Tensor Variable): Lookup variable containing mapping for entities.
            rel_mappings (Tensor Variable): Lookup variable containing mapping for relations.
            parameter_list (list): List of Tensor parameters.
        """
        num_total_ent = self.data_stats.tot_entity
        num_total_rel = self.data_stats.tot_relation
        d = self.config.ent_hidden_size
        k = self.config.rel_hidden_size
        with tf.name_scope("embedding"):
            self.ent_embeddings = tf.get_variable(name="ent_embedding",
                                                  shape=[num_total_ent, d],
                                                  initializer=tf.contrib.layers.xavier_initializer(uniform=False))
            self.rel_embeddings = tf.get_variable(name="rel_embedding",
                                                  shape=[num_total_rel, k],
                                                  initializer=tf.contrib.layers.xavier_initializer(uniform=False))
            # Per-entity / per-relation projection vectors used to build the
            # dynamic mapping (h_p, r_p in the paper).
            self.ent_mappings = tf.get_variable(name="ent_mappings",
                                                shape=[num_total_ent, d],
                                                initializer=tf.contrib.layers.xavier_initializer(uniform=False))
            self.rel_mappings = tf.get_variable(name="rel_mappings",
                                                shape=[num_total_rel, k],
                                                initializer=tf.contrib.layers.xavier_initializer(uniform=False))
            self.parameter_list = [self.ent_embeddings, self.rel_embeddings, self.ent_mappings, self.rel_mappings]

    def def_loss(self):
        """Defines the margin-based ranking loss for the algorithm.

        NOTE(review): the projection ``e + sum(e * e_m) * r_m`` adds a d-dim
        entity term to a k-dim relation term, which presumably assumes
        ent_hidden_size == rel_hidden_size — confirm with the configs used.
        """
        pos_h_e, pos_r_e, pos_t_e = self.embed(self.pos_h, self.pos_r, self.pos_t)
        neg_h_e, neg_r_e, neg_t_e = self.embed(self.neg_h, self.neg_r, self.neg_t)
        pos_h_m, pos_r_m, pos_t_m = self.get_mapping(self.pos_h, self.pos_r, self.pos_t)
        neg_h_m, neg_r_m, neg_t_m = self.get_mapping(self.neg_h, self.neg_r, self.neg_t)
        # Dynamic projection of each entity, then renormalization.
        pos_h_e = tf.nn.l2_normalize(pos_h_e + tf.reduce_sum(pos_h_e * pos_h_m, -1, keepdims=True) * pos_r_m, -1)
        pos_r_e = tf.nn.l2_normalize(pos_r_e, -1)
        pos_t_e = tf.nn.l2_normalize(pos_t_e + tf.reduce_sum(pos_t_e * pos_t_m, -1, keepdims=True) * pos_r_m, -1)
        neg_h_e = tf.nn.l2_normalize(neg_h_e + tf.reduce_sum(neg_h_e * neg_h_m, -1, keepdims=True) * neg_r_m, -1)
        neg_r_e = tf.nn.l2_normalize(neg_r_e, -1)
        neg_t_e = tf.nn.l2_normalize(neg_t_e + tf.reduce_sum(neg_t_e * neg_t_m, -1, keepdims=True) * neg_r_m, -1)
        score_pos = self.distance(pos_h_e, pos_r_e, pos_t_e)
        score_neg = self.distance(neg_h_e, neg_r_e, neg_t_e)
        # Hinge loss over the distance gap between positive and negative triples.
        self.loss = tf.reduce_sum(tf.maximum(score_pos - score_neg + self.config.margin, 0))

    def test_batch(self):
        """Function that performs batch testing for the algorithm.

        Returns:
            Tensors: Returns ranks of head and tail.
        """
        num_total_ent = self.data_stats.tot_entity
        head_vec, rel_vec, tail_vec = self.embed(self.test_h_batch, self.test_r_batch, self.test_t_batch)
        h_m, r_m, t_m = self.get_mapping(self.test_h_batch, self.test_r_batch, self.test_t_batch)
        # Project the query triple's embeddings with their dynamic mappings.
        head_vec = tf.nn.l2_normalize(head_vec + tf.reduce_sum(head_vec * h_m, -1, keepdims=True) * r_m, -1)
        rel_vec = tf.nn.l2_normalize(rel_vec, -1)
        tail_vec = tf.nn.l2_normalize(tail_vec + tf.reduce_sum(tail_vec * t_m, -1, keepdims=True) * r_m, -1)
        # Project every candidate entity with the test relation's mapping r_m.
        norm_ent_embeddings = tf.nn.l2_normalize(self.ent_embeddings, -1)
        norm_ent_mappings = tf.nn.l2_normalize(self.ent_mappings, -1)
        project_ent_embedding = tf.nn.l2_normalize(
            norm_ent_embeddings + tf.reduce_sum(norm_ent_embeddings * norm_ent_mappings, -1, keepdims=True) * tf.expand_dims(r_m, axis=1), -1)
        score_head = self.distance(project_ent_embedding,
                                   tf.expand_dims(rel_vec, axis=1),
                                   tf.expand_dims(tail_vec, axis=1), axis=2)
        score_tail = self.distance(tf.expand_dims(head_vec, axis=1),
                                   tf.expand_dims(rel_vec, axis=1),
                                   project_ent_embedding, axis=2)
        # NOTE(review): top_k sorts descending, so the rank lists are ordered
        # by decreasing distance — confirm the evaluator expects this ordering.
        _, head_rank = tf.nn.top_k(score_head, k=num_total_ent)
        _, tail_rank = tf.nn.top_k(score_tail, k=num_total_ent)
        return head_rank, tail_rank

    def distance(self, h, r, t, axis=1):
        """Function to calculate distance measure in embedding space.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.
            axis (int): Determines the axis for reduction

        Returns:
            Tensors: Returns the distance measure.
        """
        if self.config.L1_flag:
            return tf.reduce_sum(tf.abs(h + r - t), axis=axis)  # L1 norm
        else:
            return tf.reduce_sum((h + r - t) ** 2, axis=axis)  # L2 norm

    def get_mapping(self, h, r, t):
        """Function to get the mapping for head, relation and tails.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns the mapped values for head, relation and tail
        """
        h_m = tf.nn.embedding_lookup(self.ent_mappings, h)
        r_m = tf.nn.embedding_lookup(self.rel_mappings, r)
        t_m = tf.nn.embedding_lookup(self.ent_mappings, t)
        return h_m, r_m, t_m

    def embed(self, h, r, t):
        """Function to get the embedding value.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors
            (each row l2-normalized).
        """
        h_e = tf.nn.embedding_lookup(self.ent_embeddings, h)
        r_e = tf.nn.embedding_lookup(self.rel_embeddings, r)
        t_e = tf.nn.embedding_lookup(self.ent_embeddings, t)
        h_e = tf.nn.l2_normalize(h_e, -1)
        r_e = tf.nn.l2_normalize(r_e, -1)
        t_e = tf.nn.l2_normalize(t_e, -1)
        return h_e, r_e, t_e

    def get_embed(self, h, r, t, sess):
        """Function to get the embedding value in numpy.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.
            sess (object): Tensorflow Session object.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        pos_h_e, pos_r_e, pos_t_e = self.embed(h, r, t)
        # temp — NOTE(review): squeezing axis 0 assumes a single triple was
        # passed in; verify against the callers.
        pos_h_e, pos_r_e, pos_t_e = tf.squeeze(pos_h_e, 0), tf.squeeze(pos_r_e, 0), tf.squeeze(pos_t_e, 0)
        h, r, t = sess.run([pos_h_e, pos_r_e, pos_t_e])
        return h, r, t

    def get_proj_embed(self, h, r, t, sess=None):
        """Function to get the projected embedding value in numpy.

        Delegates directly to :meth:`get_embed`.

        Args:
            h (Tensor): Head entities ids.
            r (Tensor): Relation ids of the triple.
            t (Tensor): Tail entity ids of the triple.
            sess (object): Tensorflow Session object.

        Returns:
            Tensors: Returns head, relation and tail embedding Tensors.
        """
        return self.get_embed(h, r, t, sess)
```
#### File: pykg2vec/example/kgpipeline.py
```python
from pykg2vec.utils.KGPipeline import KGPipeline
def main():
    """Smoke-test the KGPipeline tune/test workflow on Freebase15k with TransE."""
    pipeline = KGPipeline(model="transe", dataset="Freebase15k", debug=True)
    pipeline.tune()
    pipeline.test()


if __name__ == "__main__":
    main()
```
#### File: pykg2vec/utils/bayesian_optimizer.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
from hyperopt import hp, fmin, tpe, Trials, STATUS_OK, space_eval
import pandas as pd
from pykg2vec.utils.kgcontroller import KnowledgeGraph
from pykg2vec.utils.trainer import Trainer
from pprint import pprint
# Dotted module paths used to resolve model / config / hyper-parameter classes.
model_path = "pykg2vec.core"
config_path = "pykg2vec.config.config"
hyper_param_path = "pykg2vec.config.hyperparams"

# Lowercase CLI model name -> model class name in pykg2vec.core.
modelMap = {"complex": "Complex",
            "conve": "ConvE",
            "hole": "HoLE",
            "distmult": "DistMult",
            "kg2e": "KG2E",
            "ntn": "NTN",
            "proje_pointwise": "ProjE_pointwise",
            "rescal": "Rescal",
            "rotate": "RotatE",
            "slm": "SLM",
            "sme": "SME",
            "transd": "TransD",
            "transe": "TransE",
            "transg": "TransG",
            "transh": "TransH",
            "transm": "TransM",
            "transr": "TransR",
            "tucker": "TuckER"}

# Lowercase CLI model name -> configuration class name in pykg2vec.config.config.
configMap = {"complex": "ComplexConfig",
             "conve": "ConvEConfig",
             "hole": "HoLEConfig",
             "distmult": "DistMultConfig",
             "kg2e": "KG2EConfig",
             "ntn": "NTNConfig",
             "proje_pointwise": "ProjE_pointwiseConfig",
             "rescal": "RescalConfig",
             "rotate": "RotatEConfig",
             "slm": "SLMConfig",
             "sme": "SMEConfig",
             "transd": "TransDConfig",
             "transe": "TransEConfig",
             "transg": "TransGConfig",
             "transh": "TransHConfig",
             "transm": "TransMConfig",
             "transr": "TransRConfig",
             "tucker": "TuckERConfig"}

# Lowercase CLI model name -> hyper-parameter search-space class name.
hypMap = {"complex": "ComplexParams",
          "conve": "ConvEParams",
          "hole": "HoLEParams",
          "distmult": "DistMultParams",
          "kg2e": "KG2EParams",
          "ntn": "NTNParams",
          "proje_pointwise": "ProjE_pointwiseParams",
          "rescal": "RescalParams",
          "rotate": "RotatEParams",
          "slm": "SLMParams",
          "sme": "SMEParams",
          "transd": "TransDParams",
          "transe": "TransEParams",
          "transg": "TransGParams",
          "transh": "TransHParams",
          "transm": "TransMParams",
          "transr": "TransRParams",
          "tucker": "TuckERParams"}
class BaysOptimizer(object):
    """Bayesian optimizer class for tuning hyperparameter.

    This class implements the Bayesian Optimizer for tuning the
    hyper-parameter.

    Args:
        args (object): The Argument Parser object providing arguments.
        name_dataset (str): The name of the dataset.
        sampling (str): sampling to be used for generating negative triples

    Examples:
        >>> from pykg2vec.config.hyperparams import KGETuneArgParser
        >>> from pykg2vec.utils.bayesian_optimizer import BaysOptimizer
        >>> args = KGETuneArgParser().get_args(sys.argv[1:])
        >>> bays_opt = BaysOptimizer(args=args)
        >>> bays_opt.optimize()
    """

    def __init__(self, args=None):
        """store the information of database"""
        model_name = args.model.lower()
        self.args = args
        # Load the knowledge graph with the requested negative-sampling scheme.
        self.knowledge_graph = KnowledgeGraph(dataset=args.dataset_name, negative_sample=args.sampling, custom_dataset_path=args.dataset_path)
        hyper_params = None
        try:
            # Resolve model / config / hyper-parameter classes by name.
            self.model_obj = getattr(importlib.import_module(model_path + ".%s" % modelMap[model_name]),
                                     modelMap[model_name])
            self.config_obj = getattr(importlib.import_module(config_path), configMap[model_name])
            hyper_params = getattr(importlib.import_module(hyper_param_path), hypMap[model_name])()
        except ModuleNotFoundError:
            # NOTE(review): the error is only printed and execution falls
            # through; the next access to self.model_obj / hyper_params will
            # raise a confusing NameError/AttributeError — consider re-raising.
            print("%s not implemented! Select from: %s" % (model_name,
                                                          ' '.join(map(str, modelMap.values()))))
        config = self.config_obj()
        config.data=args.dataset_name
        # Trainer is built once and re-configured per trial in get_loss().
        self.trainer = Trainer(model=self.model_obj(config), debug=self.args.debug, tuning=True)
        if self.args.debug is True:
            hyper_params.epochs = [1]  # only try for 1 epoch if it is in debug mode.
        self.search_space = self.define_search_space(hyper_params)
        self.max_evals = self.args.max_number_trials if not self.args.debug else 1

    def define_search_space(self, hyper_params):
        """Function to perform search space addition.

        Builds a hyperopt choice per public attribute of the params object.
        NOTE(review): ``not callable(k)`` tests the attribute *name* (a
        string), which is never callable, so that clause is vacuous —
        presumably ``callable(v)`` was intended.
        """
        space = {k: hp.choice(k, v) for k, v in hyper_params.__dict__.items() if not k.startswith('__') and not callable(k)}
        return space

    def optimize(self):
        """Function that performs bayesian optimization"""
        space = self.search_space
        trials = Trials()
        # TPE-based search over the discrete choice space.
        self.best_result = fmin(fn=self.get_loss, space=space, algo=tpe.suggest, max_evals=self.max_evals, trials=trials)
        columns = list(space.keys())
        results = pd.DataFrame(columns=['iteration'] + columns + ['loss'])
        for idx, trial in enumerate(trials.trials):
            row = []
            row.append(idx)
            # hyperopt stores each chosen value as a one-element list; unwrap
            # and translate the index back into the concrete parameter value.
            translated_eval = space_eval(self.search_space, {k: v[0] for k, v in trial['misc']['vals'].items()})
            for k in columns:
                row.append(translated_eval[k])
            row.append(trial['result']['loss'])
            results.loc[idx] = row
        # Persist the full trial table next to the model's other results.
        path = self.trainer.config.result / self.trainer.model.model_name
        path.mkdir(parents=True, exist_ok=True)
        results.to_csv(str(path / "trials.csv"), index=False)
        print(results)
        print('Found Golden Setting:')
        pprint(space_eval(space, self.best_result))

    def return_best(self):
        """Function to return the best hyper-parameters"""
        return space_eval(self.search_space, self.best_result)

    def get_loss(self, params):
        """Function that defines and acquires the loss for one trial."""
        # Push the sampled hyper-parameters into the shared trainer config.
        self.trainer.config.L1_flag = params['L1_flag']
        self.trainer.config.batch_size = params['batch_size']
        self.trainer.config.epochs = params['epochs']
        if 'hidden_size' in params:
            self.trainer.config.hidden_size = params['hidden_size']
        if 'ent_hidden_size' in params:
            self.trainer.config.ent_hidden_size = params['ent_hidden_size']
        if 'rel_hidden_size' in params:
            self.trainer.config.rel_hidden_size = params['rel_hidden_size']
        self.trainer.config.learning_rate = params['learning_rate']
        self.trainer.config.margin = params['margin']
        # Silence per-trial output and skip checkpointing while tuning.
        self.trainer.config.disp_result = False
        self.trainer.config.disp_summary = False
        self.trainer.config.save_model = False
        self.trainer.config.debug = True
        self.trainer.config.test_num = 1000
        self.trainer.build_model()
        self.trainer.summary_hyperparameter()
        loss = self.trainer.tune_model()
        # loss = self.trainer.train_model(tuning=True)
        return {'loss': loss, 'status': STATUS_OK}
``` |
{
"source": "JianGoForIt/pytorch-tutorial",
"score": 2
} |
#### File: 01-basics/logistic_regression/main.py
```python
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
import argparse
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../../../"))
import utils
from compression_utils import ActivationUniformQuantizerBwOnly
from compression_utils import ActivationPercentageSparsifierBwOnly
from compression_utils import ActivationDropoutSparsifierBwOnly
import logging
from subprocess import check_output
# Hyper-parameters
input_size = 784   # 28x28 MNIST images, flattened
num_classes = 10   # MNIST digit classes
num_epochs = 20
batch_size = 100
hidden_size = 128  # width of the MLP hidden layer
# we quantize the second linear layer of the 2layer MLP
# NOTE(review): the sample activation shape covers 100 mini-batches' worth of
# hidden activations — confirm this matches the quantizer's expectation.
sample_act_shape = [batch_size * 100, hidden_size]
# learning_rate = 0.1  (superseded by the --lr CLI flag)
class Net(nn.Module):
    """Two-layer MLP (no activation between the layers).

    The hidden pre-activation is kept in ``self.test_out`` so the
    compression sanity checks can inspect it after a forward pass.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        hidden = self.fc1(x)
        # Stash the hidden activation for later inspection.
        self.test_out = hidden
        return self.fc2(hidden)
def setup_bw_comp_act(model, args):
    """Attach the configured backward-pass activation compressor to ``model.fc2``.

    At most one of quantization (``--use_quant``) or sparsification
    (``--use_sparse``) may be enabled.

    Args:
        model: The ``Net`` instance whose second linear layer is compressed.
        args: Parsed CLI arguments selecting the compressor and its settings.

    Raises:
        Exception: If ``--use_sparse`` is set without choosing a sparsifier type.
    """
    assert args.use_quant == False or args.use_sparse == False
    if args.use_quant:
        nbit = args.quant_nbit
        # BUG FIX: the original line was ``do_stoc = args.quant_stoc,`` — the
        # trailing comma made do_stoc a 1-tuple, which is always truthy, so
        # stochastic quantization was enabled regardless of --quant_stoc.
        do_stoc = args.quant_stoc
        do_auto_clip = args.quant_clip
        model.fc2.compressor = ActivationUniformQuantizerBwOnly(model.fc2, nbit, sample_act_shape,
                                                                do_stoc=do_stoc, do_auto_clip=do_auto_clip)
    elif args.use_sparse:
        if args.sparse_perc:
            model.fc2.compressor = \
                ActivationPercentageSparsifierBwOnly(model.fc2,
                                                     sparse_level=args.sparse_level, per_sample_sparse=args.per_sample_sparse)
        elif args.sparse_dropout:
            model.fc2.compressor = \
                ActivationDropoutSparsifierBwOnly(model.fc2, sparse_level=args.sparse_level)
        else:
            raise Exception("The activation sparsifier type is not specified or supported!")
def collect_sample_to_estimate_clip_threshold(model, train_loader):
    """Run forward passes over the whole loader (outputs are discarded).

    Used while the quantizer is collecting activation statistics to pick a
    clipping threshold.
    """
    for images, labels in train_loader:
        # Flatten each image to a (batch, 784) row vector.
        flat = images.reshape(-1, 28 * 28)
        if torch.cuda.is_available():
            flat = flat.cuda()
            labels = labels.cuda()
        # Forward pass only; the compressor observes the activations.
        model(flat)
def main():
    """Train the 2-layer MLP on MNIST with optional backward-pass activation
    compression (quantization or sparsification) on the second linear layer.

    Logs train loss / test accuracy to TensorBoard and saves a result JSON.
    NOTE(review): indentation of this file was reconstructed; the per-epoch
    clip-threshold setup is assumed to apply only to the quantizer path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp_name", type=str, help="The name of the experiment.")
    parser.add_argument("--result_dir", type=str, help="The top level of experiment result directory.")
    parser.add_argument("--use_quant", action="store_true", help="Use activation quantization for backward.")
    parser.add_argument("--quant_nbit", type=int, help="# of bits for quantized activation.")
    parser.add_argument("--quant_clip", action="store_true", help="Do auto clipping for activations.")
    parser.add_argument("--quant_stoc", action="store_true", help="Use stochastic quantization for activation.")
    parser.add_argument("--lr", type=float, default=0.1, help="The learning rate for training")
    parser.add_argument("--seed", type=int, help="Random seed for the run.")
    parser.add_argument("--debug", action="store_true", help="If the job is in debuging mode, git diff must be empty if not specified.")
    parser.add_argument("--use_sparse", action="store_true", help="Use activation sparsifier")
    parser.add_argument("--sparse_perc", action="store_true", help="Use magnitude and percentage based sparsification.")
    parser.add_argument("--sparse_dropout", action="store_true", help="Use dropout style sparsification.")
    parser.add_argument("--per_sample_sparse", action="store_true", help="Whether the sparse_level is with respect to each sample or the entire activation")
    parser.add_argument("--sparse_level", type=float, default=0.0, help="Sparsity level. 0.9 means 90 percent of the act entries will be zeroed out.")
    args = parser.parse_args()
    # Experiment bookkeeping: seeds/metadata, output folder, TensorBoard writer.
    init_results = utils.experiment_setup(args)
    run_folder = utils.get_output_folder(args)
    writer = SummaryWriter(log_dir=run_folder)
    # MNIST dataset (images and labels)
    train_dataset = torchvision.datasets.MNIST(root='../../data',
                                               train=True,
                                               transform=transforms.ToTensor(),
                                               download=True)
    test_dataset = torchvision.datasets.MNIST(root='../../data',
                                              train=False,
                                              transform=transforms.ToTensor())
    # Data loader (input pipeline)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              shuffle=False)
    # Logistic regression model
    # model = nn.Linear(input_size, num_classes)
    model = Net(input_size=input_size, hidden_size=hidden_size, num_classes=num_classes)
    # Attach the backward-pass activation compressor to fc2.
    setup_bw_comp_act(model, args=args)
    if torch.cuda.is_available():
        model = model.cuda()
    # Loss and optimizer
    # nn.CrossEntropyLoss() computes softmax internally
    criterion = nn.CrossEntropyLoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    # Train the model
    train_loss_list = []
    test_acc_list = []
    total_step = len(train_loader)
    for epoch in range(num_epochs):
        if args.use_quant:
            # determine clipping threshold if necessary
            model.eval()
            model.fc2.compressor.start_per_epoch_setup()
            if args.use_quant and args.quant_clip:
                collect_sample_to_estimate_clip_threshold(model, train_loader)
                logging.info("Collected sample for clip threshold estimation for epoch {}".format(epoch))
            model.fc2.compressor.end_per_epoch_setup()
        # run the training steps
        model.train()
        logging.info("Pre training procedures done for epoch {}".format(epoch))
        for i, (images, labels) in enumerate(train_loader):
            # print("train ", i)
            # start compressor for new epoch
            model.fc2.compressor.start_epoch()
            # Reshape images to (batch_size, input_size)
            images = images.reshape(-1, 28*28)
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()
            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)
            writer.add_scalar(tag="train_loss", scalar_value=loss.item(), global_step=epoch * total_step + i)
            # logging.info("Train loss step {}: {}".format(epoch * total_step + i, loss.item()))
            train_loss_list.append(loss.item())
            # Backward and optimize
            optimizer.zero_grad()
            # # sanity check on activation
            # print("train check 1 ", float((model.test_out == 0).sum()) / float(model.test_out.numel()))
            # print("train check 2 ", float((model.test_out[0] == 0).sum()) / float(model.test_out[0].numel()))
            # print("train check 3 ", model.test_out[0])
            loss.backward()
            optimizer.step()
            if (i+1) % 100 == 0:
                logging.info('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                             .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
        # turn off compressor at the end of each epoch
        model.fc2.compressor.end_epoch()
        # Test the model
        # In test phase, we don't need to compute gradients (for memory efficiency)
        with torch.no_grad():
            model.eval()
            model.fc2.compressor.start_epoch()
            correct = 0
            total = 0
            for i, (images, labels) in enumerate(test_loader):
                # print("test ", i)
                images = images.reshape(-1, 28*28)
                if torch.cuda.is_available():
                    images = images.cuda()
                    labels = labels.cuda()
                outputs = model(images)
                # # sanity check on activation
                # print("test check 1 ", float((model.test_out == 0).sum()) / float(model.test_out.numel()))
                # print("test check 2 ", float((model.test_out[0] == 0).sum()) / float(model.test_out[0].numel()))
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum()
            model.fc2.compressor.end_epoch()
            model.train()
            writer.add_scalar(tag="test_acc", scalar_value=100 * float(correct) / float(total), global_step=total_step * (epoch + 1))
            test_acc_list.append(100 * float(correct) / float(total))
            logging.info('Accuracy of the model on the 10000 test images: {} %'.format(100 * float(correct) / float(total)))
    writer.close()
    # Persist the learning curves alongside the setup metadata.
    results = {"train_loss": train_loss_list,
               "test_acc": test_acc_list}
    results.update(init_results)
    utils.save_result_json(args, results, run_folder)
    # Save the model checkpoint
    # torch.save(model.state_dict(), 'model.ckpt')


if __name__ == "__main__":
    main()
``` |
{
"source": "JianGoForIt/tensorflow",
"score": 2
} |
#### File: python/framework/tensor_util_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class FloatDTypeTest(tf.test.TestCase):
  """Tests for tf.contrib.framework.assert_same_float_dtype and
  assert_scalar_int."""

  def test_assert_same_float_dtype(self):
    # With no tensors and no expected dtype, the helper defaults to float32.
    self.assertIs(
        tf.float32, tf.contrib.framework.assert_same_float_dtype(None, None))
    self.assertIs(
        tf.float32, tf.contrib.framework.assert_same_float_dtype([], None))
    self.assertIs(
        tf.float32,
        tf.contrib.framework.assert_same_float_dtype([], tf.float32))
    self.assertIs(
        tf.float32,
        tf.contrib.framework.assert_same_float_dtype(None, tf.float32))
    self.assertIs(
        tf.float32,
        tf.contrib.framework.assert_same_float_dtype([None, None], None))
    self.assertIs(
        tf.float32,
        tf.contrib.framework.assert_same_float_dtype([None, None], tf.float32))
    # A float32 constant must agree with float32 and clash with int32.
    const_float = tf.constant(3.0, dtype=tf.float32)
    self.assertIs(
        tf.float32,
        tf.contrib.framework.assert_same_float_dtype([const_float], tf.float32))
    self.assertRaises(
        ValueError,
        tf.contrib.framework.assert_same_float_dtype, [const_float], tf.int32)
    # SparseTensor values participate in the dtype check like dense tensors.
    sparse_float = tf.SparseTensor(
        tf.constant([[111], [232]], tf.int64),
        tf.constant([23.4, -43.2], tf.float32),
        tf.constant([500], tf.int64))
    self.assertIs(tf.float32, tf.contrib.framework.assert_same_float_dtype(
        [sparse_float], tf.float32))
    self.assertRaises(
        ValueError,
        tf.contrib.framework.assert_same_float_dtype, [sparse_float], tf.int32)
    self.assertRaises(
        ValueError, tf.contrib.framework.assert_same_float_dtype,
        [const_float, None, sparse_float], tf.float64)
    self.assertIs(
        tf.float32,
        tf.contrib.framework.assert_same_float_dtype(
            [const_float, sparse_float]))
    self.assertIs(tf.float32, tf.contrib.framework.assert_same_float_dtype(
        [const_float, sparse_float], tf.float32))
    # Mixing float tensors with int tensors must fail for every expected dtype.
    const_int = tf.constant(3, dtype=tf.int32)
    self.assertRaises(ValueError, tf.contrib.framework.assert_same_float_dtype,
                      [sparse_float, const_int])
    self.assertRaises(ValueError, tf.contrib.framework.assert_same_float_dtype,
                      [sparse_float, const_int], tf.int32)
    self.assertRaises(ValueError, tf.contrib.framework.assert_same_float_dtype,
                      [sparse_float, const_int], tf.float32)
    self.assertRaises(
        ValueError, tf.contrib.framework.assert_same_float_dtype, [const_int])

  def test_assert_scalar_int(self):
    # Scalar int32/int64 pass; float dtype or non-scalar shape must raise.
    tf.contrib.framework.assert_scalar_int(tf.constant(3, dtype=tf.int32))
    tf.contrib.framework.assert_scalar_int(tf.constant(3, dtype=tf.int64))
    with self.assertRaisesRegexp(ValueError, "Unexpected type"):
      tf.contrib.framework.assert_scalar_int(tf.constant(3, dtype=tf.float32))
    with self.assertRaisesRegexp(ValueError, "Unexpected shape"):
      tf.contrib.framework.assert_scalar_int(
          tf.constant([3, 4], dtype=tf.int32))
class LocalVariabletest(tf.test.TestCase):
  # NOTE(review): class name has a lower-case "t" in "test"; kept as-is since
  # renaming could affect how the test is discovered/reported.

  def test_local_variable(self):
    """local_variable() registers variables in the local (not global) collection."""
    with self.test_session() as sess:
      self.assertEquals([], tf.local_variables())
      value0 = 42
      tf.contrib.framework.local_variable(value0)
      value1 = 43
      tf.contrib.framework.local_variable(value1)
      variables = tf.local_variables()
      self.assertEquals(2, len(variables))
      # Reading before initialization must fail.
      self.assertRaises(tf.OpError, sess.run, variables)
      tf.initialize_variables(variables).run()
      self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
# Run the test suite when executed as a script.
if __name__ == "__main__":
  tf.test.main()
```
#### File: python/platform/benchmark.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import numbers
import os
import re
import sys
import six
from google.protobuf import text_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
# When a subclass of the Benchmark class is created, it is added to
# the registry automatically (see _BenchmarkRegistrar.__new__).
GLOBAL_BENCHMARK_REGISTRY = set()

# Environment variable that determines whether benchmarks are written.
# Its value is used as the output-file path prefix.
# See also tensorflow/core/util/reporter.h TestReporter::kTestReporterEnv.
TEST_REPORTER_TEST_ENV = "TEST_REPORT_FILE_PREFIX"
def _global_report_benchmark(
name, iters=None, cpu_time=None, wall_time=None,
throughput=None, extras=None):
"""Method for recording a benchmark directly.
Args:
name: The BenchmarkEntry name.
iters: (optional) How many iterations were run
cpu_time: (optional) Total cpu time in seconds
wall_time: (optional) Total wall time in seconds
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
Raises:
TypeError: if extras is not a dict.
IOError: if the benchmark output file already exists.
"""
if extras is not None:
if not isinstance(extras, dict):
raise TypeError("extras must be a dict")
test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
if test_env is None:
# Reporting was not requested
return
entry = test_log_pb2.BenchmarkEntry()
entry.name = name
if iters is not None:
entry.iters = iters
if cpu_time is not None:
entry.cpu_time = cpu_time
if wall_time is not None:
entry.wall_time = wall_time
if throughput is not None:
entry.throughput = throughput
if extras is not None:
for (k, v) in extras.items():
if isinstance(v, numbers.Number):
entry.extras[k].double_value = v
else:
entry.extras[k].string_value = str(v)
serialized_entry = text_format.MessageToString(entry)
mangled_name = name.replace("/", "__")
output_path = "%s%s" % (test_env, mangled_name)
if gfile.Exists(output_path):
raise IOError("File already exists: %s" % output_path)
with gfile.GFile(output_path, "w") as out:
out.write(serialized_entry)
class _BenchmarkRegistrar(type):
"""The Benchmark class registrar. Used by abstract Benchmark class."""
def __new__(mcs, clsname, base, attrs):
newclass = super(mcs, _BenchmarkRegistrar).__new__(
mcs, clsname, base, attrs)
if len(newclass.mro()) > 2:
# Only the base Benchmark abstract class has mro length 2.
# The rest subclass from it and are therefore registered.
GLOBAL_BENCHMARK_REGISTRY.add(newclass)
return newclass
class Benchmark(six.with_metaclass(_BenchmarkRegistrar, object)):
  """Abstract class that provides helper functions for running benchmarks.

  Any class subclassing this one is immediately registered in the global
  benchmark registry (via the _BenchmarkRegistrar metaclass).

  Only methods whose names start with the word "benchmark" will be run during
  benchmarking.
  """

  def _get_name(self, overwrite_name):
    """Returns full name of class and method calling report_benchmark."""
    # Expect that the caller called report_benchmark, which called _get_name.
    # NOTE: this relies on a fixed call depth (stack()[2]); do not insert
    # wrapper functions between report_benchmark and _get_name.
    caller = inspect.stack()[2]
    calling_class = caller[0].f_locals.get("self", None)
    # Use the calling method's name, unless overwrite_name is provided.
    name = overwrite_name if overwrite_name is not None else caller[3]
    if calling_class is not None:
      # Prefix the name with the class name.
      class_name = type(calling_class).__name__
      name = "%s.%s" % (class_name, name)
    return name

  def report_benchmark(
      self,
      iters=None,
      cpu_time=None,
      wall_time=None,
      throughput=None,
      extras=None,
      name=None):
    """Report a benchmark.

    Args:
      iters: (optional) How many iterations were run
      cpu_time: (optional) Total cpu time in seconds
      wall_time: (optional) Total wall time in seconds
      throughput: (optional) Throughput (in MB/s)
      extras: (optional) Dict mapping string keys to additional benchmark info.
      name: (optional) Override the BenchmarkEntry name with `name`.
        Otherwise it is inferred from the calling class and top-level
        method name.
    """
    name = self._get_name(overwrite_name=name)
    _global_report_benchmark(
        name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
        throughput=throughput, extras=extras)
def _run_specific_benchmark(benchmark_class):
benchmark = benchmark_class()
attrs = dir(benchmark)
# Only run methods of this class whose names start with "benchmark"
for attr in attrs:
if not attr.startswith("benchmark"):
continue
benchmark_fn = getattr(benchmark, attr)
if not callable(benchmark_fn):
continue
# Call this benchmark method
benchmark_fn()
def _run_benchmarks(regex):
  """Run benchmarks whose full class name matches regex `regex`.

  Walks the global benchmark registry and matches benchmark **class
  names** of the form "module.name.BenchmarkClass" against the given
  regex.  Every benchmark method of each matching class is executed.

  Args:
    regex: The string regular expression to match Benchmark classes against.
  """
  for benchmark_class in list(GLOBAL_BENCHMARK_REGISTRY):
    full_name = "%s.%s" % (benchmark_class.__module__, benchmark_class.__name__)
    if re.search(regex, full_name):
      _run_specific_benchmark(benchmark_class)
def benchmarks_main(true_main=None):
  """Run benchmarks as declared in args.

  When a --benchmarks=<regex> (or -benchmarks=<regex>) flag is present in
  sys.argv, the flag is removed and matching benchmarks are run via
  app.run; otherwise `true_main` is invoked.

  Args:
    true_main: True main function to run if benchmarks are not requested.
  """
  argv = sys.argv
  found_arg = [arg for arg in argv
               if arg.startswith("--benchmarks=")
               or arg.startswith("-benchmarks=")]
  if found_arg:
    # Remove --benchmarks arg from sys.argv so downstream flag parsing
    # never sees it.
    argv.remove(found_arg[0])
    # FIX: split on the first "=" only, so a regex that itself contains
    # "=" (e.g. character classes with escapes) is not truncated.
    regex = found_arg[0].split("=", 1)[1]
    app.run(lambda _: _run_benchmarks(regex))
  else:
    true_main()
``` |
{
"source": "JiangongWang/mean-teacher-cross-domain-detection",
"score": 3
} |
#### File: transforms/presets/rcnn.py
```python
from __future__ import absolute_import
import copy
import mxnet as mx
from .. import bbox as tbbox
from .. import image as timage
from .. import mask as tmask
import numpy as np
# FIX: FasterRCNNDefaultSEMultiTeacherTransform is defined in this module
# but was missing from __all__, so `from ... import *` silently omitted it.
__all__ = ['load_test',
           'FasterRCNNDefaultTrainTransform', 'FasterRCNNDefaultValTransform',
           'MaskRCNNDefaultTrainTransform', 'MaskRCNNDefaultValTransform',
           'FasterRCNNDefaultSETransform', 'FasterRCNNDefaultSEMultiTeacherTransform']
def load_test(filenames, short=600, max_size=1000, mean=(0.485, 0.456, 0.406),
              std=(0.229, 0.224, 0.225)):
    """Load image(s), resize and normalize them into network-ready tensors.

    Accepts a single filename or a list of filenames.

    Parameters
    ----------
    filenames : str or list of str
        Image filename(s) to be loaded.
    short : int, optional, default is 600
        Resize image short side to this `short` and keep aspect ratio.
    max_size : int, optional, default is 1000
        Maximum longer side length to fit image.
        This is to limit the input image shape, avoid processing too large image.
    mean : iterable of float
        Mean pixel values.
    std : iterable of float
        Standard deviations of pixel values.

    Returns
    -------
    (mxnet.NDArray, numpy.ndarray) or list of such tuple
        A (1, 3, H, W) mxnet NDArray as input to network, and a numpy ndarray
        as the original un-normalized color image for display.  If multiple
        image names are supplied, two lists are returned; ``zip()`` collapses
        them into pairs.
    """
    if isinstance(filenames, str):
        filenames = [filenames]
    tensors = []
    origs = []
    for fname in filenames:
        raw = mx.image.imread(fname)
        raw = timage.resize_short_within(raw, short, max_size)
        # Keep an uint8 copy before normalization for display purposes.
        origs.append(raw.asnumpy().astype('uint8'))
        normalized = mx.nd.image.normalize(
            mx.nd.image.to_tensor(raw), mean=mean, std=std)
        tensors.append(normalized.expand_dims(0))
    if len(tensors) == 1:
        return tensors[0], origs[0]
    return tensors, origs
class FasterRCNNDefaultTrainTransform(object):
    """Default Faster-RCNN training transform.

    Parameters
    ----------
    short : int, default is 600
        Resize image shorter side to ``short``.
    max_size : int, default is 1000
        Make sure image longer side is smaller than ``max_size``.
    net : mxnet.gluon.HybridBlock, optional
        The faster-rcnn network.

        .. hint::

            If net is ``None``, the transformation will not generate training targets.
            Otherwise it will generate training targets to accelerate the training phase
            since we push some workload to CPU workers instead of GPUs.

    mean : array-like of size 3
        Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
    std : array-like of size 3
        Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
    box_norm : array-like of size 4, default is (1., 1., 1., 1.)
        Std value to be divided from encoded values.
    num_sample : int, default is 256
        Number of samples for RPN targets.
    pos_iou_thresh : float, default is 0.7
        Anchors larger than ``pos_iou_thresh`` is regarded as positive samples.
    neg_iou_thresh : float, default is 0.3
        Anchors smaller than ``neg_iou_thresh`` is regarded as negative samples.
        Anchors with IOU in between ``pos_iou_thresh`` and ``neg_iou_thresh`` are
        ignored.
    pos_ratio : float, default is 0.5
        ``pos_ratio`` defines how many positive samples (``pos_ratio * num_sample``) is
        to be sampled.
    augmentation : int or bool, default is 0
        When truthy, apply the random color augmentation pipeline in ``__call__``.
    """

    def __init__(self, short=600, max_size=1000, net=None, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), box_norm=(1., 1., 1., 1.),
                 num_sample=256, pos_iou_thresh=0.7, neg_iou_thresh=0.3,
                 pos_ratio=0.5, augmentation=0, **kwargs):
        self._short = short
        self._max_size = max_size
        self._mean = mean
        self._std = std
        self._anchors = None
        if net is None:
            # No network: skip anchor/target-generator and augmentor setup.
            return
        # use fake data to generate fixed anchors for target generation
        ashape = 128
        # in case network has reset_ctx to gpu
        anchor_generator = copy.deepcopy(net.rpn.anchor_generator)
        anchor_generator.collect_params().reset_ctx(None)
        anchors = anchor_generator(
            mx.nd.zeros((1, 3, ashape, ashape))).reshape((1, 1, ashape, ashape, -1))
        self._anchors = anchors
        # record feature extractor for infer_shape
        if not hasattr(net, 'features'):
            raise ValueError("Cannot find features in network, it is a Faster-RCNN network?")
        self._feat_sym = net.features(mx.sym.var(name='data'))
        from ....model_zoo.rpn.rpn_target import RPNTargetGenerator
        self._target_generator = RPNTargetGenerator(
            num_sample=num_sample, pos_iou_thresh=pos_iou_thresh,
            neg_iou_thresh=neg_iou_thresh, pos_ratio=pos_ratio,
            stds=box_norm, **kwargs)
        # Color augmentation operators applied (each with prob 0.7, in a
        # random order) by random_color_aug when `augmentation` is truthy.
        self.gray_augmentator = mx.image.RandomGrayAug(p=0.3)
        self.brightness = mx.image.BrightnessJitterAug(0.15)
        self.contrast = mx.image.ContrastJitterAug(0.15)
        self.hue = mx.image.HueJitterAug(0.15)
        self.saturation = mx.image.SaturationJitterAug(0.15)
        self.color_list = [self.gray_augmentator, self.brightness, self.contrast, self.hue, self.saturation,
                           self.random_pca_lighting]
        self.augmentation = augmentation

    def random_pca_lighting(self, img):
        """Apply PCA-based lighting noise with a small alpha."""
        return timage.random_pca_lighting(img, alphastd=0.05)

    def random_color_aug(self, src):
        """Apply the color augmentors in random order, each with prob 0.7."""
        len_color = len(self.color_list)
        len_color = [i for i in range(len_color)]
        np.random.shuffle(len_color)
        for j in len_color:
            if np.random.uniform(0, 1) > 0.3:
                src = self.color_list[j](src)
        return src

    def __call__(self, src, label):
        """Apply transform to training image/label."""
        # resize shorter side but keep in max_size
        h, w, _ = src.shape
        img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
        img = img.astype(np.float32)
        if self.augmentation:
            img = self.random_color_aug(img)
        # Rescale boxes to the resized image coordinates.
        bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))
        # random horizontal flip
        h, w, _ = img.shape
        img, flips = timage.random_flip(img, px=0.5)
        bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])
        # to tensor
        img = mx.nd.image.to_tensor(img)
        img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
        if self._anchors is None:
            return img, bbox.astype(img.dtype)
        # generate RPN target so cpu workers can help reduce the workload
        # feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)
        oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]
        anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))
        gt_bboxes = mx.nd.array(bbox[:, :4])
        cls_target, box_target, box_mask = self._target_generator(
            gt_bboxes, anchor, img.shape[2], img.shape[1])
        return img, bbox.astype(img.dtype), cls_target, box_target, box_mask
class FasterRCNNDefaultSETransform(object):
    """Self-ensembling (mean-teacher) Faster R-CNN training transform.

    Produces two views of the same input image: both share the identical
    resize and random horizontal flip, but receive independent random
    color augmentations, for consistency training between teacher and
    student.  (The original docstring was copied from an SSD transform
    and described parameters this class does not take.)

    Parameters
    ----------
    short : int, default is 600
        Resize image shorter side to ``short``.
    max_size : int, default is 1000
        Make sure image longer side is smaller than ``max_size``.
    net : mxnet.gluon.HybridBlock, optional
        If ``None``, augmentation operators are not created.
    mean : array-like of size 3
        Mean pixel values for normalization. Default is [0.485, 0.456, 0.406].
    std : array-like of size 3
        Standard deviations for normalization. Default is [0.229, 0.224, 0.225].
    teacher_aug : bool, default False
        When True, the first (teacher) output image is also color-augmented;
        otherwise it keeps the un-augmented colors.
    """

    def __init__(self, short=600, max_size=1000, net=None, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), teacher_aug=False):
        self._short = short
        self._max_size = max_size
        self._mean = mean
        self._std = std
        self._anchors = None
        if net is None:
            return
        self.flip = mx.image.HorizontalFlipAug(0.5)
        self.random_size_crop = mx.image.random_size_crop
        # Color augmentation operators applied in random order by
        # random_color_aug, each with probability 0.7.
        self.gray_augmentator = mx.image.RandomGrayAug(p=0.3)
        self.brightness = mx.image.BrightnessJitterAug(0.15)
        self.contrast = mx.image.ContrastJitterAug(0.15)
        self.hue = mx.image.HueJitterAug(0.15)
        self.saturation = mx.image.SaturationJitterAug(0.15)
        self.color_list = [self.gray_augmentator, self.brightness, self.contrast, self.hue, self.saturation,
                           self.random_pca_lighting]
        self.teacher_aug = teacher_aug

    def random_expand_aug(self, img):
        """Randomly pad the image (up to ratio 1.1) filled with the mean color."""
        img, _ = timage.random_expand(img, max_ratio=1.1, fill=[m for m in self._mean])
        return img

    def random_salt_0(self, img):
        """Pepper noise: zero out ~0.1% of pixels."""
        h, w, c = img.shape
        mask = mx.nd.random.uniform(shape=(h, w, 1))
        mask = mask > 0.001
        img = img * mask
        return img

    def random_salt_255(self, img):
        """Salt noise: set ~0.1% of pixels to 255."""
        h, w, c = img.shape
        mask = mx.nd.random.uniform(shape=(h, w, 1))
        mask = mask > 0.001
        img = img * mask + 255 * (1 - mask)
        return img

    def random_pca_lighting(self, img):
        """Apply PCA-based lighting noise with a small alpha."""
        return timage.random_pca_lighting(img, alphastd=0.05)

    def random_color_aug(self, src):
        """Apply the color augmentors in random order, each with prob 0.7."""
        len_color = len(self.color_list)
        len_color = [i for i in range(len_color)]
        np.random.shuffle(len_color)
        for j in len_color:
            if np.random.uniform(0, 1) > 0.3:
                src = self.color_list[j](src)
        return src

    def __call__(self, src, label):
        """Return two normalized tensors of the same (resized, flipped) image.

        `label` is accepted for dataloader-signature compatibility but unused.
        """
        h, w, _ = src.shape
        # interp = np.random.randint(0, 5)
        img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
        img, flips = timage.random_flip(img, px=0.5)
        img = img.astype(np.float32)
        if self.teacher_aug:
            target_image_1 = self.random_color_aug(img)
        else:
            target_image_1 = img
        target_image_2 = self.random_color_aug(img)
        # target_image_1 = mx.nd.image.to_tensor(target_image_1)
        target_image_1 = mx.nd.image.to_tensor(target_image_1)
        target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)
        target_image_2 = mx.nd.image.to_tensor(target_image_2)
        target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)
        return target_image_1, target_image_2
class FasterRCNNDefaultSEMultiTeacherTransform(object):
    """Multi-teacher self-ensembling Faster R-CNN training transform.

    Like FasterRCNNDefaultSETransform, but produces `teacher_num`
    independently color-augmented pairs of the same (resized, flipped)
    image, concatenated along axis 0.  (The original docstring was copied
    from an SSD transform and described parameters this class does not
    take.)

    Parameters
    ----------
    short : int, default is 600
        Resize image shorter side to ``short``.
    max_size : int, default is 1000
        Make sure image longer side is smaller than ``max_size``.
    net : mxnet.gluon.HybridBlock, optional
        If ``None``, augmentation operators are not created.
    mean : array-like of size 3
        Mean pixel values for normalization. Default is [0.485, 0.456, 0.406].
    std : array-like of size 3
        Standard deviations for normalization. Default is [0.229, 0.224, 0.225].
    teacher_num : int, default 1
        Number of teacher/student image pairs to generate.
    teacher_aug : bool, default False
        When True, the first image of each pair is also color-augmented.
    """

    def __init__(self, short=600, max_size=1000, net=None, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), teacher_num=1, teacher_aug=False):
        self._short = short
        self._max_size = max_size
        self._mean = mean
        self._std = std
        self._anchors = None
        if net is None:
            return
        self.flip = mx.image.HorizontalFlipAug(0.5)
        self.random_size_crop = mx.image.random_size_crop
        # Color augmentation operators applied in random order by
        # random_color_aug, each with probability 0.7.
        self.gray_augmentator = mx.image.RandomGrayAug(p=0.3)
        self.brightness = mx.image.BrightnessJitterAug(0.15)
        self.contrast = mx.image.ContrastJitterAug(0.15)
        self.hue = mx.image.HueJitterAug(0.15)
        self.saturation = mx.image.SaturationJitterAug(0.15)
        self.color_list = [self.gray_augmentator, self.brightness, self.contrast, self.hue, self.saturation,
                           self.random_pca_lighting]
        self.teacher_num = teacher_num
        self.teacher_aug = teacher_aug

    def random_expand_aug(self, img):
        """Randomly pad the image (up to ratio 1.1) filled with the mean color."""
        img, _ = timage.random_expand(img, max_ratio=1.1, fill=[m for m in self._mean])
        return img

    def random_salt_0(self, img):
        """Pepper noise: zero out ~0.1% of pixels."""
        h, w, c = img.shape
        mask = mx.nd.random.uniform(shape=(h, w, 1))
        mask = mask > 0.001
        img = img * mask
        return img

    def random_salt_255(self, img):
        """Salt noise: set ~0.1% of pixels to 255."""
        h, w, c = img.shape
        mask = mx.nd.random.uniform(shape=(h, w, 1))
        mask = mask > 0.001
        img = img * mask + 255 * (1 - mask)
        return img

    def random_pca_lighting(self, img):
        """Apply PCA-based lighting noise with a small alpha."""
        return timage.random_pca_lighting(img, alphastd=0.05)

    def random_color_aug(self, src):
        """Apply the color augmentors in random order, each with prob 0.7."""
        len_color = len(self.color_list)
        len_color = [i for i in range(len_color)]
        np.random.shuffle(len_color)
        for j in len_color:
            if np.random.uniform(0, 1) > 0.3:
                src = self.color_list[j](src)
        return src

    def __call__(self, src, label):
        """Return two stacked tensors holding `teacher_num` augmented views each.

        `label` is accepted for dataloader-signature compatibility but unused.
        """
        h, w, _ = src.shape
        # interp = np.random.randint(0, 5)
        img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
        img, flips = timage.random_flip(img, px=0.5)
        img = img.astype(np.float32)
        target_list_1 = []
        target_list_2 = []
        for k in range(self.teacher_num):
            if self.teacher_aug:
                target_image_1 = self.random_color_aug(img)
            else:
                target_image_1 = img
            target_image_2 = self.random_color_aug(img)
            target_image_1 = mx.nd.image.to_tensor(target_image_1)
            target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)
            target_image_2 = mx.nd.image.to_tensor(target_image_2)
            target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)
            target_list_1.append(target_image_1)
            target_list_2.append(target_image_2)
        # Stack the per-teacher views along the leading axis.
        target_list_1 = mx.nd.concat(*target_list_1, dim=0)
        target_list_2 = mx.nd.concat(*target_list_2, dim=0)
        return target_list_1, target_list_2
class FasterRCNNDefaultValTransform(object):
    """Default Faster-RCNN validation transform.

    Resizes the shorter side to `short` (capped at `max_size`), rescales
    the ground-truth boxes accordingly, and normalizes the image tensor.

    Parameters
    ----------
    short : int, default is 600
        Resize image shorter side to ``short``.
    max_size : int, default is 1000
        Make sure image longer side is smaller than ``max_size``.
    mean : array-like of size 3
        Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
    std : array-like of size 3
        Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
    """

    def __init__(self, short=600, max_size=1000,
                 mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self._short = short
        self._max_size = max_size
        self._mean = mean
        self._std = std

    def __call__(self, src, label):
        """Apply transform to validation image/label."""
        orig_h, orig_w, _ = src.shape
        # Resize shorter side while keeping the longer side within max_size.
        resized = timage.resize_short_within(src, self._short, self._max_size, interp=1)
        new_h, new_w = resized.shape[0], resized.shape[1]
        bbox = tbbox.resize(label, (orig_w, orig_h), (new_w, new_h))
        # Report the original/resized height ratio instead of rescaling GT.
        im_scale = orig_h / float(new_h)
        tensor = mx.nd.image.normalize(
            mx.nd.image.to_tensor(resized), mean=self._mean, std=self._std)
        return tensor, bbox.astype('float32'), mx.nd.array([im_scale])
class MaskRCNNDefaultTrainTransform(object):
    """Default Mask RCNN training transform.

    Parameters
    ----------
    short : int, default is 600
        Resize image shorter side to ``short``.
    max_size : int, default is 1000
        Make sure image longer side is smaller than ``max_size``.
    net : mxnet.gluon.HybridBlock, optional
        The Mask R-CNN network.

        .. hint::

            If net is ``None``, the transformation will not generate training targets.
            Otherwise it will generate training targets to accelerate the training phase
            since we push some workload to CPU workers instead of GPUs.

    mean : array-like of size 3
        Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
    std : array-like of size 3
        Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
    box_norm : array-like of size 4, default is (1., 1., 1., 1.)
        Std value to be divided from encoded values.
    num_sample : int, default is 256
        Number of samples for RPN targets.
    pos_iou_thresh : float, default is 0.7
        Anchors larger than ``pos_iou_thresh`` is regarded as positive samples.
    neg_iou_thresh : float, default is 0.3
        Anchors smaller than ``neg_iou_thresh`` is regarded as negative samples.
        Anchors with IOU in between ``pos_iou_thresh`` and ``neg_iou_thresh`` are
        ignored.
    pos_ratio : float, default is 0.5
        ``pos_ratio`` defines how many positive samples (``pos_ratio * num_sample``) is
        to be sampled.
    """

    def __init__(self, short=600, max_size=1000, net=None, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), box_norm=(1., 1., 1., 1.),
                 num_sample=256, pos_iou_thresh=0.7, neg_iou_thresh=0.3,
                 pos_ratio=0.5, **kwargs):
        self._short = short
        self._max_size = max_size
        self._mean = mean
        self._std = std
        self._anchors = None
        if net is None:
            # No network: targets will not be generated in __call__.
            return
        # use fake data to generate fixed anchors for target generation
        ashape = 128
        # in case network has reset_ctx to gpu
        anchor_generator = copy.deepcopy(net.rpn.anchor_generator)
        anchor_generator.collect_params().reset_ctx(None)
        anchors = anchor_generator(
            mx.nd.zeros((1, 3, ashape, ashape))).reshape((1, 1, ashape, ashape, -1))
        self._anchors = anchors
        # record feature extractor for infer_shape
        if not hasattr(net, 'features'):
            raise ValueError("Cannot find features in network, it is a Mask RCNN network?")
        self._feat_sym = net.features(mx.sym.var(name='data'))
        from ....model_zoo.rpn.rpn_target import RPNTargetGenerator
        self._target_generator = RPNTargetGenerator(
            num_sample=num_sample, pos_iou_thresh=pos_iou_thresh,
            neg_iou_thresh=neg_iou_thresh, pos_ratio=pos_ratio,
            stds=box_norm, **kwargs)

    def __call__(self, src, label, segm):
        """Apply transform to training image/label/segmentation polygons."""
        # resize shorter side but keep in max_size
        h, w, _ = src.shape
        img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
        bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))
        segm = [tmask.resize(polys, (w, h), (img.shape[1], img.shape[0])) for polys in segm]
        # random horizontal flip
        h, w, _ = img.shape
        img, flips = timage.random_flip(img, px=0.5)
        bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])
        segm = [tmask.flip(polys, (w, h), flip_x=flips[0]) for polys in segm]
        # gt_masks (n, im_height, im_width) of uint8 -> float32 (cannot take uint8)
        masks = [mx.nd.array(tmask.to_mask(polys, (w, h))) for polys in segm]
        # n * (im_height, im_width) -> (n, im_height, im_width)
        masks = mx.nd.stack(*masks, axis=0)
        # to tensor
        img = mx.nd.image.to_tensor(img)
        img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
        if self._anchors is None:
            return img, bbox.astype(img.dtype), masks
        # generate RPN target so cpu workers can help reduce the workload
        # feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)
        oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]
        anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))
        gt_bboxes = mx.nd.array(bbox[:, :4])
        cls_target, box_target, box_mask = self._target_generator(
            gt_bboxes, anchor, img.shape[2], img.shape[1])
        return img, bbox.astype(img.dtype), masks, cls_target, box_target, box_mask
class MaskRCNNDefaultValTransform(object):
    """Default Mask RCNN validation transform.

    Resizes the shorter side and normalizes the image; ground truth is not
    rescaled — the image info (height, width, scale) is returned instead.

    Parameters
    ----------
    short : int, default is 600
        Resize image shorter side to ``short``.
    max_size : int, default is 1000
        Make sure image longer side is smaller than ``max_size``.
    mean : array-like of size 3
        Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
    std : array-like of size 3
        Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
    """

    def __init__(self, short=600, max_size=1000,
                 mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self._short = short
        self._max_size = max_size
        self._mean = mean
        self._std = std

    def __call__(self, src, label, mask):
        """Apply transform to validation image/label/mask."""
        orig_h = src.shape[0]
        resized = timage.resize_short_within(src, self._short, self._max_size, interp=1)
        # Scale factor from the original image to the resized one.
        im_scale = float(resized.shape[0]) / orig_h
        tensor = mx.nd.image.to_tensor(resized)
        tensor = mx.nd.image.normalize(tensor, mean=self._mean, std=self._std)
        im_info = mx.nd.array([tensor.shape[-2], tensor.shape[-1], im_scale])
        return tensor, im_info
``` |
{
"source": "jiangph1001/-",
"score": 3
} |
#### File: jiangph1001/-/generate_id_number.py
```python
import datetime,argparse,csv
# Compute the final check digit of a Chinese resident ID number from its
# first 17 digits (GB 11643 weighted checksum, mod 11).
def get_lastnumber(id):
    """Return the check digit (as a 1-char string) for 17 leading ID digits.

    `id` is any indexable sequence of 17 digit characters.  On a length
    mismatch the input is reported and 0 is returned (original behavior).
    """
    weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
    if len(id) != len(weight):
        print("计算最后一位时出错!")
        print(id)
        return 0
    total = sum(w * int(digit) for w, digit in zip(weight, id))
    # Remainder 0..10 maps onto this fixed table of check characters.
    return "10X98765432"[total % 11]
# Format a datetime as the 8-character "YYYYMMDD" birthday string.
def get_birthday_str(dt):
    """Return `dt` formatted as YYYYMMDD."""
    return dt.strftime("%Y%m%d")
# Build a datetime from year/month/day given as ints or numeric strings.
def convert_datatime(year, month, day):
    """Return datetime.datetime(year, month, day); str parts are converted."""
    y, m, d = (int(p) if isinstance(p, str) else p for p in (year, month, day))
    return datetime.datetime(y, m, d)
# Produce every "YYYYMMDD" string between two dates, inclusive.
def generate_bithday_by_range(begin_dt, end_dt):
    """Return a list of YYYYMMDD strings for each day in [begin_dt, end_dt]."""
    one_day = datetime.timedelta(days=1)
    birthday = []
    current = begin_dt
    while current <= end_dt:
        # Inlined get_birthday_str: format the day directly.
        birthday.append(current.strftime("%Y%m%d"))
        current = current + one_day
    return birthday
def get_prefix_by_provice(province):
    """Return all prefix codes whose province column (index 3) contains `province`."""
    return get_prefix(3, province)
def get_prefix_by_area(area):
    """Resolve a county-level area name to its 6-digit prefix code(s).

    If several areas share the name, the user is asked interactively to
    pick one entry (entering 0 keeps all of them).
    """
    area_list = get_prefix(1, area)
    num = "0";
    if len(area_list) > 1:
        cnt = 1
        print(area+"有多个选项,你想要哪个?")
        for area_num in area_list:
            # Column 0 matched by the first 4 digits yields the city name (column 1).
            city_name = get_prefix(0, area_num[:4], 1)[0]
            print("["+str(cnt)+"]: "+area_num+","+city_name+area)
            cnt = cnt + 1
        num = input("请输入要选择的编号,0表示全都要:")
    if int(num) == 0:
        # Default (or explicit 0): keep every matching prefix.
        return area_list
    else:
        single_list = []
        single_list.append(area_list[int(num)-1])
        return single_list
def get_prefix_by_city(city):
    """Return every prefix code under the given prefecture-level city.

    NOTE(review): the first lookup searches column 1, the same column used
    for area names — confirm the prefix.csv column layout is as intended.
    """
    num = get_prefix(1, city)
    # Use the first match's 4-digit city code to fetch all prefixes under it.
    return get_prefix(0, num[0][:4])
# Look up rows of prefix.csv by substring match.
#   mode    -- column index to match against
#   arg_str -- substring searched for within that column
#   loc     -- column index to return (default 0, the prefix code)
def get_prefix(mode, arg_str, loc=0):
    """Return column `loc` of every prefix.csv row whose column `mode`
    contains `arg_str` as a substring."""
    matches = []
    with open("prefix.csv", 'r') as fd:
        for row in csv.reader(fd):
            if arg_str in row[mode]:
                matches.append(row[loc])
    return matches
# Produce all birthday strings for a date specification.
# Parameter:
#   date_str: "YYYY/MM/DD" for a single day, or
#             "YYYY/MM/DD-YYYY/MM/DD" for an inclusive range.
def get_all_birthday(date_str):
    date_str = date_str.split('-')
    if len(date_str) == 2:
        # Range form: emit every day between the two dates, inclusive.
        begin_date = date_str[0].split('/')
        end_date = date_str[1].split('/')
        begin_dt = convert_datatime(begin_date[0], begin_date[1], begin_date[2])
        end_dt = convert_datatime(end_date[0], end_date[1], end_date[2])
        birthday = generate_bithday_by_range(begin_dt, end_dt)
    else:
        # Single-date form: emit just that day's string.
        date = date_str[0].split('/')
        dt = convert_datatime(date[0], date[1], date[2])
        birthday = []
        birthday.append(get_birthday_str(dt))
    return birthday
# The 17th digit encodes gender: even digits for female (女), odd for male (男).
gender_dict = {"女": "02468", "男": "13579"}


def generate_id(args):
    """Generate every candidate ID number matching the parsed CLI args.

    Iterates area prefixes x birthdays x 2-digit sequence numbers x
    gender-parity digits, appends the computed check digit, and either
    prints each ID or writes it to args.output (one per line).
    """
    gender = args.gender
    birthday = get_all_birthday(args.date)
    # Most specific location wins: area > city > province.
    if args.area:
        prefix6 = get_prefix_by_area(args.area)
    elif args.city:
        prefix6 = get_prefix_by_city(args.city)
    elif args.province:
        prefix6 = get_prefix_by_provice(args.province)
    if args.output:
        fd = open(args.output, "w")
    for pre in prefix6:
        for date in birthday:
            for num in range(100):
                for gender_code in gender_dict[gender]:
                    id = pre + date + str(num).zfill(2) + gender_code
                    id = id + get_lastnumber(id)
                    if args.output:
                        fd.write(id)
                        fd.write("\n")
                    else:
                        print(id)
    if args.output:
        fd.close()
if __name__ == "__main__":
    # CLI entry point; gender is even-coded for female, odd for male.
    parser = argparse.ArgumentParser()
    parser.add_argument("-g", "--gender", default="男", help="性别,女或男")
    parser.add_argument("-o", "--output", help="输出到文件")
    parser.add_argument("-d", "--date", default="1970/01/01", help="出生日期(年/月/日),例:1970/1/1。范围:1970/01/01-1998/02/02")
    # parser.add_argument("-r","--random",help="三位随机码")
    parser.add_argument("-p", "--province", default='北京市', help="出生地址精确到省/直辖市")
    parser.add_argument("-c", "--city", help="出生地址精确到地级市")
    parser.add_argument("-a", "--area", help="出生地址精确到县级市")
    args = parser.parse_args()
    # print(args)
    generate_id(args)
``` |
{
"source": "jiangq195/dga_classfier",
"score": 3
} |
#### File: jiangq195/dga_classfier/feat_normalizer.py
```python
import pandas as pd
import numpy as np
def feat_norm():
    """Min-max-center normalize the feature table at ./tmp/features.txt.

    Columns listed in `black_list` are copied through unchanged; every
    other column is rescaled to (x - mean) / (max - min).

    Returns:
        (train_norm_args, feat_matrix): a list of (mean, max, min) tuples,
        one per normalized column in header order, and the resulting
        DataFrame.
    """
    black_list = ['ip', 'class', 'tld']
    feat_table = pd.read_csv('./tmp/features.txt', delimiter='\t')
    header = list(feat_table.columns)
    feat_matrix = pd.DataFrame()
    train_norm_args = []
    for i in header:
        if i in black_list:
            # FIX: DataFrame.ix was deprecated and removed from pandas;
            # .loc is the label-based equivalent.
            feat_matrix[i] = feat_table.loc[:, i]
        else:
            line = feat_table.loc[:, i]
            mean_ = line.mean()
            max_ = line.max()
            min_ = line.min()
            # NOTE(review): a constant column (max_ == min_) divides by zero
            # and yields inf/NaN, matching the original behavior.
            feat_matrix[i] = (line - mean_) / (max_ - min_)
            train_norm_args.append((mean_, max_, min_))
    return train_norm_args, feat_matrix
def main():
    """Normalize the feature table and persist it for later pipeline stages."""
    _, feat_matrix = feat_norm()
    feat_matrix.to_csv('./tmp/features_norm.txt')


if __name__ == '__main__':
    main()
``` |
{
"source": "Jiangqi-7/Rep_MyCode",
"score": 4
} |
#### File: Rep_MyCode/Python/double_color_ball.py
```python
import random
single_list = []  # working buffer for one ticket (6 red balls + 1 blue ball)
result_list = []  # all generated tickets for the current round
def get_redNum():
    """Return one random red-ball number as a zero-padded 2-char string (01-33)."""
    return str(random.randint(1, 33)).zfill(2)
def get_redBall():
    """Draw 6 distinct red-ball numbers and return them sorted as 2-char strings."""
    picked = []
    while len(picked) < 6:
        # Inlined get_redNum: one draw per loop iteration, duplicates rejected.
        candidate = str(random.randint(1, 33)).zfill(2)
        if candidate not in picked:
            picked.append(candidate)
    picked.sort()
    return picked
def get_buleBall():
    """Return one random blue-ball number as a zero-padded 2-char string (01-16)."""
    return str(random.randint(1, 16)).zfill(2)
def begin(total):
    """Generate `total` distinct tickets into the module-level result_list
    and print them, numbered from 1."""
    for i in range(int(total)):
        while True:
            single_list = get_redBall()
            single_list.append(get_buleBall())
            # Redraw if this exact ticket was already generated this round.
            if single_list in result_list:
                continue
            else:
                result_list.append(single_list)
                break
    for i in result_list:
        print('第' + str(result_list.index(i) + 1) + '注:', end='')
        for j in i:
            print(j, end=' ')
        print()
def again(total):
    """Ask the user which single ticket (1..total) to redraw.

    Prints the redo menu, then loops until a valid ticket number is
    entered.

    Args:
        total: number of tickets, as an int or numeric string.

    Returns:
        str: the chosen ticket number ("1".."total").
    """
    valid_choices = []
    for i in range(int(total)):
        print('%d:第%d注重打' % (i+1,i+1))
        valid_choices.append(str(i + 1))
    while True:
        choice = input('请选择:')
        if choice not in valid_choices:
            print('您的输入有误,请重新输入。')
        else:
            # FIX: removed the unreachable `break` that followed this return.
            return choice
if __name__ == '__main__':
    # Interactive loop: ask how many tickets, generate them, then offer a
    # confirm / single-redo / full-redo menu, and optionally start over.
    while True:
        total = input('请输入需要的注数(1——5):')
        if total not in ('1','2','3','4','5'):
            print('您的输入有误,请重新输入。')
            continue
        begin(total)
        while True:
            choice = input('''
    0 : 确认
    1 : 单注重打
    2 :全部重打
    请输入:''')
            if choice not in ('0','1','2'):
                print('您的输入有误,请重新输入。')
                continue
            elif choice == '0':
                break
            elif choice == '1':
                # Redraw exactly one ticket chosen by the user.
                # NOTE(review): unlike begin(), this redraw does not check
                # the new ticket against result_list for duplicates.
                choice = again(total)
                single_list = []
                single_list = get_redBall()
                single_list.append(get_buleBall())
                result_list[int(choice)-1] = single_list
                for i in result_list:
                    print('第' + str(result_list.index(i) + 1) + '注:', end='')
                    for j in i:
                        print(j, end=' ')
                    print()
                continue
            elif choice == '2':
                # Discard everything and regenerate the whole round.
                result_list = []
                begin(total)
                continue
        is_continue = input('是否继续?输入y继续,其他任意键退出:')
        if is_continue == 'y':
            result_list = []
            continue
        else:
            break
``` |
{
"source": "jiangqn/code_zoo",
"score": 3
} |
#### File: code_zoo/kappa/cohen_kappa.py
```python
import numpy as np
def cohen_kappa(a, b):
    """Cohen's kappa between two int32 label arrays of identical shape."""
    assert a.dtype == np.int32 and b.dtype == np.int32 and a.shape == b.shape
    # observed agreement: fraction of positions where the raters coincide
    observed = (a == b).astype(np.float32).mean()
    categories = sorted(set(np.concatenate((a, b), axis=0).tolist()))
    index_of = {category: i for i, category in enumerate(categories)}
    num_categories = len(index_of)
    count_a = np.zeros(shape=(num_categories,), dtype=np.int32)
    count_b = np.zeros(shape=(num_categories,), dtype=np.int32)
    total = a.shape[0]
    for x, y in zip(a.tolist(), b.tolist()):
        count_a[index_of[x]] += 1
        count_b[index_of[y]] += 1
    # chance agreement from the marginal label distributions
    expected = 0
    for i in range(num_categories):
        expected += (count_a[i] / total) * (count_b[i] / total)
    return (observed - expected) / (1.0 - expected)
# Self-check with hand-crafted labels: po = 0.7, pe = 0.5, so kappa = 0.4.
if __name__ == '__main__':
    a = np.asarray(
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        dtype=np.int32
    )
    b = np.asarray(
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        dtype=np.int32
    )
    print(cohen_kappa(a, b)) # 0.4
```
#### File: code_zoo/loss/focal_loss.py
```python
import torch
from torch import nn
class FocalLoss(nn.Module):
    """Multi-class focal loss (cross entropy scaled by (1 - p)^gamma).

    Args:
        gamma: focusing parameter; 0 reduces to plain cross entropy.
        ignore_index: label value excluded from the mean, or None.
    """

    def __init__(self, gamma=2.0, ignore_index=None):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.ignore_index = ignore_index

    def forward(self, logits, labels):
        # logits: (batch, num_classes); labels: (batch,) class indices
        if self.ignore_index is not None:  # idiomatic `is not None` (was `!= None`)
            mask = labels != self.ignore_index
            # clamp ignored (possibly negative) labels to a valid index for scatter
            labels = torch.max(labels, torch.tensor(0).to(labels.device))
        one_hot = torch.zeros_like(logits).to(logits.device)
        one_hot = one_hot.scatter(1, labels.unsqueeze(-1), 1)
        prob = torch.softmax(logits, dim=-1)
        loss = - one_hot * torch.log(prob) * (1 - prob).pow(self.gamma)
        loss = loss.sum(dim=1, keepdim=False)
        if self.ignore_index is not None:
            # drop ignored positions before averaging
            loss = loss.masked_select(mask)
        loss = loss.mean()
        return loss
```
#### File: code_zoo/loss/smooth_cross_entropy.py
```python
import torch
from torch import nn
import torch.nn.functional as F
class SmoothCrossEntropy(nn.Module):
    """Cross entropy with label smoothing.

    The true class gets probability (1 - smooth); the remaining mass is
    spread uniformly over the other classes.
    """

    def __init__(self, smooth=0.1):
        super(SmoothCrossEntropy, self).__init__()
        # removed commented-out dead code (unused nn.KLDivLoss)
        self.smooth = smooth

    def forward(self, input, target):
        # input: (batch, num_classes) logits; target: (batch,) class indices
        one_hot = torch.zeros_like(input).to(input.device)
        one_hot = one_hot.scatter(1, target.unsqueeze(-1), 1)
        target = (1 - self.smooth) * one_hot + self.smooth / (input.size(1) - 1) * (1 - one_hot)
        # subtract the row max for numerical stability
        input = input - input.max(dim=1, keepdim=True)[0]
        loss = -target * F.log_softmax(input, dim=-1)
        # NOTE(review): .mean() averages over batch * classes (not per sample);
        # kept as-is since callers may depend on this scaling — confirm intended.
        return loss.mean()
```
#### File: jiangqn/code_zoo/regression_analysis.py
```python
import numpy as np
from typing import List, Tuple
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
def regression_analysis(pairs: List[tuple], labels: List[str] = None) -> None:
    """
    This is a function to do linear regression analysis.

    Fits and plots a separate univariate linear regression (scatter plus
    fitted line) for each (x, y) pair on one matplotlib figure, then shows it.

    Input pairs: [(x_1, y_1, label_1=None), ..., (x_n, y_n, label_n=None)]
    Input labels: [xlabel, ylabel]
    """
    # add support to show chinese
    plt.rcParams['font.family'] = ['sans-serif']
    plt.rcParams['font.sans-serif'] = ['SimHei']
    # check labels
    if labels != None:
        assert len(labels) == 2
        xlabel, ylabel = labels
    for pair in pairs:
        # check pair: each pair is (x, y) or (x, y, label) of 1-D arrays
        assert len(pair) in (2, 3)
        if len(pair) == 2:
            x, y = pair
            assert type(x) == np.ndarray and type(y) == np.ndarray and x.shape == y.shape
            label = None
        else:
            x, y, label = pair
            assert type(x) == np.ndarray and type(y) == np.ndarray and x.shape == y.shape and type(label) == str
        # calculate lower_bound and upper_bound
        xp = np.asarray([[x.min()], [x.max()]])
        # plot scatter diagram
        plt.scatter(x, y, label=label)
        # fit with linear_regression (sklearn expects a 2-D design matrix)
        x = x[:, np.newaxis]
        model = LinearRegression()
        model.fit(x, y)
        # predict on lower_bound and upper_bound
        yp = model.predict(xp)
        xp = xp[:, 0]
        # plot line diagram
        plt.plot(xp, yp)
    # add xlabel and ylabel
    if labels != None:
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
    # show labels
    # NOTE(review): `label` here is the label of the LAST pair only, so the
    # legend is skipped whenever the final pair is unlabeled — confirm intended.
    if label != None:
        plt.legend()
    plt.show()
``` |
{
"source": "jiangqn/IAN-pytorch",
"score": 3
} |
#### File: jiangqn/IAN-pytorch/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import numpy as np
class Attention(nn.Module):
    """Additive attention: score(q, k) = tanh(k . W . q + b), masked softmax over time."""

    def __init__(self, query_size, key_size):
        super(Attention, self).__init__()
        # uniform init in [-0.1, 0.1)
        self.weights = nn.Parameter(torch.rand(key_size, query_size) * 0.2 - 0.1)
        self.bias = nn.Parameter(torch.zeros(1))

    def forward(self, query, key, mask):
        # query: (batch_size, query_size)
        # key: (batch_size, time_step, key_size)
        # mask: (batch_size, time_step), 1 for valid positions, 0 for padding
        batch_size = key.size(0)
        time_step = key.size(1)
        weights = self.weights.repeat(batch_size, 1, 1)  # (batch_size, key_size, query_size)
        query = query.unsqueeze(-1)  # (batch_size, query_size, 1)
        mids = weights.matmul(query)  # (batch_size, key_size, 1)
        mids = mids.repeat(time_step, 1, 1, 1).transpose(0, 1)  # (batch_size, time_step, key_size, 1)
        key = key.unsqueeze(-2)  # (batch_size, time_step, 1, key_size)
        # squeeze ONLY the two trailing singleton dims: the original bare
        # .squeeze() also dropped batch_size or time_step whenever either was 1,
        # breaking the dim=1 reductions below.
        scores = torch.tanh(key.matmul(mids).squeeze(-1).squeeze(-1) + self.bias)  # (batch_size, time_step)
        # max-subtraction for a numerically stable masked softmax
        scores = scores - scores.max(dim=1, keepdim=True)[0]
        scores = torch.exp(scores) * mask
        attn_weights = scores / scores.sum(dim=1, keepdim=True)
        return attn_weights
class IAN(nn.Module):
    """Interactive Attention Network for aspect-level sentiment classification:
    aspect and context LSTM states attend over each other's averaged
    representation, and the two attended vectors are concatenated and classified.
    """
    def __init__(self, config):
        super(IAN, self).__init__()
        self.vocab_size = config.vocab_size
        self.embedding_size = config.embedding_size
        self.hidden_size = config.hidden_size
        self.n_class = config.n_class
        self.l2_reg = config.l2_reg
        self.max_aspect_len = config.max_aspect_len
        self.max_context_len = config.max_context_len
        self.embedding = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.embedding_size)
        self.aspect_lstm = nn.LSTM(input_size=self.embedding_size, hidden_size=self.hidden_size, batch_first=True)
        self.context_lstm = nn.LSTM(input_size=self.embedding_size, hidden_size=self.hidden_size, batch_first=True)
        self.aspect_attn = Attention(self.hidden_size, self.hidden_size)
        self.context_attn = Attention(self.hidden_size, self.hidden_size)
        self.dropout = nn.Dropout(config.dropout)
        self.fc = nn.Linear(self.hidden_size * 2, self.n_class)
        # initialize the embedding table from pretrained vectors in config
        self.embedding.weight.data.copy_(torch.from_numpy(config.embedding))
    def forward(self, aspect, context, aspect_mask, context_mask):
        # aspect/context: token id tensors; *_mask: 1/0 padding masks of the
        # same (batch, time) shape — presumably from IanDataset; confirm.
        aspect = self.embedding(aspect)
        aspect = self.dropout(aspect)
        aspect_output, _ = self.aspect_lstm(aspect)
        # zero padded positions before length-normalized averaging
        aspect_output = aspect_output * aspect_mask.unsqueeze(-1)
        aspect_avg = aspect_output.sum(dim=1, keepdim=False) / aspect_mask.sum(dim=1, keepdim=True)
        context = self.embedding(context)
        context = self.dropout(context)
        context_output, _ = self.context_lstm(context)
        context_output = context_output * context_mask.unsqueeze(-1)
        context_avg = context_output.sum(dim=1, keepdim=False) / context_mask.sum(dim=1, keepdim=True)
        # each side attends over the other side's averaged representation
        aspect_attn = self.aspect_attn(context_avg, aspect_output, aspect_mask).unsqueeze(1)
        # NOTE(review): bare .squeeze() drops ALL singleton dims — misbehaves
        # when batch_size == 1; confirm batches are always > 1.
        aspect_features = aspect_attn.matmul(aspect_output).squeeze()
        context_attn = self.context_attn(aspect_avg, context_output, context_mask).unsqueeze(1)
        context_features = context_attn.matmul(context_output).squeeze()
        features = torch.cat([aspect_features, context_features], dim=1)
        features = self.dropout(features)
        output = self.fc(features)
        output = torch.tanh(output)
        return output
class IanDataset(Dataset):
    """Serves (aspect, context, label, aspect_mask, context_mask) tuples
    loaded from a preprocessed .npz archive."""

    def __init__(self, path):
        archive = np.load(path)
        self.aspects = torch.from_numpy(archive['aspects']).long()
        self.contexts = torch.from_numpy(archive['contexts']).long()
        self.labels = torch.from_numpy(archive['labels']).long()
        self.aspect_lens = torch.from_numpy(archive['aspect_lens']).long()
        self.context_lens = torch.from_numpy(archive['context_lens']).long()
        self.len = self.labels.shape[0]
        aspect_max_len = self.aspects.size(1)
        context_max_len = self.contexts.size(1)
        # Row i holds ones in columns 0..i — a lower-triangular ones matrix —
        # so row (length - 1) is exactly the padding mask for that length.
        self.aspect_mask = torch.tril(torch.ones(aspect_max_len, aspect_max_len))
        self.context_mask = torch.tril(torch.ones(context_max_len, context_max_len))

    def __getitem__(self, index):
        return (self.aspects[index], self.contexts[index], self.labels[index],
                self.aspect_mask[self.aspect_lens[index] - 1],
                self.context_mask[self.context_lens[index] - 1])

    def __len__(self):
        return self.len
``` |
{
"source": "jiangqn/KSTER",
"score": 3
} |
#### File: KSTER/joeynmt/metrics.py
```python
from typing import List
import sacrebleu
def chrf(hypotheses, references, remove_whitespace=True):
    """
    Character F-score from sacrebleu

    :param hypotheses: list of hypotheses (strings)
    :param references: list of references (strings)
    :param remove_whitespace: (bool)
    :return: corpus-level chrF score (float)
    """
    # sacrebleu expects a list of reference streams, hence the extra nesting
    return sacrebleu.corpus_chrf(hypotheses=hypotheses, references=[references],
                                 remove_whitespace=remove_whitespace).score
def bleu(hypotheses, references, tokenize="13a"):
    """
    Raw corpus BLEU from sacrebleu (without tokenization)

    :param hypotheses: list of hypotheses (strings)
    :param references: list of references (strings)
    :param tokenize: one of {'none', '13a', 'intl', 'zh', 'ja-mecab'}
    :return: corpus-level BLEU score (float)
    """
    # single reference stream, wrapped in a list as sacrebleu requires
    return sacrebleu.corpus_bleu(hypotheses, [references], tokenize=tokenize).score
def token_accuracy(hypotheses: List[List[str]], references: List[List[str]]) \
        -> float:
    """
    Compute the accuracy of hypothesis tokens: correct tokens / all tokens.

    A token is correct if it appears at the same position in the reference;
    only the first min(len(hyp), len(ref)) positions of each pair are compared,
    while the denominator counts every hypothesis token.

    :param hypotheses: list of tokenized hypotheses (List[List[str]])
    :param references: list of tokenized references (List[List[str]])
    :return: token accuracy in percent (float)
    """
    assert len(hypotheses) == len(references)
    all_tokens = sum(len(hyp) for hyp in hypotheses)
    correct_tokens = sum(
        1
        for hyp, ref in zip(hypotheses, references)
        for h_tok, r_tok in zip(hyp, ref)
        if h_tok == r_tok
    )
    return (correct_tokens / all_tokens) * 100 if all_tokens > 0 else 0.0
def sequence_accuracy(hypotheses, references):
    """
    Percentage of hypotheses that match their reference exactly.

    :param hypotheses: list of hypotheses (strings)
    :param references: list of references (strings)
    :return: exact-match rate in percent (float)
    """
    assert len(hypotheses) == len(references)
    if not hypotheses:
        return 0.0
    exact_matches = sum(1 for hyp, ref in zip(hypotheses, references) if hyp == ref)
    return exact_matches / len(hypotheses) * 100
```
#### File: KSTER/scripts/post_process_hypothesis.py
```python
# --- setup: read the (single) experiment yaml and lay out analysis dirs ---
from joeynmt.vocabulary import Vocabulary
import os
import subprocess
import yaml
import glob
from sacremoses import MosesTokenizer, MosesDetokenizer
import spacy
from collections import Counter
# assumes exactly one *.yaml config sits in the working directory
config_path = glob.glob("*.yaml")[0]
config = yaml.safe_load(open(config_path, "r", encoding="utf-8"))
src_lang = config["data"]["src"]
trg_lang = config["data"]["trg"]
print(f"src_lang: {src_lang}\ttrg_lang: {trg_lang}")
base_path = "analysis"
detokenized_base_path = os.path.join(base_path, "detokenized")
tokenized_base_path = os.path.join(base_path, "tokenized")
bpe_base_path = os.path.join(base_path, "bpe")
detokenized_dev_path = os.path.join(detokenized_base_path, f"dev.{trg_lang}")
detokenized_test_path = os.path.join(detokenized_base_path, f"test.{trg_lang}")
tokenized_dev_path = os.path.join(tokenized_base_path, f"dev.tok.{trg_lang}")
tokenized_test_path = os.path.join(tokenized_base_path, f"test.tok.{trg_lang}")
bpe_dev_path = os.path.join(bpe_base_path, f"dev.bpe.32k.{trg_lang}")
bpe_test_path = os.path.join(bpe_base_path, f"test.bpe.32k.{trg_lang}")
# rename the beam-search outputs to plain dev/test hypothesis files
print("rename hypothesis files")
subprocess.call(f"mv {base_path}/beam4_alpha0.6.dev {base_path}/dev.{trg_lang}", shell=True)
subprocess.call(f"mv {base_path}/beam4_alpha0.6.test {base_path}/test.{trg_lang}", shell=True)
if not os.path.exists(detokenized_base_path):
    os.makedirs(detokenized_base_path)
print(f"copy hypothesis files into {detokenized_base_path}")
subprocess.call(f"cp {base_path}/dev.{trg_lang} {detokenized_base_path}", shell=True)
subprocess.call(f"cp {base_path}/test.{trg_lang} {detokenized_base_path}", shell=True)
def file_tokenize(src_path: str, trg_path: str, lang: str) -> None:
    """Moses-tokenize every line of src_path and write the result to trg_path."""
    tokenizer = MosesTokenizer(lang=lang)
    with open(src_path, "r", encoding="utf-8") as src_file, \
            open(trg_path, "w", encoding="utf-8") as trg_file:
        for raw_line in src_file:
            tokens = tokenizer.tokenize(raw_line.strip(), aggressive_dash_splits=True, escape=False)
            trg_file.write(" ".join(tokens) + "\n")
if not os.path.exists(tokenized_base_path):
    os.makedirs(tokenized_base_path)
print("tokenize hypothesis")
file_tokenize(detokenized_dev_path, tokenized_dev_path, trg_lang)
file_tokenize(detokenized_test_path, tokenized_test_path, trg_lang)
if not os.path.exists(bpe_base_path):
    os.makedirs(bpe_base_path)
# BPE codes/vocab live next to the training data referenced by the config
dataset_base_path = os.path.dirname(config["data"]["train"])
codes_path = os.path.join(dataset_base_path, "codes.txt")
vocabulary_path = os.path.join(dataset_base_path, f"vocabulary.{trg_lang}")
print("segment word into subwords with bpe")
subprocess.call(f"subword-nmt apply-bpe -c {codes_path} --vocabulary {vocabulary_path} --vocabulary-threshold 50 < {tokenized_dev_path} > {bpe_dev_path}", shell=True)
subprocess.call(f"subword-nmt apply-bpe -c {codes_path} --vocabulary {vocabulary_path} --vocabulary-threshold 50 < {tokenized_test_path} > {bpe_test_path}", shell=True)
print(f"copy src files to {bpe_base_path}")
subprocess.call("cat %s >> %s" % (config["data"]["dev"] + "." + src_lang, f"{bpe_base_path}/dev.bpe.32k.{src_lang}"), shell=True)
subprocess.call("cat %s >> %s" % (config["data"]["test"] + "." + src_lang, f"{bpe_base_path}/test.bpe.32k.{src_lang}"), shell=True)
subprocess.call("cp %s %s" % (os.path.join(dataset_base_path, "vocab.txt"), bpe_base_path), shell=True)
# spaCy model per target language (must be pre-downloaded)
model_dict = {
    "en": "en_core_web_sm",
    "de": "de_core_news_sm"
}
#subprocess.call("python3 -m spacy download %s" % model_dict[trg_lang], shell=True)
nlp = spacy.load(model_dict[trg_lang])
def compute_pos_tag_for_tokenized_file(src_path: str, trg_path: str) -> None:
    """Run the module-level spaCy pipeline over pre-tokenized lines and write
    one POS tag per token to trg_path, line-aligned with the source file."""
    src_file = open(src_path, "r", encoding="utf-8")
    trg_file = open(trg_path, "w", encoding="utf-8")
    for line in src_file.readlines():
        line = line.strip()
        words = line.split()
        # build a Doc from the existing tokens so spaCy does not re-tokenize
        spaces = [True for _ in range(len(words) - 1)] + [False]
        doc = spacy.tokens.doc.Doc(nlp.vocab, words=words, spaces=spaces)
        for name, proc in nlp.pipeline:
            doc = proc(doc)
        pos_tags = []
        for token in doc:
            pos_tags.append(str(token.pos_))
        pos_tags_line = " ".join(pos_tags)
        trg_file.write(pos_tags_line + "\n")
    src_file.close()
    trg_file.close()
print("compute pos tag for tokenized file")
compute_pos_tag_for_tokenized_file(tokenized_dev_path, os.path.join(tokenized_base_path, f"dev.{trg_lang}.pos"))
compute_pos_tag_for_tokenized_file(tokenized_test_path, os.path.join(tokenized_base_path, f"test.{trg_lang}.pos"))
def assign_pos_tag_for_bpe(src_path: str, bpe_path: str, trg_path: str) -> None:
    """Project word-level POS tags onto BPE subwords.

    Every subword inherits its word's tag; the word cursor advances only when
    a subword does NOT end in the '@@' continuation marker.
    """
    with open(src_path, "r", encoding="utf-8") as src_file, \
            open(bpe_path, "r", encoding="utf-8") as bpe_file, \
            open(trg_path, "w", encoding="utf-8") as trg_file:
        for pos_line, subword_line in zip(src_file, bpe_file):
            word_tags = pos_line.strip().split()
            subwords = subword_line.strip().split()
            subword_tags = []
            cursor = 0
            for subword in subwords:
                subword_tags.append(word_tags[cursor])
                if not subword.endswith("@@"):
                    cursor += 1
            # every word must have been consumed and every subword tagged
            assert cursor == len(word_tags)
            assert len(subwords) == len(subword_tags)
            trg_file.write(" ".join(subword_tags) + "\n")
print("assign pos tag for bpe")
assign_pos_tag_for_bpe(os.path.join(tokenized_base_path, f"dev.{trg_lang}.pos"), os.path.join(bpe_base_path, f"dev.bpe.32k.{trg_lang}"),
os.path.join(bpe_base_path, f"dev.bpe.32k.{trg_lang}.pos"))
assign_pos_tag_for_bpe(os.path.join(tokenized_base_path, f"test.{trg_lang}.pos"), os.path.join(bpe_base_path, f"test.bpe.32k.{trg_lang}"),
os.path.join(bpe_base_path, f"test.bpe.32k.{trg_lang}.pos"))
# token_map holds one target-vocab id per generated token (produced elsewhere);
# count how often each id occurred
token_file = open("token_map", "r", encoding="utf-8")
token_map = [int(x.strip()) for x in token_file.readlines()]
token_file.close()
counter = Counter(token_map)
vocab_file = open(config["data"]["trg_vocab"], "r", encoding="utf-8")
# +4 presumably accounts for special tokens (pad/unk/bos/eos) — TODO confirm
vocab_size = len(vocab_file.readlines()) + 4
vocab_file.close()
frequency = [counter[i] if i in counter else 0 for i in range(vocab_size)]
frequency_file = open(os.path.join(base_path, "token_frequency.txt"), "w", encoding="utf-8")
for c in frequency:
    frequency_file.write(str(c) + "\n")
frequency_file.close()
# point dev/test at the re-tokenized hypotheses and save an analysis config
config["data"]["dev"] = os.path.join(bpe_base_path, "dev.bpe.32k")
config["data"]["test"] = os.path.join(bpe_base_path, "test.bpe.32k")
with open("analysis_" + config_path, "w", encoding="utf-8") as f:
    yaml.safe_dump(config, f)
``` |
{
"source": "jiangqn/LanguageModel",
"score": 3
} |
#### File: LanguageModel/src/language_model.py
```python
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
class LanguageModel(nn.Module):
    """LSTM language model with optional input/output embedding weight tying."""

    def __init__(self, vocab_size: int, embed_size: int, hidden_size: int, num_layers: int, dropout: float, weight_tying: bool) -> None:
        super(LanguageModel, self).__init__()
        self.dropout = dropout
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size)
        self.rnn = nn.LSTM(
            input_size=embed_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bidirectional=False,
            dropout=dropout if num_layers > 1 else 0,
            batch_first=True
        )
        self.output_projection = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, embed_size)
        )
        self.generator = nn.Linear(embed_size, vocab_size)
        if weight_tying:
            # share the softmax matrix with the input embedding table
            self.generator.weight = self.embedding.weight

    def load_pretrained_embeddings(self, **kwargs) -> None:
        """Copy pretrained vectors, given either path=<.npy file> or embedding=<array> (exactly one)."""
        assert ('path' in kwargs) ^ ('embedding' in kwargs)
        weights = np.load(kwargs['path']) if 'path' in kwargs else kwargs['embedding']
        self.embedding.weight.data.copy_(torch.tensor(weights))

    def forward(self, sentence: torch.Tensor) -> torch.Tensor:
        '''
        :param sentence: torch.LongTensor (batch_size, seq_len)
        :return logit: torch.FloatTensor (batch_size, seq_len, vocab_size)
        '''
        embedded = F.dropout(self.embedding(sentence), p=self.dropout, training=self.training)
        hidden_states, _ = self.rnn(embedded)
        return self.generator(self.output_projection(hidden_states))
``` |
{
"source": "jiangqn/mini-seq2seq-pytorch",
"score": 3
} |
#### File: mini-seq2seq-pytorch/model/seq2seq.py
```python
import torch
import torch.nn as nn
import random
class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper with per-step scheduled teacher forcing."""

    def __init__(self, encoder, decoder):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        # src: (src_timestep, batch_size); trg: (trg_timestep, batch_size)
        batch_size = src.size(1)
        max_len = trg.size(0)
        vocab_size = self.decoder.output_size
        outputs = torch.zeros(max_len, batch_size, vocab_size)
        encoder_output, hidden = self.encoder(src)
        # keep only the first decoder.n_layers of the encoder's final states
        hidden = hidden[:self.decoder.n_layers]
        decoder_input = trg[0, :]  # <sos> tokens
        for step in range(1, max_len):
            decoder_output, hidden = self.decoder(decoder_input, hidden, encoder_output)
            outputs[step] = decoder_output
            # per step: feed the gold token with probability teacher_forcing_ratio,
            # otherwise feed the model's own argmax prediction
            use_teacher = random.random() < teacher_forcing_ratio
            predicted = decoder_output.data.max(1)[1]
            decoder_input = trg[step] if use_teacher else predicted
        return outputs
``` |
{
"source": "jiangqn/RNNLM",
"score": 2
} |
#### File: src/train/train.py
```python
import os
import yaml
import math
import torch
from torch import nn
from torch import optim
from torch.utils.data.dataloader import DataLoader
from src.language_model.rnnlm import RNNLM
from src.data_process.dataset import LMDataset
from src.utils.constants import PAD_INDEX, INF
from src.utils.logger import Logger
from src.train.eval import eval
from src.utils.sentence_clip import sentence_clip
def train(args):
    """Train an RNN language model, checkpointing the lowest-validation-ppl model.

    Expects preprocessed train/valid .npz files, GloVe vectors and a data-log
    yaml under ./data/<args.data>/. Early-stops after 5000 steps without
    improvement in validation perplexity.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    # resolve dataset / checkpoint / log paths
    base_path = os.path.join('./data', args.data)
    processed_base_path = os.path.join(base_path, 'processed')
    processed_train_path = os.path.join(processed_base_path, 'train.npz')
    processed_valid_path = os.path.join(processed_base_path, 'valid.npz')
    glove_path = os.path.join(processed_base_path, 'glove.npy')
    save_path = os.path.join(processed_base_path, 'rnnlm.pkl')
    log_base_path = os.path.join(base_path, 'log')
    log_path = os.path.join(log_base_path, 'train_log.txt')
    data_log_path = os.path.join(log_base_path, 'data_log.yml')
    data_log = yaml.safe_load(open(data_log_path, 'r'))
    vocab_size = data_log['vocab_size']
    logger = Logger(log_path)
    logger.log('make data')
    train_data = LMDataset(processed_train_path)
    valid_data = LMDataset(processed_valid_path)
    train_loader = DataLoader(
        dataset=train_data,
        batch_size=args.batch_size,
        shuffle=True,
        pin_memory=True
    )
    valid_loader = DataLoader(
        dataset=valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=True
    )
    logger.log('make model')
    model = RNNLM(
        vocab_size=vocab_size,
        embed_size=args.embed_size,
        hidden_size=args.hidden_size,
        num_layers=args.num_layers,
        dropout=args.dropout
    )
    logger.log('load pretrained embeddings')
    model.load_pretrained_embeddings(glove_path, fixed=args.embedding_fixed=='True')
    logger.log('transfer model to gpu')
    model = model.cuda()
    criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    logger.log('train start')
    min_val_ppl = INF
    early_stop_count = 0  # training steps since the last validation improvement
    for epoch in range(args.epoches):
        total_tokens = 0
        total_loss = 0
        for i, data in enumerate(train_loader):
            model = model.train()
            optimizer.zero_grad()
            src, trg = data
            src, trg = src.cuda(), trg.cuda()
            logit = model(src)
            logit = logit.view(-1, vocab_size)
            trg = trg.view(-1)
            loss = criterion(logit, trg)
            loss.backward()
            optimizer.step()
            # token-weighted loss average over non-padding positions
            valid_tokens = (trg != PAD_INDEX).long().sum().item()
            total_tokens += valid_tokens
            # .item() detaches the loss: accumulating the tensor itself would
            # keep every step's autograd graph alive and leak GPU memory
            total_loss += loss.item() * valid_tokens
            if i % 100 == 0:
                train_loss = total_loss / total_tokens
                train_ppl = math.exp(train_loss)
                total_loss, total_tokens = 0, 0
                val_loss, val_ppl = eval(model, valid_loader, criterion)
                logger.log('[epoch %d step %4d] train_loss: %.4f\ttrain_ppl: %.4f\tval_loss: %.4f\tval_ppl: %.4f' %
                           (epoch, i, train_loss, train_ppl, val_loss, val_ppl))
                if val_ppl < min_val_ppl:
                    min_val_ppl = val_ppl
                    torch.save(model, save_path)
                    early_stop_count = 0
                else:
                    early_stop_count += 100
                if early_stop_count >= 5000:
                    break
        if early_stop_count >= 5000:
            break
    logger.log('min_val_ppl: %.4f' % min_val_ppl)
    logger.log('train finish')
``` |
{
"source": "jiangqn/TextCNN",
"score": 3
} |
#### File: TextCNN/src/test_text_cnn.py
```python
import torch
from torchtext import data
from torchtext.data import TabularDataset, Iterator
import os
import pickle
from src.eval import eval_text_cnn
def test_text_cnn(config):
    """Evaluate a saved TextCNN model on the test split and print accuracy.

    Expects config['base_path'] to contain test.tsv, the pickled vocabulary
    (vocab.pkl) and the trained model (text_cnn.pkl).
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = str(config['gpu'])
    base_path = config['base_path']
    save_path = os.path.join(base_path, 'text_cnn.pkl')
    vocab_path = os.path.join(base_path, 'vocab.pkl')
    TEXT = data.Field(sequential=True, lower=True, batch_first=True)
    LABEL = data.Field(sequential=False, use_vocab=False, batch_first=True)
    fields = [
        ('sentence', TEXT),
        ('label', LABEL)
    ]
    test_data = TabularDataset(path=os.path.join(base_path, 'test.tsv'),
        format='tsv', skip_header=True, fields=fields)
    # reuse the training-time vocabulary so token ids match the saved model
    with open(vocab_path, 'rb') as handle:
        vocab = pickle.load(handle)
    TEXT.vocab = vocab
    device = torch.device('cuda:0')
    test_iter = Iterator(test_data, batch_size=config['batch_size'], shuffle=False, device=device)
    model = torch.load(save_path)
    test_accuracy = eval_text_cnn(model, test_iter)
    print('test_accuracy: %.4f' % test_accuracy)
``` |
{
"source": "jiangqn/Unsupervised-Sentiment-Lexicon-Extraction",
"score": 3
} |
#### File: src/data_process/preprocess.py
```python
import os
import numpy as np
import pickle
from src.utils.constants import SPLIT, UNK
from src.data_process.vocab import Vocab
from src.data_process.utils import load_glove
def preprocess():
    """Convert raw '<label>SPLIT<sentence>' lines into padded index arrays.

    Writes data.npz (sentence + label arrays), the word<->index pickles, and
    a GloVe embedding matrix restricted to the corpus vocabulary.
    """
    base_path = './data/'
    raw_path = os.path.join(base_path, 'raw/data.txt')
    processed_base_path = os.path.join(base_path, 'processed')
    processed_data_path = os.path.join(processed_base_path, 'data.npz')
    word2index_path = os.path.join(processed_base_path, 'word2index.pkl')
    index2word_path = os.path.join(processed_base_path, 'index2word.pkl')
    glove_source_path = '../datasets/embeddings/glove.840B.300d.txt'
    glove_path = os.path.join(processed_base_path, 'glove.npy')
    if not os.path.exists(processed_base_path):
        os.makedirs(processed_base_path)
    raw_file = open(raw_path, 'r', encoding='utf-8').readlines()
    # string label -> integer class
    d = {
        'negative': 0,
        'positive': 1
    }
    vocab = Vocab()
    sentences = []
    labels = []
    max_len = 0
    for line in raw_file:
        label, sentence = line.strip().split(SPLIT)
        labels.append(d[label])
        sentence = sentence.split()
        vocab.add_list(sentence)
        sentences.append(sentence)
        max_len = max(max_len, len(sentence))
    word2index, index2word = vocab.get_vocab()
    num = len(sentences)
    # map each word to its index, falling back to UNK
    f = lambda x: word2index[x] if x in word2index else word2index[UNK]
    for i in range(num):
        sentences[i] = [f(word) for word in sentences[i]]
        # NOTE(review): pads with index 0 — assumes Vocab reserves 0 for the
        # padding token; confirm against the Vocab implementation.
        sentences[i].extend([0] * (max_len - len(sentences[i])))
    sentences = np.asarray(sentences, dtype=np.int32)
    labels = np.asarray(labels, dtype=np.int32)
    np.savez(processed_data_path, sentence=sentences, label=labels)
    with open(word2index_path, 'wb') as handle:
        pickle.dump(word2index, handle)
    with open(index2word_path, 'wb') as handle:
        pickle.dump(index2word, handle)
    glove = load_glove(glove_source_path, len(index2word), word2index)
    np.save(glove_path, glove)
``` |
{
"source": "jiangqn/zhihu_analytics",
"score": 3
} |
#### File: zhihu_analytics/src/create_wordcloud.py
```python
import requests
import re
import time
import jieba
from collections import Counter
from wordcloud import WordCloud
from src.utils.constants import headers, stopwords_path, font_path
from src.utils.load_stopwords import load_stopwords
def get_page(question_id: int, offset: int) -> dict:
    """Fetch one page (20 answers) of a Zhihu question as parsed JSON."""
    # request JSON data through the Zhihu public API
    # question_id: Zhihu question id
    # offset: paging offset (20 answers per page)
    # Zhihu API endpoint
    url = "https://www.zhihu.com/api/v4/questions/{}/answers?include=content&limit=20&offset={}&platform=desktop&sort_by=default".format(
        question_id, offset)
    # https://www.zhihu.com/api/v4/questions/281789365/answers?include=content&limit=20&offset=20&platform=desktop&sort_by=default
    # https://www.zhihu.com/question/281789365
    res = requests.get(url, headers=headers)
    res.encoding = "utf-8"
    return res.json()
reg = re.compile("<[^>]*>")
def get_answers(question_id: int):
    """Page through a question's answers, returning up to ~1000 records with
    HTML tags, newlines and spaces stripped from the answer text."""
    collected = []
    offset = 0
    while True:
        page = get_page(question_id, offset)
        page_answers = page["data"]
        if not page_answers or len(collected) >= 1000:
            break
        for answer in page_answers:
            text = reg.sub("", answer["content"]).replace("\n", "").replace(" ", "")
            collected.append(
                {
                    "question_id": question_id,
                    "author_id": answer["author"]["id"],
                    "author_name": answer["author"]["name"],
                    "answer_id": answer["id"],
                    "answer_content": text
                }
            )
        offset += 20
        # time.sleep(1)
    return collected
def normal_cut_sentence(text):
    """Split Chinese/mixed text into sentences on 。!?, ellipses, and the
    sentence-ender-plus-closing-quote pattern. Returns a list of sentences."""
    text = re.sub(r'([。!?\?])([^’”])', r'\1\n\2', text)  # sentence ender NOT followed by a closing quote
    text = re.sub(r'(\.{6})([^’”])', r'\1\n\2', text)  # six-dot English ellipsis not followed by a closing quote
    text = re.sub(r'(…{2})([^’”])', r'\1\n\2', text)  # doubled Chinese ellipsis not followed by a closing quote
    # ender + closing quote, then a non-quote char. The original class
    # [.。!?\?\.{6}\…{2}] accidentally treated { } 6 2 as literal enders,
    # since quantifiers are meaningless inside a character class.
    text = re.sub(r'([。!?\?.…][’”])([^’”])', r'\1\n\2', text)
    return text.split("\n")
stopwords = load_stopwords(stopwords_path)
def create_wordcloud(question_id: int) -> None:
    """Fetch all answers of a Zhihu question, segment them with jieba, and
    render a word cloud image to static/zhihu.png."""
    answers = get_answers(question_id)
    words = []
    for answer in answers:
        document = answer["answer_content"]
        # split into sentences first so jieba segments shorter spans
        sentences = normal_cut_sentence(document)
        for sentence in sentences:
            words.extend(jieba.lcut(sentence))
    # WordCloud.generate expects one whitespace-separated string
    words = " ".join(words)
    wordcloud = WordCloud(
        font_path=font_path,
        background_color="white",
        width=1000,
        height=700,
        stopwords=stopwords,
        max_words=100
    ).generate(words)
    wordcloud.to_file("static/zhihu.png")
``` |
{
"source": "jiangr100/Garment-Pattern-Generator",
"score": 2
} |
#### File: Garment-Pattern-Generator/data_generation/datascan.py
```python
from __future__ import print_function
import os
import time
from datetime import timedelta
# Maya
from maya import cmds
import maya.standalone
# My modules
import customconfig
# reload in case we are in Maya internal python environment
reload(customconfig)
# Had to make copy of functions from datasim.py, becuase of issues with importing maya-related packages
def init_mayapy():
    """Start a standalone Maya session and load the Arnold render / OBJ export plugins."""
    try:
        print('Initilializing Maya tools...')
        maya.standalone.initialize()
        print('Load plugins')
        cmds.loadPlugin('mtoa.mll') # https://stackoverflow.com/questions/50422566/how-to-register-arnold-render
        cmds.loadPlugin('objExport.mll') # same as in https://forums.autodesk.com/t5/maya-programming/invalid-file-type-specified-atomimport/td-p/9121166
    except Exception as e:
        # deliberately best-effort: report and continue; later Maya calls
        # will fail loudly if initialization really did not succeed
        print(e)
        print('Init failed')
        pass
def stop_mayapy():
    """Shut down the standalone Maya session started by init_mayapy()."""
    maya.standalone.uninitialize()
    print("Maya stopped")
def transfer_segm_labels(verts_before, mesh, dir_path, name):
    """
    Save segmentation labels for mesh after scan imitation

    Matches the scanned mesh's vertices back to their pre-scan order and
    re-maps the per-vertex panel labels accordingly; writes
    <name>_scan_imitation_segmentation.txt and returns 0.
    NOTE(review): relies on the module-level `utils` imported in __main__.
    """
    verts_after = utils.get_vertices_np(mesh)
    verts_mapping = utils.match_vert_lists(verts_after, verts_before)
    # print(os.path.join(dir_path, name + '_sim_segmentation.txt'))
    with open(os.path.join(dir_path, name + '_sim_segmentation.txt'), 'r') as f:
        vert_labels = [line.rstrip() for line in f]  # remove \n
    scan_labels = [vert_labels[i] for i in verts_mapping]
    filepath = os.path.join(dir_path, name + '_scan_imitation_segmentation.txt')
    with open(filepath, 'w') as f:
        for panel_name in scan_labels:
            f.write("%s\n" % panel_name)
    return 0
if __name__ == "__main__":
    # System paths (datasets, bodies, templates). The comment below is from
    # the original author: the config file must sit next to the Maya install.
    system_config = customconfig.Properties('system.json')  # Make sure it's in \Autodesk\MayaNNNN\
    path = system_config['templates_path']
    # ------ Datasets ------
    # Names of dataset folders (under system_config['datasets_path']) to process.
    dataset_folders = [
        # 'merged_dress_sleeveless_2550_210429-13-12-52',
        # 'merged_jumpsuit_sleeveless_2000_210429-11-46-14',
        # 'merged_skirt_8_panels_1000_210521-16-20-14',
        # 'merged_wb_pants_straight_1500_210521-16-30-57',
        # 'merged_skirt_2_panels_1200_210521-16-46-27',
        # 'merged_jacket_2200_210521-16-55-26',
        # 'merged_tee_sleeveless_1800_210521-17-10-22',
        'merged_wb_dress_sleeveless_2600_210521-17-26-08',  # had fails
        # 'merged_jacket_hood_2700_210521-17-47-44',
        # 'data_1000_pants_straight_sides_210520-22-34-57'
    ]
    # ------ Start Maya instance ------
    init_mayapy()
    import mayaqltools as mymaya  # has to import after maya is loaded
    reload(mymaya)  # reload in case we are in Maya internal python environment
    from mayaqltools import utils
    for dataset in dataset_folders:
        datapath = os.path.join(system_config['datasets_path'], dataset)
        # print(datapath)
        dataset_file = os.path.join(datapath, 'dataset_properties.json')
        data_props = customconfig.Properties(dataset_file)
        if not data_props['to_subfolders']:
            raise NotImplementedError('Scanning only works on datasets organized in subfolders')  # just for simplicity
        # load body to the scene
        body = utils.load_file(os.path.join(system_config['bodies_path'], data_props['body']), 'body')
        utils.scale_to_cm(body)
        # ------ Main loop --------
        # First run on a dataset: record the ray-casting config and empty stats.
        if 'scan_imitation' not in data_props:
            number_of_rays = 30  # rays cast per face to test visibility
            number_of_visible_rays = 4  # min unobstructed rays to keep a face
            data_props.set_section_config(
                'scan_imitation', test_rays_num=number_of_rays, visible_rays_num=number_of_visible_rays)
            data_props.set_section_stats('scan_imitation', fails=[], faces_removed={}, processing_time={})
        # Backfill stats keys for datasets written by older runs of this script.
        if 'fails' not in data_props['scan_imitation']['stats']:
            data_props['scan_imitation']['stats']['fails'] = []
        if 'faces_removed' not in data_props['scan_imitation']['stats']:
            data_props['scan_imitation']['stats']['faces_removed'] = {}
        if 'processing_time' not in data_props['scan_imitation']['stats']:
            data_props['scan_imitation']['stats']['processing_time'] = {}
        if 'frozen' not in data_props:
            data_props['frozen'] = True  # when True, the files that are already processed will be skipped!
        # go over the examples in the data
        start_time = time.time()
        to_ignore = ['renders']  # special dirs not to include in the pattern list
        root, dirs, files = next(os.walk(datapath))  # cannot use os.scandir in python 2.7
        for name in dirs:
            if name not in to_ignore:
                dir_path = os.path.join(root, name)
                # skip if already has a corresponding file
                _, elem_dirs, elem_files = next(os.walk(dir_path))
                if data_props['frozen'] and any(['scan_imitation.obj' in filename for filename in elem_files]):
                    print('Datascan::Info::Skipped {} as already processed'.format(name))
                    continue
                # unfreeze dataset to re-do scan imitation on already processed elements
                if not any([name + '_sim.obj' in filename for filename in elem_files]):
                    # simulation result does not exist
                    print('Datascan::Warning::Skipped {} as .obj file does not exist'.format(name))
                    data_props['scan_imitation']['stats']['fails'].append(name)
                    continue
                # load mesh
                garment = utils.load_file(os.path.join(dir_path, name + '_sim.obj'), name + '_sim')
                mesh, _ = utils.get_mesh_dag(garment)
                # Snapshot vertices before face removal so segmentation labels
                # can be matched back afterwards.
                verts_before = utils.get_vertices_np(mesh)
                # do what we are here for: remove faces not visible to the scanner rays
                removed, time_taken = mymaya.scan_imitation.remove_invisible(
                    garment, [body],
                    data_props['scan_imitation']['config']['test_rays_num'],
                    data_props['scan_imitation']['config']['visible_rays_num'])
                data_props['scan_imitation']['stats']['faces_removed'][name] = removed
                data_props['scan_imitation']['stats']['processing_time'][name] = time_taken
                # save to original folder
                utils.save_mesh(garment, os.path.join(dir_path, name + '_scan_imitation.obj'))
                # transfer the segmentation labels
                if not any([name + '_sim_segmentation.txt' in filename for filename in elem_files]):
                    # segmentation labels file for sim does not exist
                    print('Datascan::Warning::{}:: Skipped segmentation transfer as segmentation file does not exist'.format(name))
                    data_props['scan_imitation']['stats']['fails'].append(name)
                else:
                    try:
                        transfer_segm_labels(verts_before, mesh, dir_path, name)
                    except ValueError as e:
                        print(e)
                        data_props['scan_imitation']['stats']['fails'].append(name)
                data_props.serialize(dataset_file)  # just in case
                cmds.delete(garment)  # cleanup
        # update props & save
        passed = time.time() - start_time
        data_props.summarize_stats('processing_time', log_sum=True, log_avg=True, as_time=True)
        data_props.summarize_stats('faces_removed', log_avg=True)
        data_props.set_section_stats(
            'scan_imitation', total_processing_time=str(timedelta(seconds=passed))
        )
        data_props['frozen'] = True  # force freezing after processing is finished
        data_props.serialize(dataset_file)
        # print('Scan imitation on {} performed successfully!!!'.format(dataset))
        # clean the scene s.t. next dataset can use another body mesh
        cmds.delete(body)
    # End Maya instance
    stop_mayapy()  # ensures correct exit without errors
``` |
{
"source": "jiangrongatrobo/cvat",
"score": 2
} |
#### File: apps/dataset_manager/views.py
```python
import os
import os.path as osp
import tempfile
from datetime import timedelta
from enum import Enum
import django_rq
from django.utils import timezone
import pandas as pd
from openpyxl.workbook.child import INVALID_TITLE_REGEX
import re
import cvat.apps.dataset_manager.task as task
from cvat.apps.engine.log import slogger
from cvat.apps.engine.models import Project, Task
from datumaro.cli.util import make_file_name
from datumaro.util import to_snake_case
from .formats.registry import EXPORT_FORMATS, IMPORT_FORMATS
from .util import current_function_name
# Logger tag for this module: "<package>.views" (package name + this file's basename).
_MODULE_NAME = __package__ + '.' + osp.splitext(osp.basename(__file__))[0]
def log_exception(logger=None, exc_info=True):
    """Log the current exception, tagged with this module and the caller's name.

    Falls back to the server logger when *logger* is not supplied.
    """
    active_logger = slogger if logger is None else logger
    tag = "[%s @ %s]: exception occurred" % (
        _MODULE_NAME, current_function_name(2))
    active_logger.exception(tag, exc_info=exc_info)
def get_export_cache_dir(db_task):
    """Return the export-cache directory path inside the task's directory.

    Raises Exception when the task directory itself does not exist.
    """
    task_dir = osp.abspath(db_task.get_task_dirname())
    if not osp.isdir(task_dir):
        raise Exception('Task dir {} does not exist'.format(task_dir))
    return osp.join(task_dir, 'export_cache')
# How long an exported archive stays in the cache before the scheduled
# clear_export_cache job removes it.
DEFAULT_CACHE_TTL = timedelta(hours=10)
CACHE_TTL = DEFAULT_CACHE_TTL
def export_task(task_id, dst_format, server_url=None, save_images=False):
    """Export a task to *dst_format*, using a per-task on-disk cache.

    A cached archive is reused while it is newer than the task's last update;
    otherwise the task is re-exported into a temp file and atomically moved
    into place. A cleanup job is scheduled to delete the archive after
    CACHE_TTL. Returns the path of the (possibly cached) archive.
    """
    try:
        db_task = Task.objects.get(pk=task_id)
        cache_dir = get_export_cache_dir(db_task)
        exporter = EXPORT_FORMATS[dst_format]
        output_base = '%s_%s' % ('dataset' if save_images else 'annotations',
            make_file_name(to_snake_case(dst_format)))
        output_path = '%s.%s' % (output_base, exporter.EXT)
        output_path = osp.join(cache_dir, output_path)
        # Cache is valid only if the archive is at least as new as the task.
        task_time = timezone.localtime(db_task.updated_date).timestamp()
        if not (osp.exists(output_path) and \
                task_time <= osp.getmtime(output_path)):
            os.makedirs(cache_dir, exist_ok=True)
            # Export into a temp dir first, then os.replace() for an atomic
            # move, so concurrent readers never see a half-written archive.
            with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir:
                temp_file = osp.join(temp_dir, 'result')
                task.export_task(task_id, temp_file, dst_format,
                    server_url=server_url, save_images=save_images)
                os.replace(temp_file, output_path)
            # ctime is recorded so the cleanup job only deletes this exact
            # file version (see clear_export_cache).
            archive_ctime = osp.getctime(output_path)
            scheduler = django_rq.get_scheduler()
            cleaning_job = scheduler.enqueue_in(time_delta=CACHE_TTL,
                func=clear_export_cache,
                task_id=task_id,
                file_path=output_path, file_ctime=archive_ctime)
            slogger.task[task_id].info(
                "The task '{}' is exported as '{}' at '{}' "
                "and available for downloading for the next {}. "
                "Export cache cleaning job is enqueued, id '{}'".format(
                    db_task.name, dst_format, output_path, CACHE_TTL,
                    cleaning_job.id))
        return output_path
    except Exception:
        log_exception(slogger.task[task_id])
        raise
def export_task_as_dataset(task_id, dst_format=None, server_url=None):
    """Export a task including its images (full dataset) via export_task."""
    return export_task(task_id, dst_format, save_images=True, server_url=server_url)
class Metrics(Enum):
    """Annotation statistic categories counted by export_project_stats."""
    Rectangle = "Rectangle"
    Tags = "Tags"
    Manually = "Manually"
    Total = "Total"

    def __str__(self):
        # Render the metric as its plain string value (used as column names).
        return str(self.value)
def export_project_stats(tasks: list, db_project, server_url=None):
    """Write per-task annotation statistics for a project into an .xlsx file.

    One worksheet is created per (task_id, task_name) pair in *tasks*,
    counting per label and in total: rectangle shapes, tags, manually
    created annotations, and all annotations.

    Args:
        tasks: list of (task_id, task_name) tuples belonging to the project.
        db_project: project model; must provide get_project_dirname() and a
            label_set relation.
        server_url: unused; kept for signature compatibility with the other
            export entry points.

    Returns:
        Path of the written spreadsheet inside the project's export cache.

    Raises:
        Exception: if the project directory does not exist.
    """
    project_dir = osp.abspath(db_project.get_project_dirname())
    if osp.isdir(project_dir):
        cache_dir = osp.join(project_dir, 'export_cache')
        os.makedirs(cache_dir, exist_ok=True)
    else:
        raise Exception('Project dir {} does not exist'.format(project_dir))
    output_path = osp.join(cache_dir, 'stats.xlsx')
    # Map label id -> label name; ids are cast to int defensively, matching
    # the original double-conversion behavior.
    labels = {int(row['id']): row['name']
              for row in db_project.label_set.values()}
    # Fix: use ExcelWriter as a context manager instead of a manual close(),
    # so the workbook is finalized even if a task export raises mid-way.
    with pd.ExcelWriter(output_path) as xlsx:
        for (tid, tname) in tasks:
            db_ann = task.get_task_data(tid)
            struct = {v: {m.name: 0 for m in Metrics}
                      for v in list(labels.values()) + ["Total"]}
            for shape in db_ann['shapes']:
                source = shape['source']
                shape_type = shape['type']  # fix: don't shadow builtin `type`
                label = labels[shape['label_id']]
                if shape_type == 'rectangle':
                    struct[label][Metrics.Rectangle.name] += 1
                    struct["Total"][Metrics.Rectangle.name] += 1
                if source == 'manual':
                    struct[label][Metrics.Manually.name] += 1
                    struct["Total"][Metrics.Manually.name] += 1
                struct[label][Metrics.Total.name] += 1
                struct["Total"][Metrics.Total.name] += 1
            for tag in db_ann['tags']:
                source = tag['source']
                label = labels[tag['label_id']]
                struct[label][Metrics.Tags.name] += 1
                struct["Total"][Metrics.Tags.name] += 1
                if source == 'manual':
                    struct[label][Metrics.Manually.name] += 1
                    struct["Total"][Metrics.Manually.name] += 1
                struct[label][Metrics.Total.name] += 1
                struct["Total"][Metrics.Total.name] += 1
            struct_df = pd.DataFrame(struct).transpose()
            # Excel forbids certain characters in sheet titles; replace them.
            # NOTE(review): Excel also limits sheet names to 31 chars; very
            # long task names may still fail — confirm upstream truncation.
            title = "#{}_{}".format(tid, tname)
            title = re.sub(INVALID_TITLE_REGEX, '_', title)
            struct_df.to_excel(xlsx, sheet_name=title, index=True)
    return output_path
def export_task_annotations(task_id, dst_format=None, server_url=None):
    """Export only the annotations of a task (no images) via export_task."""
    return export_task(task_id, dst_format, save_images=False, server_url=server_url)
def clear_export_cache(task_id, file_path, file_ctime):
    """Delete a cached export archive if it still matches the recorded ctime.

    The ctime check ensures we never delete a newer archive that replaced
    the one this cleanup job was scheduled for.
    """
    try:
        is_same_file = osp.exists(file_path) and osp.getctime(file_path) == file_ctime
        if is_same_file:
            os.remove(file_path)
            slogger.task[task_id].info(
                "Export cache file '{}' successfully removed".format(file_path))
    except Exception:
        log_exception(slogger.task[task_id])
        raise
def get_export_formats():
    """Return all registered export format descriptors as a list."""
    return [fmt for fmt in EXPORT_FORMATS.values()]
def get_import_formats():
    """Return all registered import format descriptors as a list."""
    return [fmt for fmt in IMPORT_FORMATS.values()]
def get_all_formats():
    """Return both importer and exporter descriptors in one dict."""
    formats = {}
    formats['importers'] = get_import_formats()
    formats['exporters'] = get_export_formats()
    return formats
``` |
{
"source": "jiangrongatrobo/mmdetection",
"score": 2
} |
#### File: core/fp16/deprecated_fp16_utils.py
```python
import warnings
from mmcv.runner import (Fp16OptimizerHook, auto_fp16, force_fp32,
wrap_fp16_model)
class DeprecatedFp16OptimizerHook(Fp16OptimizerHook):
    """A wrapper class for the FP16 optimizer hook. This class wraps
    :class:`Fp16OptimizerHook` in `mmcv.runner` and shows a warning that the
    :class:`Fp16OptimizerHook` from `mmdet.core` will be deprecated.
    Refer to :class:`Fp16OptimizerHook` in `mmcv.runner` for more details.
    Args:
        loss_scale (float): Scale factor multiplied with loss.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: the original signature was `def __init__(*args, **kwargs)`,
        # missing `self`. Zero-argument super() then resolved against the
        # `args` tuple instead of the instance, breaking construction.
        super().__init__(*args, **kwargs)
        warnings.warn(
            'Importing Fp16OptimizerHook from "mmdet.core" will be '
            'deprecated in the future. Please import them from "mmcv.runner" '
            'instead')
def deprecated_auto_fp16(*args, **kwargs):
    """Deprecated shim: warn, then delegate to mmcv.runner.auto_fp16."""
    warnings.warn(
        'Importing auto_fp16 from "mmdet.core" will be '
        'deprecated in the future. Please import them from "mmcv.runner" '
        'instead')
    decorator = auto_fp16(*args, **kwargs)
    return decorator
def deprecated_force_fp32(*args, **kwargs):
    """Deprecated shim: warn, then delegate to mmcv.runner.force_fp32."""
    warnings.warn(
        'Importing force_fp32 from "mmdet.core" will be '
        'deprecated in the future. Please import them from "mmcv.runner" '
        'instead')
    decorator = force_fp32(*args, **kwargs)
    return decorator
def deprecated_wrap_fp16_model(*args, **kwargs):
    """Deprecated shim: warn, then delegate to mmcv.runner.wrap_fp16_model.

    Like the wrapped function, this modifies the model in place and
    returns nothing.
    """
    warnings.warn(
        'Importing wrap_fp16_model from "mmdet.core" will be '
        'deprecated in the future. Please import them from "mmcv.runner" '
        'instead')
    wrap_fp16_model(*args, **kwargs)
```
#### File: models/dense_heads/corner_head.py
```python
from math import ceil, log
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, bias_init_with_prob
from mmcv.ops import CornerPool, batched_nms
from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from ..utils import gaussian_radius, gen_gaussian_target
from .base_dense_head import BaseDenseHead
class BiCornerPool(nn.Module):
    """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)
    Args:
        in_channels (int): Input channels of module.
        out_channels (int): Output channels of module.
        feat_channels (int): Feature channels of module.
        directions (list[str]): Directions of two CornerPools.
        norm_cfg (dict): Dictionary to construct and config norm layer.
    """

    def __init__(self,
                 in_channels,
                 directions,
                 feat_channels=128,
                 out_channels=128,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(BiCornerPool, self).__init__()
        # 3x3 convs applied before each directional corner pool.
        self.direction1_conv = ConvModule(
            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
        self.direction2_conv = ConvModule(
            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
        # Fuses the summed pooled features; no activation here — ReLU is
        # applied in forward() after adding the skip branch.
        self.aftpool_conv = ConvModule(
            feat_channels,
            out_channels,
            3,
            padding=1,
            norm_cfg=norm_cfg,
            act_cfg=None)
        # 1x1 projection of the raw input, used as a skip/residual branch.
        self.conv1 = ConvModule(
            in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
        # Final 3x3 conv producing the module output.
        self.conv2 = ConvModule(
            in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)
        self.direction1_pool = CornerPool(directions[0])
        self.direction2_pool = CornerPool(directions[1])
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward features from the upstream network.
        Args:
            x (tensor): Input feature of BiCornerPool.
        Returns:
            conv2 (tensor): Output feature of BiCornerPool.
        """
        direction1_conv = self.direction1_conv(x)
        direction2_conv = self.direction2_conv(x)
        direction1_feat = self.direction1_pool(direction1_conv)
        direction2_feat = self.direction2_pool(direction2_conv)
        # Sum the two directional pools, fuse, add the skip branch, activate.
        aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)
        conv1 = self.conv1(x)
        relu = self.relu(aftpool_conv + conv1)
        conv2 = self.conv2(relu)
        return conv2
@HEADS.register_module()
class CornerHead(BaseDenseHead):
"""Head of CornerNet: Detecting Objects as Paired Keypoints.
Code is modified from the `official github repo
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/
kp.py#L73>`_ .
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_ .
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
num_feat_levels (int): Levels of feature from the previous module. 2
for HourglassNet-104 and 1 for HourglassNet-52. Because
HourglassNet-104 outputs the final feature and intermediate
supervision feature and HourglassNet-52 only outputs the final
feature. Default: 2.
corner_emb_channels (int): Channel of embedding vector. Default: 1.
train_cfg (dict | None): Training config. Useless in CornerHead,
but we keep this variable for SingleStageDetector. Default: None.
test_cfg (dict | None): Testing config of CornerHead. Default: None.
loss_heatmap (dict | None): Config of corner heatmap loss. Default:
GaussianFocalLoss.
loss_embedding (dict | None): Config of corner embedding loss. Default:
AssociativeEmbeddingLoss.
loss_offset (dict | None): Config of corner offset loss. Default:
SmoothL1Loss.
"""
def __init__(self,
             num_classes,
             in_channels,
             num_feat_levels=2,
             corner_emb_channels=1,
             train_cfg=None,
             test_cfg=None,
             loss_heatmap=dict(
                 type='GaussianFocalLoss',
                 alpha=2.0,
                 gamma=4.0,
                 loss_weight=1),
             loss_embedding=dict(
                 type='AssociativeEmbeddingLoss',
                 pull_weight=0.25,
                 push_weight=0.25),
             loss_offset=dict(
                 type='SmoothL1Loss', beta=1.0, loss_weight=1)):
    # NOTE(review): dict default arguments are mutable; safe here only as
    # long as build_loss() does not mutate them — this follows the mmdet
    # config convention.
    super(CornerHead, self).__init__()
    self.num_classes = num_classes
    self.in_channels = in_channels
    self.corner_emb_channels = corner_emb_channels
    # Embedding branch is built only when the embedding channel count is > 0.
    self.with_corner_emb = self.corner_emb_channels > 0
    # Corner offsets are always 2-channel (x, y sub-pixel offsets).
    self.corner_offset_channels = 2
    self.num_feat_levels = num_feat_levels
    # Each loss is optional: a None config disables building that loss.
    self.loss_heatmap = build_loss(
        loss_heatmap) if loss_heatmap is not None else None
    self.loss_embedding = build_loss(
        loss_embedding) if loss_embedding is not None else None
    self.loss_offset = build_loss(
        loss_offset) if loss_offset is not None else None
    self.train_cfg = train_cfg
    self.test_cfg = test_cfg
    self._init_layers()
def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
    """Build the two-conv prediction branch used by every head output.

    A 3x3 conv (with default norm/activation) followed by a bare 1x1
    projection to *out_channels*.
    """
    hidden = ConvModule(in_channels, feat_channels, 3, padding=1)
    projection = ConvModule(
        feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)
    return nn.Sequential(hidden, projection)
def _init_corner_kpt_layers(self):
    """Initialize corner keypoint layers.
    Including corner heatmap branch and corner offset branch. Each branch
    has two parts: prefix `tl_` for top-left and `br_` for bottom-right.
    """
    # One entry per feature level in every ModuleList below.
    self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()
    self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()
    self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()
    for _ in range(self.num_feat_levels):
        # Directional corner pooling for each corner type.
        self.tl_pool.append(
            BiCornerPool(
                self.in_channels, ['top', 'left'],
                out_channels=self.in_channels))
        self.br_pool.append(
            BiCornerPool(
                self.in_channels, ['bottom', 'right'],
                out_channels=self.in_channels))
        # Per-class corner heatmap predictors.
        self.tl_heat.append(
            self._make_layers(
                out_channels=self.num_classes,
                in_channels=self.in_channels))
        self.br_heat.append(
            self._make_layers(
                out_channels=self.num_classes,
                in_channels=self.in_channels))
        # 2-channel (x, y) sub-pixel offset predictors.
        self.tl_off.append(
            self._make_layers(
                out_channels=self.corner_offset_channels,
                in_channels=self.in_channels))
        self.br_off.append(
            self._make_layers(
                out_channels=self.corner_offset_channels,
                in_channels=self.in_channels))
def _init_corner_emb_layers(self):
    """Initialize corner embedding layers.
    Only include corner embedding branch with two parts: prefix `tl_` for
    top-left and `br_` for bottom-right.
    """
    # One embedding predictor per feature level, for each corner type.
    self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()
    for _ in range(self.num_feat_levels):
        self.tl_emb.append(
            self._make_layers(
                out_channels=self.corner_emb_channels,
                in_channels=self.in_channels))
        self.br_emb.append(
            self._make_layers(
                out_channels=self.corner_emb_channels,
                in_channels=self.in_channels))
def _init_layers(self):
    """Create all CornerHead sub-modules.

    Always builds the corner keypoint branches; additionally builds the
    corner embedding branches when embeddings are enabled.
    """
    builders = [self._init_corner_kpt_layers]
    if self.with_corner_emb:
        builders.append(self._init_corner_emb_layers)
    for build in builders:
        build()
def init_weights(self):
    """Initialize weights of the head."""
    # Bias prior of 0.1 on the heatmap branches keeps initial corner
    # activations sparse (standard focal-loss style initialization).
    bias_init = bias_init_with_prob(0.1)
    for i in range(self.num_feat_levels):
        # The initialization of parameters are different between nn.Conv2d
        # and ConvModule. Our experiments show that using the original
        # initialization of nn.Conv2d increases the final mAP by about 0.2%
        self.tl_heat[i][-1].conv.reset_parameters()
        self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
        self.br_heat[i][-1].conv.reset_parameters()
        self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
        self.tl_off[i][-1].conv.reset_parameters()
        self.br_off[i][-1].conv.reset_parameters()
        if self.with_corner_emb:
            self.tl_emb[i][-1].conv.reset_parameters()
            self.br_emb[i][-1].conv.reset_parameters()
def forward(self, feats):
    """Run every feature level through forward_single.

    Args:
        feats (tuple[Tensor]): Features from the upstream network, one
            4D tensor per level.

    Returns:
        tuple: Per-output-type lists across levels, as produced by
        ``forward_single``: top-left/bottom-right corner heatmaps
        (num_classes channels), embeddings (corner_emb_channels channels,
        or None entries when embeddings are disabled) and offsets
        (corner_offset_channels channels).
    """
    level_indices = list(range(self.num_feat_levels))
    return multi_apply(self.forward_single, feats, level_indices)
def forward_single(self, x, lvl_ind, return_pool=False):
    """Compute corner predictions for one feature level.

    Args:
        x (Tensor): Feature of a single level.
        lvl_ind (int): Level index selecting the per-level sub-modules.
        return_pool (bool): Also append the corner-pool features to the
            returned list.

    Returns:
        list[Tensor]: ``[tl_heat, br_heat, tl_emb, br_emb, tl_off,
        br_off]``; the embedding entries are None when
        ``self.with_corner_emb`` is False. When *return_pool* is True the
        two pooled features are appended at the end.
    """
    pooled_tl = self.tl_pool[lvl_ind](x)
    pooled_br = self.br_pool[lvl_ind](x)
    heat_tl = self.tl_heat[lvl_ind](pooled_tl)
    heat_br = self.br_heat[lvl_ind](pooled_br)
    if self.with_corner_emb:
        emb_tl = self.tl_emb[lvl_ind](pooled_tl)
        emb_br = self.br_emb[lvl_ind](pooled_br)
    else:
        emb_tl, emb_br = None, None
    off_tl = self.tl_off[lvl_ind](pooled_tl)
    off_br = self.br_off[lvl_ind](pooled_br)
    outputs = [heat_tl, heat_br, emb_tl, emb_br, off_tl, off_br]
    if return_pool:
        outputs.append(pooled_tl)
        outputs.append(pooled_br)
    return outputs
def get_targets(self,
                gt_bboxes,
                gt_labels,
                feat_shape,
                img_shape,
                with_corner_emb=False,
                with_guiding_shift=False,
                with_centripetal_shift=False):
    """Generate corner targets.
    Including corner heatmap, corner offset.
    Optional: corner embedding, corner guiding shift, centripetal shift.
    For CornerNet, we generate corner heatmap, corner offset and corner
    embedding from this function.
    For CentripetalNet, we generate corner heatmap, corner offset, guiding
    shift and centripetal shift from this function.
    Args:
        gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each
            has shape (num_gt, 4).
        gt_labels (list[Tensor]): Ground truth labels of each box, each has
            shape (num_gt,).
        feat_shape (list[int]): Shape of output feature,
            [batch, channel, height, width].
        img_shape (list[int]): Shape of input image,
            [height, width, channel].
        with_corner_emb (bool): Generate corner embedding target or not.
            Default: False.
        with_guiding_shift (bool): Generate guiding shift target or not.
            Default: False.
        with_centripetal_shift (bool): Generate centripetal shift target or
            not. Default: False.
    Returns:
        dict: Ground truth of corner heatmap, corner offset, corner
        embedding, guiding shift and centripetal shift. Containing the
        following keys:
            - topleft_heatmap (Tensor): Ground truth top-left corner
              heatmap.
            - bottomright_heatmap (Tensor): Ground truth bottom-right
              corner heatmap.
            - topleft_offset (Tensor): Ground truth top-left corner offset.
            - bottomright_offset (Tensor): Ground truth bottom-right corner
              offset.
            - corner_embedding (list[list[list[int]]]): Ground truth corner
              embedding. Not must have.
            - topleft_guiding_shift (Tensor): Ground truth top-left corner
              guiding shift. Not must have.
            - bottomright_guiding_shift (Tensor): Ground truth bottom-right
              corner guiding shift. Not must have.
            - topleft_centripetal_shift (Tensor): Ground truth top-left
              corner centripetal shift. Not must have.
            - bottomright_centripetal_shift (Tensor): Ground truth
              bottom-right corner centripetal shift. Not must have.
    """
    batch_size, _, height, width = feat_shape
    img_h, img_w = img_shape[:2]
    # Scale factors from image coordinates to feature-map coordinates.
    width_ratio = float(width / img_w)
    height_ratio = float(height / img_h)
    gt_tl_heatmap = gt_bboxes[-1].new_zeros(
        [batch_size, self.num_classes, height, width])
    gt_br_heatmap = gt_bboxes[-1].new_zeros(
        [batch_size, self.num_classes, height, width])
    gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
    gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
    if with_corner_emb:
        match = []
    # Guiding shift is a kind of offset, from center to corner
    if with_guiding_shift:
        gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
        gt_br_guiding_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
    # Centripetal shift is also a kind of offset, from center to corner
    # and normalized by log.
    if with_centripetal_shift:
        gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
        gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
    for batch_id in range(batch_size):
        # Ground truth of corner embedding per image is a list of coord set
        corner_match = []
        for box_id in range(len(gt_labels[batch_id])):
            left, top, right, bottom = gt_bboxes[batch_id][box_id]
            center_x = (left + right) / 2.0
            center_y = (top + bottom) / 2.0
            label = gt_labels[batch_id][box_id]
            # Use coords in the feature level to generate ground truth
            scale_left = left * width_ratio
            scale_right = right * width_ratio
            scale_top = top * height_ratio
            scale_bottom = bottom * height_ratio
            scale_center_x = center_x * width_ratio
            scale_center_y = center_y * height_ratio
            # Int coords on feature map/ground truth tensor
            left_idx = int(min(scale_left, width - 1))
            right_idx = int(min(scale_right, width - 1))
            top_idx = int(min(scale_top, height - 1))
            bottom_idx = int(min(scale_bottom, height - 1))
            # Generate gaussian heatmap: radius shrinks with box size so the
            # positive region keeps >= 0.3 IoU with the ground-truth box.
            scale_box_width = ceil(scale_right - scale_left)
            scale_box_height = ceil(scale_bottom - scale_top)
            radius = gaussian_radius((scale_box_height, scale_box_width),
                                     min_overlap=0.3)
            radius = max(0, int(radius))
            gt_tl_heatmap[batch_id, label] = gen_gaussian_target(
                gt_tl_heatmap[batch_id, label], [left_idx, top_idx],
                radius)
            gt_br_heatmap[batch_id, label] = gen_gaussian_target(
                gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],
                radius)
            # Generate corner offset: sub-pixel remainder lost by the int cast.
            left_offset = scale_left - left_idx
            top_offset = scale_top - top_idx
            right_offset = scale_right - right_idx
            bottom_offset = scale_bottom - bottom_idx
            gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset
            gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset
            gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset
            gt_br_offset[batch_id, 1, bottom_idx,
                         right_idx] = bottom_offset
            # Generate corner embedding
            if with_corner_emb:
                corner_match.append([[top_idx, left_idx],
                                     [bottom_idx, right_idx]])
            # Generate guiding shift
            if with_guiding_shift:
                gt_tl_guiding_shift[batch_id, 0, top_idx,
                                    left_idx] = scale_center_x - left_idx
                gt_tl_guiding_shift[batch_id, 1, top_idx,
                                    left_idx] = scale_center_y - top_idx
                gt_br_guiding_shift[batch_id, 0, bottom_idx,
                                    right_idx] = right_idx - scale_center_x
                gt_br_guiding_shift[
                    batch_id, 1, bottom_idx,
                    right_idx] = bottom_idx - scale_center_y
            # Generate centripetal shift
            # NOTE(review): log() requires the center to lie strictly inside
            # the corner on both axes; a degenerate (zero-width/height) box
            # would make the argument <= 0 — confirm boxes are filtered
            # upstream.
            if with_centripetal_shift:
                gt_tl_centripetal_shift[batch_id, 0, top_idx,
                                        left_idx] = log(scale_center_x -
                                                        scale_left)
                gt_tl_centripetal_shift[batch_id, 1, top_idx,
                                        left_idx] = log(scale_center_y -
                                                        scale_top)
                gt_br_centripetal_shift[batch_id, 0, bottom_idx,
                                        right_idx] = log(scale_right -
                                                         scale_center_x)
                gt_br_centripetal_shift[batch_id, 1, bottom_idx,
                                        right_idx] = log(scale_bottom -
                                                         scale_center_y)
        if with_corner_emb:
            match.append(corner_match)
    target_result = dict(
        topleft_heatmap=gt_tl_heatmap,
        topleft_offset=gt_tl_offset,
        bottomright_heatmap=gt_br_heatmap,
        bottomright_offset=gt_br_offset)
    if with_corner_emb:
        target_result.update(corner_embedding=match)
    if with_guiding_shift:
        target_result.update(
            topleft_guiding_shift=gt_tl_guiding_shift,
            bottomright_guiding_shift=gt_br_guiding_shift)
    if with_centripetal_shift:
        target_result.update(
            topleft_centripetal_shift=gt_tl_centripetal_shift,
            bottomright_centripetal_shift=gt_br_centripetal_shift)
    return target_result
def loss(self,
         tl_heats,
         br_heats,
         tl_embs,
         br_embs,
         tl_offs,
         br_offs,
         gt_bboxes,
         gt_labels,
         img_metas,
         gt_bboxes_ignore=None):
    """Compute losses of the head.
    Args:
        tl_heats (list[Tensor]): Top-left corner heatmaps for each level
            with shape (N, num_classes, H, W).
        br_heats (list[Tensor]): Bottom-right corner heatmaps for each
            level with shape (N, num_classes, H, W).
        tl_embs (list[Tensor]): Top-left corner embeddings for each level
            with shape (N, corner_emb_channels, H, W).
        br_embs (list[Tensor]): Bottom-right corner embeddings for each
            level with shape (N, corner_emb_channels, H, W).
        tl_offs (list[Tensor]): Top-left corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        br_offs (list[Tensor]): Bottom-right corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
            shape (num_gts, 4) in [left, top, right, bottom] format.
        gt_labels (list[Tensor]): Class indices corresponding to each box.
        img_metas (list[dict]): Meta information of each image, e.g.,
            image size, scaling factor, etc.
        gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
            boxes can be ignored when computing the loss.
    Returns:
        dict[str, Tensor]: A dictionary of loss components. Containing the
        following losses:
            - det_loss (list[Tensor]): Corner keypoint losses of all
              feature levels.
            - pull_loss (list[Tensor]): Part one of AssociativeEmbedding
              losses of all feature levels.
            - push_loss (list[Tensor]): Part two of AssociativeEmbedding
              losses of all feature levels.
            - off_loss (list[Tensor]): Corner offset losses of all feature
              levels.
    """
    # Targets are built once (from the last level's shape) and shared by
    # every feature level — all levels are supervised identically.
    targets = self.get_targets(
        gt_bboxes,
        gt_labels,
        tl_heats[-1].shape,
        img_metas[0]['pad_shape'],
        with_corner_emb=self.with_corner_emb)
    mlvl_targets = [targets for _ in range(self.num_feat_levels)]
    det_losses, pull_losses, push_losses, off_losses = multi_apply(
        self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,
        br_offs, mlvl_targets)
    loss_dict = dict(det_loss=det_losses, off_loss=off_losses)
    # Embedding (pull/push) losses are reported only when the embedding
    # branch exists.
    if self.with_corner_emb:
        loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)
    return loss_dict
def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
                targets):
    """Compute losses for single level.
    Args:
        tl_hmp (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_hmp (Tensor): Bottom-right corner heatmap for current level with
            shape (N, num_classes, H, W).
        tl_emb (Tensor): Top-left corner embedding for current level with
            shape (N, corner_emb_channels, H, W).
        br_emb (Tensor): Bottom-right corner embedding for current level
            with shape (N, corner_emb_channels, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        targets (dict): Corner target generated by `get_targets`.
    Returns:
        tuple[torch.Tensor]: Losses of the head's differnet branches
        containing the following losses:
            - det_loss (Tensor): Corner keypoint loss.
            - pull_loss (Tensor): Part one of AssociativeEmbedding loss.
            - push_loss (Tensor): Part two of AssociativeEmbedding loss.
            - off_loss (Tensor): Corner offset loss.
    """
    gt_tl_hmp = targets['topleft_heatmap']
    gt_br_hmp = targets['bottomright_heatmap']
    gt_tl_off = targets['topleft_offset']
    gt_br_off = targets['bottomright_offset']
    gt_embedding = targets['corner_embedding']
    # Detection loss: focal loss on sigmoid heatmaps, averaged over the
    # number of exact corner positions (heatmap value == 1), at least 1.
    tl_det_loss = self.loss_heatmap(
        tl_hmp.sigmoid(),
        gt_tl_hmp,
        avg_factor=max(1,
                       gt_tl_hmp.eq(1).sum()))
    br_det_loss = self.loss_heatmap(
        br_hmp.sigmoid(),
        gt_br_hmp,
        avg_factor=max(1,
                       gt_br_hmp.eq(1).sum()))
    det_loss = (tl_det_loss + br_det_loss) / 2.0
    # AssociativeEmbedding loss
    if self.with_corner_emb and self.loss_embedding is not None:
        pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,
                                                   gt_embedding)
    else:
        pull_loss, push_loss = None, None
    # Offset loss
    # We only compute the offset loss at the real corner position.
    # The value of real corner would be 1 in heatmap ground truth.
    # The mask is computed in class agnostic mode and its shape is
    # batch * 1 * width * height.
    tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
        gt_tl_hmp)
    br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
        gt_br_hmp)
    tl_off_loss = self.loss_offset(
        tl_off,
        gt_tl_off,
        tl_off_mask,
        avg_factor=max(1, tl_off_mask.sum()))
    br_off_loss = self.loss_offset(
        br_off,
        gt_br_off,
        br_off_mask,
        avg_factor=max(1, br_off_mask.sum()))
    off_loss = (tl_off_loss + br_off_loss) / 2.0
    return det_loss, pull_loss, push_loss, off_loss
def get_bboxes(self,
               tl_heats,
               br_heats,
               tl_embs,
               br_embs,
               tl_offs,
               br_offs,
               img_metas,
               rescale=False,
               with_nms=True):
    """Transform network output for a batch into bbox predictions.

    Only the last feature level's outputs are decoded; each image in the
    batch is processed independently by ``_get_bboxes_single``.

    Args:
        tl_heats (list[Tensor]): Top-left corner heatmaps for each level
            with shape (N, num_classes, H, W).
        br_heats (list[Tensor]): Bottom-right corner heatmaps for each
            level with shape (N, num_classes, H, W).
        tl_embs (list[Tensor]): Top-left corner embeddings for each level
            with shape (N, corner_emb_channels, H, W).
        br_embs (list[Tensor]): Bottom-right corner embeddings for each
            level with shape (N, corner_emb_channels, H, W).
        tl_offs (list[Tensor]): Top-left corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        br_offs (list[Tensor]): Bottom-right corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        img_metas (list[dict]): Meta information of each image, e.g.,
            image size, scaling factor, etc.
        rescale (bool): If True, return boxes in original image space.
            Default: False.
        with_nms (bool): If True, do nms before return boxes.
            Default: True.

    Returns:
        list[tuple]: Per-image (detections, labels) results.
    """
    # Batch sizes of the last-level heatmaps must agree with the metas.
    assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
    return [
        self._get_bboxes_single(
            tl_heats[-1][idx:idx + 1, :],
            br_heats[-1][idx:idx + 1, :],
            tl_offs[-1][idx:idx + 1, :],
            br_offs[-1][idx:idx + 1, :],
            img_metas[idx],
            tl_emb=tl_embs[-1][idx:idx + 1, :],
            br_emb=br_embs[-1][idx:idx + 1, :],
            rescale=rescale,
            with_nms=with_nms) for idx in range(len(img_metas))
    ]
def _get_bboxes_single(self,
                       tl_heat,
                       br_heat,
                       tl_off,
                       br_off,
                       img_meta,
                       tl_emb=None,
                       br_emb=None,
                       tl_centripetal_shift=None,
                       br_centripetal_shift=None,
                       rescale=False,
                       with_nms=True):
    """Transform outputs for a single batch item into bbox predictions.

    Args:
        tl_heat (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_heat (Tensor): Bottom-right corner heatmap for current level
            with shape (N, num_classes, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        img_meta (dict): Meta information of current image, e.g.,
            image size, scaling factor, etc.
        tl_emb (Tensor): Top-left corner embedding for current level with
            shape (N, corner_emb_channels, H, W).
        br_emb (Tensor): Bottom-right corner embedding for current level
            with shape (N, corner_emb_channels, H, W).
        tl_centripetal_shift: Top-left corner's centripetal shift for
            current level with shape (N, 2, H, W).
        br_centripetal_shift: Bottom-right corner's centripetal shift for
            current level with shape (N, 2, H, W).
        rescale (bool): If True, return boxes in original image space.
            Default: False.
        with_nms (bool): If True, do nms before return boxes.
            Default: True.

    Returns:
        tuple: (detections, labels) for this image.
    """
    # Some pipelines wrap the meta dict in a one-element list/tuple.
    if isinstance(img_meta, (list, tuple)):
        img_meta = img_meta[0]

    batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
        tl_heat=tl_heat.sigmoid(),
        br_heat=br_heat.sigmoid(),
        tl_off=tl_off,
        br_off=br_off,
        tl_emb=tl_emb,
        br_emb=br_emb,
        tl_centripetal_shift=tl_centripetal_shift,
        br_centripetal_shift=br_centripetal_shift,
        img_meta=img_meta,
        k=self.test_cfg.corner_topk,
        kernel=self.test_cfg.local_maximum_kernel,
        distance_threshold=self.test_cfg.distance_threshold)

    if rescale:
        batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])

    flat_bboxes = batch_bboxes.view([-1, 4])
    flat_scores = batch_scores.view([-1, 1])
    flat_clses = batch_clses.view([-1, 1])

    # Sort all candidate boxes by score, highest first.
    order = flat_scores.argsort(dim=0, descending=True)
    flat_bboxes = flat_bboxes[order].view([-1, 4])
    flat_scores = flat_scores[order].view(-1)
    flat_clses = flat_clses[order].view(-1)

    detections = torch.cat([flat_bboxes, flat_scores.unsqueeze(-1)], -1)
    # Invalid pairings were assigned score -1 upstream; keep the rest.
    keep = detections[:, -1] > -0.1
    detections = detections[keep]
    labels = flat_clses[keep]

    if with_nms:
        detections, labels = self._bboxes_nms(detections, labels,
                                              self.test_cfg)

    return detections, labels
def _bboxes_nms(self, bboxes, labels, cfg):
    """Apply class-aware NMS and keep at most ``cfg.max_per_img`` boxes.

    Args:
        bboxes (Tensor): Boxes with scores, shape (N, 5).
        labels (Tensor): Class labels, shape (N,).
        cfg: Test config providing ``nms_cfg`` and ``max_per_img``.

    Returns:
        tuple: Filtered (bboxes, labels), sorted by score descending.
    """
    # Nothing to suppress when there are no detections.
    if labels.numel() == 0:
        return bboxes, labels

    kept_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1], labels,
                                    cfg.nms_cfg)
    kept_labels = labels[keep]
    if len(kept_bboxes) > 0:
        # Re-sort survivors by score and cap the per-image count.
        order = torch.argsort(kept_bboxes[:, -1], descending=True)
        order = order[:cfg.max_per_img]
        kept_bboxes = kept_bboxes[order]
        kept_labels = kept_labels[order]

    return kept_bboxes, kept_labels
def _gather_feat(self, feat, ind, mask=None):
"""Gather feature according to index.
Args:
feat (Tensor): Target feature map.
ind (Tensor): Target coord index.
mask (Tensor | None): Mask of featuremap. Default: None.
Returns:
feat (Tensor): Gathered feature.
"""
dim = feat.size(2)
ind = ind.unsqueeze(2).repeat(1, 1, dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _local_maximum(self, heat, kernel=3):
"""Extract local maximum pixel with given kernal.
Args:
heat (Tensor): Target heatmap.
kernel (int): Kernel size of max pooling. Default: 3.
Returns:
heat (Tensor): A heatmap where local maximum pixels maintain its
own value and other positions are 0.
"""
pad = (kernel - 1) // 2
hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _transpose_and_gather_feat(self, feat, ind):
    """Move channels last, flatten the spatial grid, then gather.

    Args:
        feat (Tensor): Target feature map of shape (B, C, H, W).
        ind (Tensor): Target coord index of shape (B, K).

    Returns:
        Tensor: Gathered feature of shape (B, K, C).
    """
    # (B, C, H, W) -> (B, H, W, C) -> (B, H*W, C)
    channels_last = feat.permute(0, 2, 3, 1).contiguous()
    flat = channels_last.view(channels_last.size(0), -1,
                              channels_last.size(3))
    return self._gather_feat(flat, ind)
def _topk(self, scores, k=20):
"""Get top k positions from heatmap.
Args:
scores (Tensor): Target heatmap with shape
[batch, num_classes, height, width].
k (int): Target number. Default: 20.
Returns:
tuple[torch.Tensor]: Scores, indexes, categories and coords of
topk keypoint. Containing following Tensors:
- topk_scores (Tensor): Max scores of each topk keypoint.
- topk_inds (Tensor): Indexes of each topk keypoint.
- topk_clses (Tensor): Categories of each topk keypoint.
- topk_ys (Tensor): Y-coord of each topk keypoint.
- topk_xs (Tensor): X-coord of each topk keypoint.
"""
batch, _, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)
topk_clses = topk_inds // (height * width)
topk_inds = topk_inds % (height * width)
topk_ys = topk_inds // width
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
def decode_heatmap(self,
                   tl_heat,
                   br_heat,
                   tl_off,
                   br_off,
                   tl_emb=None,
                   br_emb=None,
                   tl_centripetal_shift=None,
                   br_centripetal_shift=None,
                   img_meta=None,
                   k=100,
                   kernel=3,
                   distance_threshold=0.5,
                   num_dets=1000):
    """Transform outputs for a single batch item into raw bbox predictions.

    Exactly one of the two corner-matching modes must be active: either
    embeddings (tl_emb/br_emb) or centripetal shifts.

    Args:
        tl_heat (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_heat (Tensor): Bottom-right corner heatmap for current level
            with shape (N, num_classes, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        tl_emb (Tensor | None): Top-left corner embedding for current
            level with shape (N, corner_emb_channels, H, W).
        br_emb (Tensor | None): Bottom-right corner embedding for current
            level with shape (N, corner_emb_channels, H, W).
        tl_centripetal_shift (Tensor | None): Top-left centripetal shift
            for current level with shape (N, 2, H, W).
        br_centripetal_shift (Tensor | None): Bottom-right centripetal
            shift for current level with shape (N, 2, H, W).
        img_meta (dict): Meta information of current image, e.g.,
            image size, scaling factor, etc.
        k (int): Get top k corner keypoints from heatmap.
        kernel (int): Max pooling kernel for extract local maximum pixels.
        distance_threshold (float): Distance threshold. Top-left and
            bottom-right corner keypoints with feature distance less than
            the threshold will be regarded as keypoints from same object.
        num_dets (int): Num of raw boxes before doing nms.

    Returns:
        tuple[torch.Tensor]: Decoded output of CornerHead, containing the
        following Tensors:

        - bboxes (Tensor): Coords of each box.
        - scores (Tensor): Scores of each box.
        - clses (Tensor): Categories of each box.
    """
    with_embedding = tl_emb is not None and br_emb is not None
    with_centripetal_shift = (
        tl_centripetal_shift is not None
        and br_centripetal_shift is not None)
    # Exactly one matching mode may be enabled at a time.
    assert with_embedding + with_centripetal_shift == 1
    batch, _, height, width = tl_heat.size()
    inp_h, inp_w, _ = img_meta['pad_shape']

    # perform nms on heatmaps
    tl_heat = self._local_maximum(tl_heat, kernel=kernel)
    br_heat = self._local_maximum(br_heat, kernel=kernel)

    tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(tl_heat, k=k)
    br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(br_heat, k=k)

    # We use repeat instead of expand here because expand is a
    # shallow-copy function. Thus it could cause unexpected testing result
    # sometimes. Using expand will decrease about 10% mAP during testing
    # compared to repeat.
    # The (k, k) grids below pair every top-left corner with every
    # bottom-right corner.
    tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)
    tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)
    br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)
    br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)

    tl_off = self._transpose_and_gather_feat(tl_off, tl_inds)
    tl_off = tl_off.view(batch, k, 1, 2)
    br_off = self._transpose_and_gather_feat(br_off, br_inds)
    br_off = br_off.view(batch, 1, k, 2)

    # Refine corner coordinates with the predicted sub-pixel offsets.
    tl_xs = tl_xs + tl_off[..., 0]
    tl_ys = tl_ys + tl_off[..., 1]
    br_xs = br_xs + br_off[..., 0]
    br_ys = br_ys + br_off[..., 1]

    if with_centripetal_shift:
        # Shifts are predicted in log space; exp() recovers magnitudes.
        tl_centripetal_shift = self._transpose_and_gather_feat(
            tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()
        br_centripetal_shift = self._transpose_and_gather_feat(
            br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()

        tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]
        tl_ctys = tl_ys + tl_centripetal_shift[..., 1]
        br_ctxs = br_xs - br_centripetal_shift[..., 0]
        br_ctys = br_ys - br_centripetal_shift[..., 1]

    # all possible boxes based on top k corners (ignoring class)
    # Map feature-map coordinates back to padded-input coordinates.
    tl_xs *= (inp_w / width)
    tl_ys *= (inp_h / height)
    br_xs *= (inp_w / width)
    br_ys *= (inp_h / height)

    if with_centripetal_shift:
        tl_ctxs *= (inp_w / width)
        tl_ctys *= (inp_h / height)
        br_ctxs *= (inp_w / width)
        br_ctys *= (inp_h / height)

    # Undo the test-time border padding, then clamp negatives to zero.
    x_off = img_meta['border'][2]
    y_off = img_meta['border'][0]

    tl_xs -= x_off
    tl_ys -= y_off
    br_xs -= x_off
    br_ys -= y_off

    tl_xs *= tl_xs.gt(0.0).type_as(tl_xs)
    tl_ys *= tl_ys.gt(0.0).type_as(tl_ys)
    br_xs *= br_xs.gt(0.0).type_as(br_xs)
    br_ys *= br_ys.gt(0.0).type_as(br_ys)

    bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
    area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()

    if with_centripetal_shift:
        tl_ctxs -= x_off
        tl_ctys -= y_off
        br_ctxs -= x_off
        br_ctys -= y_off

        tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)
        tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)
        br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)
        br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)

        ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),
                                dim=3)
        area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()

        rcentral = torch.zeros_like(ct_bboxes)
        # magic nums from paper section 4.1
        mu = torch.ones_like(area_bboxes) / 2.4
        mu[area_bboxes > 3500] = 1 / 2.1  # large bbox have smaller mu

        bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2
        bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2
        rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -
                                                   bboxes[..., 0]) / 2
        rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -
                                                   bboxes[..., 1]) / 2
        rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -
                                                   bboxes[..., 0]) / 2
        rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -
                                                   bboxes[..., 1]) / 2
        area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *
                         (rcentral[..., 3] - rcentral[..., 1])).abs()
        dists = area_ct_bboxes / area_rcentral

        # A pairing is rejected if either shifted center falls outside
        # the central region of the candidate box.
        tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (
            ct_bboxes[..., 0] >= rcentral[..., 2])
        tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (
            ct_bboxes[..., 1] >= rcentral[..., 3])
        br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (
            ct_bboxes[..., 2] >= rcentral[..., 2])
        br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (
            ct_bboxes[..., 3] >= rcentral[..., 3])

    if with_embedding:
        tl_emb = self._transpose_and_gather_feat(tl_emb, tl_inds)
        tl_emb = tl_emb.view(batch, k, 1)
        br_emb = self._transpose_and_gather_feat(br_emb, br_inds)
        br_emb = br_emb.view(batch, 1, k)
        # Embedding distance between every corner pair.
        dists = torch.abs(tl_emb - br_emb)

    tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)
    br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)

    scores = (tl_scores + br_scores) / 2  # scores for all possible boxes

    # tl and br should have same class
    tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)
    br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)
    cls_inds = (tl_clses != br_clses)

    # reject boxes based on distances
    dist_inds = dists > distance_threshold

    # reject boxes based on widths and heights
    width_inds = (br_xs <= tl_xs)
    height_inds = (br_ys <= tl_ys)

    # Invalid pairings get score -1 so downstream thresholding (> -0.1)
    # drops them.
    scores[cls_inds] = -1
    scores[width_inds] = -1
    scores[height_inds] = -1
    scores[dist_inds] = -1
    if with_centripetal_shift:
        scores[tl_ctx_inds] = -1
        scores[tl_cty_inds] = -1
        scores[br_ctx_inds] = -1
        scores[br_cty_inds] = -1

    scores = scores.view(batch, -1)
    scores, inds = torch.topk(scores, num_dets)
    scores = scores.unsqueeze(2)

    bboxes = bboxes.view(batch, -1, 4)
    bboxes = self._gather_feat(bboxes, inds)

    clses = tl_clses.contiguous().view(batch, -1, 1)
    clses = self._gather_feat(clses, inds).float()

    return bboxes, scores, clses
```
#### File: mmdetection/tests/test_anchor.py
```python
import torch
def test_standard_anchor_generator():
    """Building a plain AnchorGenerator from config must succeed."""
    from mmdet.core.anchor import build_anchor_generator

    cfg = dict(
        type='AnchorGenerator',
        scales=[8],
        ratios=[0.5, 1.0, 2.0],
        strides=[4, 8])
    generator = build_anchor_generator(cfg)
    assert generator is not None
def test_strides():
    """AnchorGenerator must honour both scalar and (x, y) tuple strides."""
    from mmdet.core import AnchorGenerator

    # Square strides: same grid step in x and y.
    generator = AnchorGenerator([10], [1.], [1.], [10])
    anchors = generator.grid_anchors([(2, 2)], device='cpu')
    expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
                                     [-5., 5., 5., 15.], [5., 5., 15., 15.]])
    assert torch.equal(anchors[0], expected_anchors)

    # Rectangular strides: different step in x and y direction.
    generator = AnchorGenerator([(10, 20)], [1.], [1.], [10])
    anchors = generator.grid_anchors([(2, 2)], device='cpu')
    expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
                                     [-5., 15., 5., 25.], [5., 15., 15., 25.]])
    assert torch.equal(anchors[0], expected_anchors)
def test_ssd_anchor_generator():
    """SSDAnchorGenerator: base anchors, valid flags and grid generation.

    Expected values are golden fixtures for a standard SSD-300 config.
    """
    from mmdet.core.anchor import build_anchor_generator
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'

    anchor_generator_cfg = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        strides=[8, 16, 32, 64, 100, 300],
        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
    anchor_generator = build_anchor_generator(anchor_generator_cfg)

    # check base anchors
    expected_base_anchors = [
        torch.Tensor([[-6.5000, -6.5000, 14.5000, 14.5000],
                      [-11.3704, -11.3704, 19.3704, 19.3704],
                      [-10.8492, -3.4246, 18.8492, 11.4246],
                      [-3.4246, -10.8492, 11.4246, 18.8492]]),
        torch.Tensor([[-14.5000, -14.5000, 30.5000, 30.5000],
                      [-25.3729, -25.3729, 41.3729, 41.3729],
                      [-23.8198, -7.9099, 39.8198, 23.9099],
                      [-7.9099, -23.8198, 23.9099, 39.8198],
                      [-30.9711, -4.9904, 46.9711, 20.9904],
                      [-4.9904, -30.9711, 20.9904, 46.9711]]),
        torch.Tensor([[-33.5000, -33.5000, 65.5000, 65.5000],
                      [-45.5366, -45.5366, 77.5366, 77.5366],
                      [-54.0036, -19.0018, 86.0036, 51.0018],
                      [-19.0018, -54.0036, 51.0018, 86.0036],
                      [-69.7365, -12.5788, 101.7365, 44.5788],
                      [-12.5788, -69.7365, 44.5788, 101.7365]]),
        torch.Tensor([[-44.5000, -44.5000, 108.5000, 108.5000],
                      [-56.9817, -56.9817, 120.9817, 120.9817],
                      [-76.1873, -22.0937, 140.1873, 86.0937],
                      [-22.0937, -76.1873, 86.0937, 140.1873],
                      [-100.5019, -12.1673, 164.5019, 76.1673],
                      [-12.1673, -100.5019, 76.1673, 164.5019]]),
        torch.Tensor([[-53.5000, -53.5000, 153.5000, 153.5000],
                      [-66.2185, -66.2185, 166.2185, 166.2185],
                      [-96.3711, -23.1855, 196.3711, 123.1855],
                      [-23.1855, -96.3711, 123.1855, 196.3711]]),
        torch.Tensor([[19.5000, 19.5000, 280.5000, 280.5000],
                      [6.6342, 6.6342, 293.3658, 293.3658],
                      [-34.5549, 57.7226, 334.5549, 242.2774],
                      [57.7226, -34.5549, 242.2774, 334.5549]]),
    ]
    base_anchors = anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])

    # check valid flags
    expected_valid_pixels = [5776, 2166, 600, 150, 36, 4]
    multi_level_valid_flags = anchor_generator.valid_flags(
        featmap_sizes, (300, 300), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]

    # check number of base anchors for each level
    assert anchor_generator.num_base_anchors == [4, 6, 6, 6, 4, 4]

    # check anchor generation
    anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 6
def test_anchor_generator_with_tuples():
    """Scalar strides and equal (x, y) tuple strides give identical grids."""
    from mmdet.core.anchor import build_anchor_generator
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]

    scalar_cfg = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        strides=[8, 16, 32, 64, 100, 300],
        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
    anchors = build_anchor_generator(scalar_cfg).grid_anchors(
        featmap_sizes, device)

    tuple_cfg = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        strides=[(8, 8), (16, 16), (32, 32), (64, 64), (100, 100), (300, 300)],
        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
    anchors_tuples = build_anchor_generator(tuple_cfg).grid_anchors(
        featmap_sizes, device)

    for anchor, anchor_tuples in zip(anchors, anchors_tuples):
        assert torch.equal(anchor, anchor_tuples)
def test_yolo_anchor_generator():
    """YOLOAnchorGenerator: base anchors and grid generation fixtures."""
    from mmdet.core.anchor import build_anchor_generator
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'

    anchor_generator_cfg = dict(
        type='YOLOAnchorGenerator',
        strides=[32, 16, 8],
        base_sizes=[
            [(116, 90), (156, 198), (373, 326)],
            [(30, 61), (62, 45), (59, 119)],
            [(10, 13), (16, 30), (33, 23)],
        ])
    featmap_sizes = [(14, 18), (28, 36), (56, 72)]
    anchor_generator = build_anchor_generator(anchor_generator_cfg)

    # check base anchors
    expected_base_anchors = [
        torch.Tensor([[-42.0000, -29.0000, 74.0000, 61.0000],
                      [-62.0000, -83.0000, 94.0000, 115.0000],
                      [-170.5000, -147.0000, 202.5000, 179.0000]]),
        torch.Tensor([[-7.0000, -22.5000, 23.0000, 38.5000],
                      [-23.0000, -14.5000, 39.0000, 30.5000],
                      [-21.5000, -51.5000, 37.5000, 67.5000]]),
        torch.Tensor([[-1.0000, -2.5000, 9.0000, 10.5000],
                      [-4.0000, -11.0000, 12.0000, 19.0000],
                      [-12.5000, -7.5000, 20.5000, 15.5000]])
    ]
    base_anchors = anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])

    # check number of base anchors for each level
    assert anchor_generator.num_base_anchors == [3, 3, 3]

    # check anchor generation
    anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 3
def test_retina_anchor():
    """RetinaSepBNHead: anchor generator fixtures at NAS-FPN featmap sizes."""
    from mmdet.models import build_head
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'

    # head configs modified from
    # configs/nas_fpn/retinanet_r50_fpn_crop640_50e.py
    bbox_head = dict(
        type='RetinaSepBNHead',
        num_classes=4,
        num_ins=5,
        in_channels=4,
        stacked_convs=1,
        feat_channels=4,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]))

    retina_head = build_head(bbox_head)
    assert retina_head.anchor_generator is not None

    # use the featmap sizes in NASFPN setting to test retina head
    featmap_sizes = [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
    # check base anchors
    expected_base_anchors = [
        torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
                      [-28.5088, -14.2544, 28.5088, 14.2544],
                      [-35.9188, -17.9594, 35.9188, 17.9594],
                      [-16.0000, -16.0000, 16.0000, 16.0000],
                      [-20.1587, -20.1587, 20.1587, 20.1587],
                      [-25.3984, -25.3984, 25.3984, 25.3984],
                      [-11.3137, -22.6274, 11.3137, 22.6274],
                      [-14.2544, -28.5088, 14.2544, 28.5088],
                      [-17.9594, -35.9188, 17.9594, 35.9188]]),
        torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
                      [-57.0175, -28.5088, 57.0175, 28.5088],
                      [-71.8376, -35.9188, 71.8376, 35.9188],
                      [-32.0000, -32.0000, 32.0000, 32.0000],
                      [-40.3175, -40.3175, 40.3175, 40.3175],
                      [-50.7968, -50.7968, 50.7968, 50.7968],
                      [-22.6274, -45.2548, 22.6274, 45.2548],
                      [-28.5088, -57.0175, 28.5088, 57.0175],
                      [-35.9188, -71.8376, 35.9188, 71.8376]]),
        torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
                      [-114.0350, -57.0175, 114.0350, 57.0175],
                      [-143.6751, -71.8376, 143.6751, 71.8376],
                      [-64.0000, -64.0000, 64.0000, 64.0000],
                      [-80.6349, -80.6349, 80.6349, 80.6349],
                      [-101.5937, -101.5937, 101.5937, 101.5937],
                      [-45.2548, -90.5097, 45.2548, 90.5097],
                      [-57.0175, -114.0350, 57.0175, 114.0350],
                      [-71.8376, -143.6751, 71.8376, 143.6751]]),
        torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
                      [-228.0701, -114.0350, 228.0701, 114.0350],
                      [-287.3503, -143.6751, 287.3503, 143.6751],
                      [-128.0000, -128.0000, 128.0000, 128.0000],
                      [-161.2699, -161.2699, 161.2699, 161.2699],
                      [-203.1873, -203.1873, 203.1873, 203.1873],
                      [-90.5097, -181.0193, 90.5097, 181.0193],
                      [-114.0350, -228.0701, 114.0350, 228.0701],
                      [-143.6751, -287.3503, 143.6751, 287.3503]]),
        torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
                      [-456.1401, -228.0701, 456.1401, 228.0701],
                      [-574.7006, -287.3503, 574.7006, 287.3503],
                      [-256.0000, -256.0000, 256.0000, 256.0000],
                      [-322.5398, -322.5398, 322.5398, 322.5398],
                      [-406.3747, -406.3747, 406.3747, 406.3747],
                      [-181.0193, -362.0387, 181.0193, 362.0387],
                      [-228.0701, -456.1401, 228.0701, 456.1401],
                      [-287.3503, -574.7006, 287.3503, 574.7006]])
    ]
    base_anchors = retina_head.anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])

    # check valid flags
    expected_valid_pixels = [57600, 14400, 3600, 900, 225]
    multi_level_valid_flags = retina_head.anchor_generator.valid_flags(
        featmap_sizes, (640, 640), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]

    # check number of base anchors for each level
    assert retina_head.anchor_generator.num_base_anchors == [9, 9, 9, 9, 9]

    # check anchor generation
    anchors = retina_head.anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 5
def test_guided_anchor():
    """GARetinaHead: approx and square anchor generator fixtures."""
    from mmdet.models import build_head
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'

    # head configs modified from
    # configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py
    bbox_head = dict(
        type='GARetinaHead',
        num_classes=8,
        in_channels=4,
        stacked_convs=1,
        feat_channels=4,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]))

    ga_retina_head = build_head(bbox_head)
    assert ga_retina_head.approx_anchor_generator is not None

    # use the featmap sizes in NASFPN setting to test ga_retina_head
    featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)]
    # check base anchors
    expected_approxs = [
        torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
                      [-28.5088, -14.2544, 28.5088, 14.2544],
                      [-35.9188, -17.9594, 35.9188, 17.9594],
                      [-16.0000, -16.0000, 16.0000, 16.0000],
                      [-20.1587, -20.1587, 20.1587, 20.1587],
                      [-25.3984, -25.3984, 25.3984, 25.3984],
                      [-11.3137, -22.6274, 11.3137, 22.6274],
                      [-14.2544, -28.5088, 14.2544, 28.5088],
                      [-17.9594, -35.9188, 17.9594, 35.9188]]),
        torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
                      [-57.0175, -28.5088, 57.0175, 28.5088],
                      [-71.8376, -35.9188, 71.8376, 35.9188],
                      [-32.0000, -32.0000, 32.0000, 32.0000],
                      [-40.3175, -40.3175, 40.3175, 40.3175],
                      [-50.7968, -50.7968, 50.7968, 50.7968],
                      [-22.6274, -45.2548, 22.6274, 45.2548],
                      [-28.5088, -57.0175, 28.5088, 57.0175],
                      [-35.9188, -71.8376, 35.9188, 71.8376]]),
        torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
                      [-114.0350, -57.0175, 114.0350, 57.0175],
                      [-143.6751, -71.8376, 143.6751, 71.8376],
                      [-64.0000, -64.0000, 64.0000, 64.0000],
                      [-80.6349, -80.6349, 80.6349, 80.6349],
                      [-101.5937, -101.5937, 101.5937, 101.5937],
                      [-45.2548, -90.5097, 45.2548, 90.5097],
                      [-57.0175, -114.0350, 57.0175, 114.0350],
                      [-71.8376, -143.6751, 71.8376, 143.6751]]),
        torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
                      [-228.0701, -114.0350, 228.0701, 114.0350],
                      [-287.3503, -143.6751, 287.3503, 143.6751],
                      [-128.0000, -128.0000, 128.0000, 128.0000],
                      [-161.2699, -161.2699, 161.2699, 161.2699],
                      [-203.1873, -203.1873, 203.1873, 203.1873],
                      [-90.5097, -181.0193, 90.5097, 181.0193],
                      [-114.0350, -228.0701, 114.0350, 228.0701],
                      [-143.6751, -287.3503, 143.6751, 287.3503]]),
        torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
                      [-456.1401, -228.0701, 456.1401, 228.0701],
                      [-574.7006, -287.3503, 574.7006, 287.3503],
                      [-256.0000, -256.0000, 256.0000, 256.0000],
                      [-322.5398, -322.5398, 322.5398, 322.5398],
                      [-406.3747, -406.3747, 406.3747, 406.3747],
                      [-181.0193, -362.0387, 181.0193, 362.0387],
                      [-228.0701, -456.1401, 228.0701, 456.1401],
                      [-287.3503, -574.7006, 287.3503, 574.7006]])
    ]
    approxs = ga_retina_head.approx_anchor_generator.base_anchors
    for i, base_anchor in enumerate(approxs):
        assert base_anchor.allclose(expected_approxs[i])

    # check valid flags
    expected_valid_pixels = [136800, 34200, 8550, 2223, 630]
    multi_level_valid_flags = ga_retina_head.approx_anchor_generator \
        .valid_flags(featmap_sizes, (800, 1216), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]

    # check number of base anchors for each level
    assert ga_retina_head.approx_anchor_generator.num_base_anchors == [
        9, 9, 9, 9, 9
    ]

    # check approx generation
    squares = ga_retina_head.square_anchor_generator.grid_anchors(
        featmap_sizes, device)
    assert len(squares) == 5

    expected_squares = [
        torch.Tensor([[-16., -16., 16., 16.]]),
        torch.Tensor([[-32., -32., 32., 32]]),
        torch.Tensor([[-64., -64., 64., 64.]]),
        torch.Tensor([[-128., -128., 128., 128.]]),
        torch.Tensor([[-256., -256., 256., 256.]])
    ]
    squares = ga_retina_head.square_anchor_generator.base_anchors
    for i, base_anchor in enumerate(squares):
        assert base_anchor.allclose(expected_squares[i])

    # square_anchor_generator does not check valid flags
    # check number of base anchors for each level
    assert (ga_retina_head.square_anchor_generator.num_base_anchors == [
        1, 1, 1, 1, 1
    ])

    # check square generation
    anchors = ga_retina_head.square_anchor_generator.grid_anchors(
        featmap_sizes, device)
    assert len(anchors) == 5
``` |
{
"source": "jiangrongbo/akshare",
"score": 2
} |
#### File: akshare/stock_feature/stock_em_yjbb.py
```python
import pandas as pd
import requests
from tqdm import tqdm
def stock_em_yjbb(date: str = "20200331") -> pd.DataFrame:
    """
    Eastmoney - Data Center - Annual/Quarterly Reports - Performance Report
    (东方财富-数据中心-年报季报-业绩快报-业绩报表)
    http://data.eastmoney.com/bbsj/202003/yjbb.html
    :param date: report date, e.g. "20200331", "20200630", "20200930",
        "20201231"; data is available from 20100331 onwards
    :type date: str
    :return: performance report table
    :rtype: pandas.DataFrame
    """
    url = "http://datacenter.eastmoney.com/api/data/get"
    # Shared query parameters; only the page number "p" changes per request.
    # NOTE: "ps" (page size) must be identical for the probe request and the
    # page loop — previously the probe used 5000 while the loop used 500,
    # so the reported page count under-covered the data set.
    params = {
        "st": "UPDATE_DATE,SECURITY_CODE",
        "sr": "-1,-1",
        "ps": "500",
        "p": "1",
        "type": "RPT_LICO_FN_CPD",
        "sty": "ALL",
        "token": "<KEY>",
        "filter": f"(REPORTDATE='{'-'.join([date[:4], date[4:6], date[6:]])}')",
    }
    # Probe request to learn the total number of pages.
    r = requests.get(url, params=params)
    data_json = r.json()
    page_num = data_json["result"]["pages"]
    page_frames = []
    for page in tqdm(range(1, page_num + 1), leave=False):
        params["p"] = page
        r = requests.get(url, params=params)
        data_json = r.json()
        page_frames.append(pd.DataFrame(data_json["result"]["data"]))
    if not page_frames:
        # No data for this report date.
        return pd.DataFrame()
    # DataFrame.append was removed in pandas 2.0; concatenate once instead.
    big_df = pd.concat(page_frames, ignore_index=True)
    big_df.reset_index(inplace=True)
    big_df["index"] = range(1, len(big_df) + 1)
    big_df.columns = [
        "序号",
        "股票代码",
        "股票简称",
        "_",
        "_",
        "_",
        "_",
        "最新公告日期",
        "_",
        "每股收益",
        "_",
        "营业收入-营业收入",
        "净利润-净利润",
        "净资产收益率",
        "营业收入-同比增长",
        "净利润-同比增长",
        "每股净资产",
        "每股经营现金流量",
        "销售毛利率",
        "营业收入-季度环比增长",
        "净利润-季度环比增长",
        "_",
        "_",
        "所处行业",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
    ]
    big_df = big_df[
        [
            "序号",
            "股票代码",
            "股票简称",
            "每股收益",
            "营业收入-营业收入",
            "营业收入-同比增长",
            "营业收入-季度环比增长",
            "净利润-净利润",
            "净利润-同比增长",
            "净利润-季度环比增长",
            "每股净资产",
            "净资产收益率",
            "每股经营现金流量",
            "销售毛利率",
            "所处行业",
            "最新公告日期",
        ]
    ]
    return big_df
# Manual smoke run: fetch the 2020-Q1 performance report and print it.
if __name__ == "__main__":
    stock_em_yjbb_df = stock_em_yjbb(date="20200331")
    print(stock_em_yjbb_df)
```
#### File: akshare/tests/test_back_trade.py
```python
import unittest
import backtrader as bt
import pandas as pd
import akshare as ak
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.width', 5000)
class StockTest(unittest.TestCase):
    """Smoke test wiring akshare daily bars into a backtrader run."""

    def test_stock_back_trade(self):
        # Pull daily bars and print them for manual inspection.
        hist_df = ak.stock_zh_a_hist("000001")
        print(hist_df)

        # Map the Chinese column names onto what backtrader expects.
        rename_map = {
            '日期': 'date',
            '开盘': 'open',
            '收盘': 'close',
            '最高': 'high',
            '最低': 'low',
            '成交量': 'volume',
            '成交额': 'amount',
            '振幅': 'amplitude',
            '涨跌幅': 'applies',
            '涨跌额': 'applies_amount',
            '换手率': 'turnover_rate'
        }
        hist_df.rename(columns=rename_map, inplace=True)
        hist_df['date'] = pd.to_datetime(hist_df['date'])
        hist_df.set_index('date', inplace=True)
        print(hist_df)

        class CrossStrategy(bt.Strategy):
            def start(self):
                pass

            def next(self):
                print('hello')

        engine = bt.Cerebro()
        engine.addstrategy(CrossStrategy)
        feed = bt.feeds.PandasData(dataname=hist_df)
        engine.adddata(feed)
        engine.run()
        engine.plot(style='candle')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
```
#### File: akshare/tests/test_stock_industry.py
```python
import unittest
import akshare as ak
import pandas as pd
import time
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.width', 5000)
class StockTest(unittest.TestCase):
    """Exploratory checks around stock industry CSV snapshots.

    Most cases read/write local CSV files and print results for manual
    inspection rather than asserting — they are smoke tests.
    """

    # 'turnoverratio' column holds the turnover rate (换手率).
    def test_stock_industry(self):
        stock_industry_df = pd.read_csv('stock_industry2021_08_20.csv')
        print(stock_industry_df.columns)
        stock_industry_df = stock_industry_df[[
            'code', 'name', 'trade', 'open', 'high', 'low', 'volume', 'amount', 'turnoverratio', 'per', 'pb',
            'industry']]
        stock_industry_df = stock_industry_df.query('per > 0')
        # Coerce numeric columns; unparseable values become 0.
        stock_industry_df["per"] = pd.to_numeric(stock_industry_df["per"], errors='coerce').fillna(0)
        stock_industry_df["pb"] = pd.to_numeric(stock_industry_df["pb"], errors='coerce').fillna(0)
        stock_industry_df["volume"] = pd.to_numeric(stock_industry_df["volume"], errors='coerce').fillna(0)
        stock_industry_df["amount"] = pd.to_numeric(stock_industry_df["amount"], errors='coerce').fillna(0)
        # Rank stocks within each industry by P/E ratio, descending.
        stock_industry_df = stock_industry_df.groupby(by=['industry'], group_keys=True).apply(
            lambda x: x.sort_values('per', ascending=False))
        stock_industry_df.to_csv('stock_industry_result.csv')
        print(stock_industry_df[['code', 'name', 'trade', 'per', 'industry']])

    def test_stock_info_sz_name_code(self):
        stock_info_sz_name_code_df = ak.stock_info_sz_name_code(indicator='A股列表')
        print(stock_info_sz_name_code_df)

    def test_stock_company_summary_info(self):
        stock_company_summary_info_df = ak.stock_company_summary_info()
        stock_company_summary_info_df.to_csv("stock_company_summary_info2021_08_24.csv")

    def test_stock_company_summary_info_merge(self):
        pd1 = pd.read_csv('stock_company_summary_info2021_08_22.csv')
        pd2 = pd.read_csv('stock_company_summary_info2021_08_22_2.csv')
        # DataFrame.append was removed in pandas 2.0; use pd.concat.
        pd1 = pd.concat([pd1, pd2], ignore_index=True)
        # Restore leading zeros lost when codes were parsed as integers.
        pd1['股票代码'] = pd1['股票代码'].apply(lambda x: str(x).rjust(6, '0'))
        pd1.drop('Unnamed: 0', axis=1, inplace=True)
        print(pd1)
        pd1.to_csv('stock_company_summary_info.csv')

    def test_merge(self):
        stock_industry_df = pd.read_csv('stock_industry2021_08_20.csv')
        stock_industry_df.drop_duplicates(inplace=True)
        # Fixed: was `axis=True`, which only works because bool True
        # coerces to 1; use the explicit column axis.
        stock_industry_df.drop('Unnamed: 0', axis=1, inplace=True)
        stock_industry_df['code'] = stock_industry_df['code'].apply(lambda x: str(x).rjust(6, '0'))
        print(stock_industry_df)
        stock_industry_df.to_csv('stock_industry.csv')

    def test_big_table(self):
        df1 = pd.read_csv('stock_industry.csv')
        # df2 = pd.read_csv('stock_company_summary_info.csv')
        # df3 = pd.merge(df1,df2,how='left', left_on=['code'],right_on=['股票代码'])
        print(df1['code'].apply(lambda x: str(x).rjust(6, '0')))
        # print(df1['股票名称'])
        # df3.to_csv('test.csv')
        # print(df1)
        # print(df3)
# Run this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
```
#### File: akshare/tests/test_stock_info.py
```python
import unittest
import akshare as ak
import pandas as pd
import talib as ta
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.width', 5000)
class StockTest(unittest.TestCase):
    """Exploratory check for a MACD 'golden cross' on daily A-share bars.

    NOTE(review): depends on the akshare network API and TA-Lib, so this is
    an interactive script rather than a CI-runnable unit test.
    """

    def test_stock_history(self):
        # Daily history for symbol 000001; akshare returns Chinese column
        # labels ('日期' = date, '收盘' = close).
        stock_zh_a_hist_df = ak.stock_zh_a_hist("000001")
        print(stock_zh_a_hist_df)
        # Standard MACD(12, 26, 9) on the close column.
        dif, dea, hist = ta.MACD(stock_zh_a_hist_df['收盘'].astype(float).values, fastperiod=12, slowperiod=26,
                                 signalperiod=9)
        stock_zh_a_hist_df['date'] = stock_zh_a_hist_df['日期']
        # Skip the first 10 rows where the indicator is still warming up
        # (TA-Lib emits NaN until the slow EMA is seeded).
        macd_df = pd.DataFrame({'dif': dif[10:], 'dea': dea[10:], 'hist': hist[10:]},
                               index=stock_zh_a_hist_df['date'][10:], columns=['dif', 'dea', 'hist'])
        macd_df = macd_df.tail(2)
        # Golden cross: DIF at/below DEA on the previous bar and at/above it
        # on the latest bar.
        if (macd_df.iloc[0, 0] <= macd_df.iloc[0, 1]) & (macd_df.iloc[1, 0] >= macd_df.iloc[1, 1]):
            print("true")
            pass
        print(macd_df)
        pass
# Run this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
``` |
{
"source": "jiangrongbo/pysnowball",
"score": 3
} |
#### File: pysnowball/pysnowball/capital.py
```python
import json
import os
from pysnowball import cons
from pysnowball import api_ref
from pysnowball import utls
"""
融资融券
融资融券数据
"""
def margin(symbol, page=1, size=180):
url = api_ref.capital_margin_url + symbol
url = url + '&page=' + str(page)
url = url + '&size=' + str(size)
return utls.fetch(url)
"""
大宗交易
大宗交易数据
"""
def blocktrans(symbol, page=1, size=30):
url = api_ref.capital_blocktrans_url + symbol
url = url + '&page=' + str(page)
url = url + '&size=' + str(size)
return utls.fetch(url)
"""
资金成交分布
获取资金成交分布数据
"""
def capital_assort(symbol):
url = api_ref.capital_assort_url + symbol
return utls.fetch(url)
"""
资金流向趋势
获取当日资金流如流出数据,每分钟数据
"""
def capital_flow(symbol):
url = api_ref.capital_flow_url + symbol
return utls.fetch(url)
"""
资金流向历史
获取历史资金流如流出数据,每日数据
"""
def capital_history(symbol, count=20):
url = api_ref.capital_history_url + symbol
url = url + '&count=' + str(count)
return utls.fetch(url)
```
#### File: pysnowball/pysnowball/realtime.py
```python
import json
import os
from pysnowball import cons
from pysnowball import api_ref
from pysnowball import utls
"""
实时行情
获取某支股票的行情数据
"""
def quotec(symbols):
url = api_ref.realtime_quote + symbols
return utls.fetch_without_token(url)
"""
实时分笔
获取实时分笔数据,可以实时取得股票当前报价和成交信息
"""
def pankou(symbol):
url = api_ref.realtime_pankou + symbol
return utls.fetch(url)
```
#### File: pysnowball/pysnowball/report.py
```python
from pysnowball import api_ref
from pysnowball import utls
"""
机构评级
获取机构评级数据
"""
def report(symbol):
url = api_ref.report_latest_url + symbol
return utls.fetch(url)
"""
业绩预告
按年度获取业绩预告数据
"""
def earningforecast(symbol):
url = api_ref.report_earningforecast_url + symbol
return utls.fetch(url)
```
#### File: pysnowball/test/test_datatable.py
```python
import unittest
import pandas as pd
import pysnowball as ball
import datatable as dt
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.width', 5000)
class DataTableTest(unittest.TestCase):
    """Smoke test for reading a CSV with the `datatable` package."""

    def test_datatable(self):
        dt1 = dt.fread("test.csv")
        # NOTE(review): a bare expression only displays in a REPL/notebook;
        # presumably print(dt1) was intended -- confirm.
        dt1
# Run this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
```
#### File: pysnowball/test/test_stock.py
```python
import unittest
import os
import pandas as pd
import pysnowball as ball
import numpy as np
from pandasgui import show
# import qgrid
# from pivottablejs import pivot_ui
# import tabloo
# import dash
# import dash_table
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.width', 5000)
class StockTest(unittest.TestCase):
    """Exploratory scripts around the Xueqiu (pysnowball) stock APIs.

    NOTE(review): these methods hit the network via ``pysnowball`` and open a
    pandasgui window, so they are interactive scripts rather than CI tests.
    """

    def test_batch_query_stock(self):
        # Quote a fixed basket of A-share symbols in one batched call.
        symbols = 'SH688027,SZ300363,SH603688,SZ300123,SZ300481,SH688226,SZ300040,SZ300830,SH600259,SH600468,SZ300260,SH688117,SZ300327,SH600490,SH603989,SZ002266,SH603993,SZ300082,SH603160,SH600610,SH603416,SZ002485,SZ300215,SH600667,SZ300638,SZ002706,SZ300972,SH601968,SH688536,SZ300835,SZ002508,SZ002959,SZ000550,SH688533,SZ002214,SH603633,SH688069,SH600460,SH601969,SH603088,SH603661,SZ300239,SH601236,SZ300595,SZ002645,SZ300625,SZ300722,SZ002860,SZ002759,SZ002155'
        batch_query_stock_df = ball.batch_query_stock(symbols)
        print(batch_query_stock_df)
        pass

    def test_list_free_item(self):
        # List the user's free (non-premium) watchlist groups.
        print(ball.list_free_item())

    def test_list_free_stock_by_id(self):
        # FIX: renamed from test_list_free_stock -- it was silently shadowed
        # by the later method of the same name and never ran.
        list_free_stock_df = ball.list_free_stock(4)
        print(list_free_stock_df)

    def test_list_free_item_info(self):
        list_free_stock_df = ball.list_free_stock(4)
        list_free_stock_df['assist'] = 1
        # symbols = list_free_stock_df['symbol'].groupby(list_free_stock_df['assist']).agg(','.join).astype(str)
        # print(symbols)
        # NOTE(review): empty symbol string -- the merge below yields no rows;
        # confirm whether the commented-out join above was meant to be used.
        symbols = ''
        batch_query_stock_df = ball.batch_query_stock(symbols)
        full_stock_info_df = pd.merge(list_free_stock_df, batch_query_stock_df, on='symbol')
        print(full_stock_info_df)

    def test_list_free_stock(self):
        """Fetch every stock of the '20210917KDJ' watchlist in batches of at
        most 50 symbols and show the merged result in pandasgui."""
        list_free_item_df = ball.list_free_item()
        list_free_item_df = list_free_item_df.query('name == "20210917KDJ"')
        print(list_free_item_df)
        pid = list_free_item_df.iloc[0]['id']
        list_free_stock_df = ball.list_free_stock(pid)
        # FIX: ceil-divide into batches of <=50 symbols; keep at least one
        # section so np.array_split never receives 0 (the old
        # int(n / 50) + 1 always produced one extra, undersized batch).
        list_free_stock_df_size = max(1, -(-list_free_stock_df.shape[0] // 50))
        sub_arys = np.array_split(list_free_stock_df, list_free_stock_df_size)
        # FIX: DataFrame.append was removed in pandas 2.0 -- collect frames
        # and concat once.
        frames = []
        for sub_list_free_stock_df in sub_arys:
            symbols = ','.join(sub_list_free_stock_df['symbol'].values.tolist())
            frames.append(ball.batch_query_stock(symbols))
        tmp_stock_df = pd.concat(frames)
        full_stock_info_df = pd.merge(list_free_stock_df, tmp_stock_df, on='symbol')
        # Other viewers previously tried here: qgrid, pivottablejs, tabloo,
        # dtale, dash_table.
        show(full_stock_info_df, settings={'block': True, 'theme': 'dark'})

    def test_query_stock(self):
        print(ball.batch_query_stock('SZ300626'))

    def test_stock_basic(self):
        # Chinese display names for the snapshot's indicator columns.
        indicators = {
            'pettm': '市盈率TTM',
            'roediluted': '净资产收益率',
            'bps': '每股净资产',
            'pelyr': '市盈率LYR',
            'npay': '净利润同比增长',
            'eps': '每股收益',
            'netprofit': '净利润',
            'dy_l': '股息收益率',
            'psr': '市销率(倍)',
            'pb': '市净率MRQ',
            'total_revenue': '营业收入',  # total_revenue.20210630
            'mc': '总市值',
            'fmc': '流通市值',
            'niota': '总资产报酬率',
            'oiy': '营业收入同比增长',
            'deal': '累计交易分享数',
            'follow7d': '一周新增关注',
            'deal7dpct': '一周交易分享增长率',
            'deal7d': '一周新增交易分享数',
            'tweet7dpct': '一周讨论增长率',
            'tweet': '累计讨论次数',
            'follow7dpct': '一周关注增长率',
            'follow': '累计关注人数',
            'tweet7d': '一周新增讨论数',
            'pct': '当日涨跌幅',
            'pct5': '近5日涨跌幅',
            'pct60': '近60日涨跌幅',
            'amount': '当日成交额',
            'chgpct': '当日振幅',
            'pct20': '近20日涨跌幅',
            'pct120': '近120日涨跌幅',
            'pct250': '近250日涨跌幅',
            'volume': '本日成交量',
            'current': '当前价',
            'volume_ratio': '当日量比',
            'pct_current_year': '年初至今涨跌幅',
            'pct10': '近10日涨跌幅',
            'tr': '当日换手率',
            'npana': '扣非净利润',
            'bc': '营业成本',
            'np': '归属于母公司所有者的净利润',
            'npt': '净利润',
            'tbc': '营业总成本',
            'bi': '营业收入',
            'mg': '少数股东损益归属于母公司所有者的净利润',
            'tbi': '营业总收入',
            'tp': '利润总额',
            'bp': '营业利润',
            'psf': '每股净资产',
            'ocps': '每股经营现金流',
            'upps': '每股未分配利润',
            'beps': '扣除非经常性损益后的每股收益',
            'epsdiluted': '每股收益',
            'epsweighted': '稀释每股收益',
            # FIX: a duplicate 'csps': '每股资本公积金' entry stood here; the
            # later 'csps': '资本公积' silently overrode it, so the shadowed
            # duplicate was removed (runtime mapping unchanged).
            'prec': '预收款项',
            'cur': '货币资金',
            'cl': '流动负债',
            'nca': '非流动资产',
            'ta': '资产合计',
            'pay': '应付账款',
            'csps': '资本公积',
            'inv': '存货',
            'eq': '归属于母公司股东权益',
            'rec': '应收账款',
            'li': '长期投资',
            'ncl': '非流动负债',
            'me': '少数股东权益',
            'tl': '负债合计',
            'ia': '无形资产',
            'the': '股东权益合计',
            'fa': '固定资产',
            'up': '未分配利润',
            'ca': '流动资产',
            'goodwill': '商誉',
            'bncf': '经营活动产生的现金流量净额',
            'fcb': '期末现金及现金等价物余额',
            'cnr': '现金及现金等价物净增加额',
            'incf': '投资活动产生的现金流量净额',
            'fncf': '筹资活动产生的现金流量净额'
        }
        stock_basic_df = pd.read_csv('stock20210918.csv', index_col=0)
        stock_basic_df.rename(columns=indicators, inplace=True)
        show(stock_basic_df, settings={'block': True, 'theme': 'dark'})
# Run this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
``` |
{
"source": "jiangrz/flower",
"score": 2
} |
#### File: tests/views/test_dashboard.py
```python
import time
from tests import AsyncHTTPTestCase
from tests.utils import task_succeeded_events, task_failed_events
from tests.utils import HtmlTableParser
from celery.events import Event
from celery.utils import uuid
from flower.events import EventsState
class DashboardTests(AsyncHTTPTestCase):
def setUp(self):
self.app = super(DashboardTests, self).get_app()
super(DashboardTests, self).setUp()
def get_app(self):
return self.app
def test_default_page(self):
r1 = self.get('/')
r2 = self.get('/dashboard')
self.assertEqual(r1.body, r2.body)
def test_no_workers(self):
r = self.get('/dashboard')
self.assertEqual(200, r.code)
self.assertIn('Load Average', str(r.body))
self.assertNotIn('<tr id=', str(r.body))
def test_unknown_worker(self):
r = self.get('/worker/unknown')
self.assertEqual(404, r.code)
self.assertIn('Unknown worker', str(r.body))
def test_single_workers_offline(self):
state = EventsState()
state.get_or_create_worker('worker1')
state.event(Event('worker-online', hostname='worker1',
local_received=time.time()))
state.event(Event('worker-offline', hostname='worker1',
local_received=time.time()))
self.app.events.state = state
r = self.get('/dashboard')
table = HtmlTableParser()
table.parse(str(r.body))
self.assertEqual(200, r.code)
self.assertEqual(1, len(table.rows()))
self.assertTrue(table.get_row('worker1'))
self.assertEqual(['worker1', 'False', '0', '0', '0', '0', '0', None],
table.get_row('worker1'))
self.assertFalse(table.get_row('worker2'))
def test_single_workers_online(self):
state = EventsState()
state.get_or_create_worker('worker1')
state.event(Event('worker-online', hostname='worker1',
local_received=time.time()))
self.app.events.state = state
r = self.get('/dashboard')
table = HtmlTableParser()
table.parse(str(r.body))
self.assertEqual(200, r.code)
self.assertEqual(1, len(table.rows()))
self.assertTrue(table.get_row('worker1'))
self.assertEqual(['worker1', 'True', '0', '0', '0', '0', '0', None],
table.get_row('worker1'))
self.assertFalse(table.get_row('worker2'))
def test_task_received(self):
state = EventsState()
state.get_or_create_worker('worker1')
state.get_or_create_worker('worker2')
events = [Event('worker-online', hostname='worker1'),
Event('worker-online', hostname='worker2'),
Event('task-received', uuid=uuid(), name='task1',
args='(2, 2)', kwargs="{'foo': 'bar'}",
retries=0, eta=None, hostname='worker1')]
for i, e in enumerate(events):
e['clock'] = i
e['local_received'] = time.time()
state.event(e)
self.app.events.state = state
r = self.get('/dashboard')
table = HtmlTableParser()
table.parse(str(r.body))
self.assertEqual(200, r.code)
self.assertEqual(2, len(table.rows()))
self.assertEqual(['worker1', 'True', '0', '1', '0', '0', '0', None],
table.get_row('worker1'))
self.assertEqual(['worker2', 'True', '0', '0', '0', '0', '0', None],
table.get_row('worker2'))
def test_task_started(self):
state = EventsState()
state.get_or_create_worker('worker1')
state.get_or_create_worker('worker2')
events = [Event('worker-online', hostname='worker1'),
Event('worker-online', hostname='worker2'),
Event('task-received', uuid='123', name='task1',
args='(2, 2)', kwargs="{'foo': 'bar'}",
retries=0, eta=None, hostname='worker1'),
Event('task-started', uuid='123', hostname='worker1')]
for i, e in enumerate(events):
e['clock'] = i
e['local_received'] = time.time()
state.event(e)
self.app.events.state = state
r = self.get('/dashboard')
table = HtmlTableParser()
table.parse(str(r.body))
self.assertEqual(200, r.code)
self.assertEqual(2, len(table.rows()))
self.assertEqual(['worker1', 'True', '0', '1', '0', '0', '0', None],
table.get_row('worker1'))
self.assertEqual(['worker2', 'True', '0', '0', '0', '0', '0', None],
table.get_row('worker2'))
def test_task_succeeded(self):
state = EventsState()
state.get_or_create_worker('worker1')
state.get_or_create_worker('worker2')
events = [Event('worker-online', hostname='worker1'),
Event('worker-online', hostname='worker2'),
Event('task-received', uuid='123', name='task1',
args='(2, 2)', kwargs="{'foo': 'bar'}",
retries=0, eta=None, hostname='worker1'),
Event('task-started', uuid='123', hostname='worker1'),
Event('task-succeeded', uuid='123', result='4',
runtime=0.1234, hostname='worker1')]
for i, e in enumerate(events):
e['clock'] = i
e['local_received'] = time.time()
state.event(e)
self.app.events.state = state
r = self.get('/dashboard')
table = HtmlTableParser()
table.parse(str(r.body))
self.assertEqual(200, r.code)
self.assertEqual(2, len(table.rows()))
self.assertEqual(['worker1', 'True', '0', '1', '0', '1', '0', None],
table.get_row('worker1'))
self.assertEqual(['worker2', 'True', '0', '0', '0', '0', '0', None],
table.get_row('worker2'))
def test_task_failed(self):
state = EventsState()
state.get_or_create_worker('worker1')
state.get_or_create_worker('worker2')
events = [Event('worker-online', hostname='worker1'),
Event('worker-online', hostname='worker2'),
Event('task-received', uuid='123', name='task1',
args='(2, 2)', kwargs="{'foo': 'bar'}",
retries=0, eta=None, hostname='worker1'),
Event('task-started', uuid='123', hostname='worker1'),
Event('task-failed', uuid='123', exception="KeyError('foo')",
traceback='line 1 at main', hostname='worker1')]
for i, e in enumerate(events):
e['clock'] = i
e['local_received'] = time.time()
state.event(e)
self.app.events.state = state
r = self.get('/dashboard')
table = HtmlTableParser()
table.parse(str(r.body))
self.assertEqual(200, r.code)
self.assertEqual(2, len(table.rows()))
self.assertEqual(['worker1', 'True', '0', '1', '1', '0', '0', None],
table.get_row('worker1'))
self.assertEqual(['worker2', 'True', '0', '0', '0', '0', '0', None],
table.get_row('worker2'))
def test_task_retried(self):
state = EventsState()
state.get_or_create_worker('worker1')
state.get_or_create_worker('worker2')
events = [Event('worker-online', hostname='worker1'),
Event('worker-online', hostname='worker2'),
Event('task-received', uuid='123', name='task1',
args='(2, 2)', kwargs="{'foo': 'bar'}",
retries=0, eta=None, hostname='worker1'),
Event('task-started', uuid='123', hostname='worker1'),
Event('task-retried', uuid='123', exception="KeyError('bar')",
traceback='line 2 at main', hostname='worker1'),
Event('task-failed', uuid='123', exception="KeyError('foo')",
traceback='line 1 at main', hostname='worker1')]
for i, e in enumerate(events):
e['clock'] = i
e['local_received'] = time.time()
state.event(e)
self.app.events.state = state
r = self.get('/dashboard')
table = HtmlTableParser()
table.parse(str(r.body))
self.assertEqual(200, r.code)
self.assertEqual(2, len(table.rows()))
self.assertEqual(['worker1', 'True', '0', '1', '1', '0', '1', None],
table.get_row('worker1'))
self.assertEqual(['worker2', 'True', '0', '0', '0', '0', '0', None],
table.get_row('worker2'))
def test_tasks(self):
state = EventsState()
state.get_or_create_worker('worker1')
state.get_or_create_worker('worker2')
state.get_or_create_worker('worker3')
events = [Event('worker-online', hostname='worker1'),
Event('worker-online', hostname='worker2')]
for i in range(100):
events += task_succeeded_events(worker='worker1')
for i in range(10):
events += task_succeeded_events(worker='worker3')
for i in range(13):
events += task_failed_events(worker='worker3')
for i, e in enumerate(events):
e['clock'] = i
e['local_received'] = time.time()
state.event(e)
self.app.events.state = state
r = self.get('/dashboard')
table = HtmlTableParser()
table.parse(str(r.body))
self.assertEqual(200, r.code)
self.assertEqual(3, len(table.rows()))
self.assertEqual(['worker1', 'True', '0', '100', '0', '100', '0', None],
table.get_row('worker1'))
self.assertEqual(['worker2', 'True', '0', '0', '0', '0', '0', None],
table.get_row('worker2'))
self.assertEqual(['worker3', 'True', '0', '23', '13', '10', '0', None],
table.get_row('worker3'))
``` |
{
"source": "Jiangshan00001/django_amis_render",
"score": 2
} |
#### File: django_amis_render/django_amis_render/admin.py
```python
from django.contrib import admin
from .models import AmisRenderList, AmisRenderApp
from django.utils.html import format_html
from django.urls import path
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render
from .amis_update import update_amis_editor_to_local, update_amis_local_to_editor_one_file, get_amis_json_file_path, get_amis_json_file_content
# Register your models here.
from .auto_add import auto_add, auto_del
from .auto_add_app import auto_add_app, update_read_from_autourls, update_write_to_autourls,update_auto_read_write
class AmisRenderAppAdmin(admin.ModelAdmin):
    """Admin changelist for AmisRenderApp with per-row actions that sync an
    app's amis pages with its ``auto_urls.py``."""

    change_list_template = "html/auto_add_app_list.html"

    def get_urls(self):
        """Prepend the custom action endpoints to the default admin URLs."""
        urls = super().get_urls()
        my_urls = [
            path('auto_add_app_action/', self.auto_add_app_action),
            path('update_app_auto/<int:id>/', self.update_auto),
            path('update_app_read/<int:id>/', self.update_read),
            path('update_app_write/<int:id>/', self.update_write),
        ]
        return my_urls + urls

    def update_auto(self, request, id):
        """Auto-sync (read then write) the app's routes with auto_urls.py."""
        print('update_auto', id)
        apps = AmisRenderApp.objects.filter(id=id).all()
        if len(apps) > 0:
            msg = update_auto_read_write(apps[0].app_name)
            self.message_user(request, msg)
        return HttpResponseRedirect("../../")

    def update_read(self, request, id):
        """Import the app's routes from its auto_urls.py."""
        print('update read', id)
        apps = AmisRenderApp.objects.filter(id=id).all()
        if len(apps) > 0:
            msg = update_read_from_autourls(apps[0].app_name, 1)
            self.message_user(request, msg)
        return HttpResponseRedirect("../../")

    def update_write(self, request, id):
        """Export the app's routes into its auto_urls.py."""
        print('update write', id)
        apps = AmisRenderApp.objects.filter(id=id).all()
        if len(apps) > 0:
            msg = update_write_to_autourls(apps[0].app_name)
            self.message_user(request, msg)
        return HttpResponseRedirect("../../")

    def auto_add_app_action(self, request):
        """Discover installed apps and register them as AmisRenderApp rows."""
        add_cnt = auto_add_app()
        self.message_user(request, str(add_cnt))
        return HttpResponseRedirect("../")

    def button_link_auto(self, obj):
        if obj.page_count is None:
            return ''
        # FIX: pass obj.id as a format_html argument so it is escaped,
        # instead of %-interpolating it into the markup first.
        return format_html('<a href="update_app_auto/{}/" > 自动更新 </a>', obj.id)

    def button_link_read(self, obj):
        if obj.page_count is None:
            return ''
        return format_html('<a href="update_app_read/{}/" > 读 </a>', obj.id)

    def button_link_write(self, obj):
        if obj.page_count is None:
            return ''
        return format_html('<a href="update_app_write/{}/" > 写 </a>', obj.id)

    button_link_auto.short_description = "自动"
    button_link_read.short_description = "从auto_urls.py读出"
    button_link_write.short_description = "写入auto_urls.py"

    list_display = ['id', 'app_name', 'page_count', 'button_link_auto', 'button_link_read', 'button_link_write']
class AmisRenderListAdmin(admin.ModelAdmin):
    """Admin changelist for AmisRenderList: open/edit amis pages and sync the
    page registry with the JSON files on disk."""

    change_list_template = "html/auto_add_list.html"
    list_filter = ('app_name',)

    def get_urls(self):
        """Prepend the custom action endpoints to the default admin URLs."""
        urls = super().get_urls()
        my_urls = [
            path('auto_add_action/', self.auto_add_action),
            path('auto_del_action/', self.auto_del_action),
            path('update_amis_editor_to_local/', self.amis_editor_to_local),
            path('amis_to_editor/<int:id>/', self.amis_to_editor),
            path('amis_to_editor_and_jump/<int:id>/', self.amis_to_editor_and_jump),
            path('amis_edit_api/<int:id>/', self.amis_edit_api),
        ]
        return my_urls + urls

    def amis_edit_api(self, request, id):
        """Render the API-editing page for one registered amis JSON file."""
        arl = AmisRenderList.objects.get(id=id)
        url_name = arl.url_name
        file_path = arl.file_path
        amis_json_file_path = get_amis_json_file_path(arl.id)
        cont = get_amis_json_file_content(amis_json_file_path)
        return render(request, "amis_edit_api.html",
                      context={'file_path': file_path, 'url_name': url_name, 'id': arl.id, 'file_content': cont})

    def amis_to_editor(self, request, id):
        """Push one local amis JSON file into the editor's store."""
        return update_amis_local_to_editor_one_file(id)

    def amis_to_editor_and_jump(self, request, id):
        # FIX: Django passes the request as the first view argument; without
        # the `request` parameter this raised
        # "got multiple values for argument 'id'".
        update_amis_local_to_editor_one_file(id)
        return HttpResponseRedirect("/static/amis-editor-demo/index.html")

    def amis_editor_to_local(self, request):
        """Persist editor changes (posted in ``request.POST``) to the local
        JSON files.

        Example payload::

            <QueryDict: {'store': ['{"pages":[{"id":"1","path":"jihua",
            "label":"jihua.json","schema":{...}}], ...}']}>
        """
        update_amis_editor_to_local(request)
        print('update_amis_editor_to_local', request)
        return HttpResponseRedirect("../")

    def auto_add_action(self, request):
        """Scan the file system and register newly found amis files."""
        add_cnt = auto_add()
        self.message_user(request, "自动添加文件数:" + str(add_cnt))
        return HttpResponseRedirect("../")

    def auto_del_action(self, request):
        """Drop registry rows whose backing files no longer exist."""
        add_cnt = auto_del()
        self.message_user(request, "自动删除已经不存在文件的路径数:" + str(add_cnt))
        return HttpResponseRedirect("../")

    def no_find_url_name_message(self, request, id):
        # FIX: message_user(request, message) -- the old call passed the
        # message text as the request and the remaining fragments as extra
        # positional arguments.
        self.message_user(
            request,
            '未找到路径。请查看:APP:' + str(AmisRenderList.app_name)
            + '是否有在全局urls.py中注册urls,并且app内部的urls里有写 from .auto_urls import *')
        return HttpResponseRedirect("../../")

    def button_link(self, obj):
        from django.urls import reverse
        # Compute the label outside the try so the except branch can always
        # reference it (it was previously assigned inside the try).
        app_and_url_name = str(obj.app_name) + ':' + str(obj.url_name)
        try:
            link_to = reverse(str(obj.url_name), current_app=obj.app_name)
            return format_html('<a class="changelink" href="{}">打开</a>', link_to)
        except Exception:
            # format_html escapes the interpolated names, unlike the old
            # %-then-format_html pattern.
            return format_html(
                '<a >未找到页面 {}. 请确认{}.urls.py设置到项目的urls.py中,且{}.urls.py 里面有 from .auto_urls import *</a>',
                app_and_url_name, str(obj.app_name), str(obj.app_name))

    button_link.short_description = "打开页面"

    def button_link_edit(self, obj):
        return format_html(
            '<input type="button" value="编辑" onclick="update_amis_local_to_editor({})" />', obj.id)

    button_link_edit.short_description = "编辑组件"

    def button_link_edit_api(self, obj):
        # Edit the api/initApi fields of the page's inner components.
        return format_html('<a class="changelink" href="amis_edit_api/{}/">编API</a>', obj.id)

    button_link_edit_api.short_description = "编辑API"

    # 'file_path' intentionally omitted from the listing.
    list_display = ['id', 'app_name', 'page_url', 'file_type', 'button_link_edit', 'button_link_edit_api', 'button_link']
    # actions = [make_published]

    class Media:
        # Extra changelist assets: jQuery plus the editor-sync button script.
        js = ('django_amis_render/jquery-3.6.0.min.js', 'django_amis_render/add_button.js')
# Hook both models into the Django admin with their customised ModelAdmins.
admin.site.register(AmisRenderList, AmisRenderListAdmin)
admin.site.register(AmisRenderApp, AmisRenderAppAdmin)
``` |
{
"source": "Jiangshan00001/django_rest_admin",
"score": 2
} |
#### File: django_rest_admin/django_rest_admin/get_app_url_base.py
```python
__author__ = "songjiangshan"
__copyright__ = "Copyright (C) 2021 songjiangshan \n All Rights Reserved."
__license__ = ""
__version__ = "1.0"
from django.urls import reverse
def get_app_url_base(url_name='django_rest_admin_default_list'):
    """Return the URL prefix under which *url_name* is mounted.

    Resolves the named route, strips a single trailing slash, then cuts off
    the last path segment -- e.g. ``/rest_admin/default_list/`` yields
    ``/rest_admin``.
    """
    resolved = reverse(url_name)
    if resolved.endswith('/'):
        resolved = resolved[:-1]
    # Everything before the last '/' is the app's base path.
    return resolved[:resolved.rfind('/')]
```
#### File: django_rest_admin/migrations/0001_initial.py
```python
from django.db import migrations, models
import django.db.models.deletion
import django_rest_admin.model_field
class Migration(migrations.Migration):
    """Initial schema for django_rest_admin.

    Creates:
      * RouteExec     -- one row per auto-generated REST CRUD route
                         (table ``rest_admin_table_crud``).
      * ComputedField -- Python code snippets attached to a RouteExec as
                         computed serializer fields
                         (table ``rest_admin_table_crud_computed_field``).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Registry of REST routes generated from database tables.  Most JSON
        # columns use the package's custom JSONField/CodeField types.
        migrations.CreateModel(
            name='RouteExec',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('table_name', models.TextField(blank=True, help_text='the table name. eg: danwei. ONLY needed if inspected_from_db=1', null=True)),
                ('inspected_from_db', models.IntegerField(blank=True, help_text='是否需要将库中表导入为django的model。1,0. eg: 0 ', null=True)),
                ('is_managed', models.TextField(blank=True, help_text='导出数据的managed属性. True或False。只有在inspected_from_db=1时才有效', null=True)),
                ('route', models.TextField(blank=True, help_text='the route name. eg: /Danwei', null=True)),
                ('table_big_name', models.TextField(blank=True, help_text='the model name of a table. eg: Danwei', null=True)),
                ('ordering_fields', django_rest_admin.model_field.JSONField(blank=True, help_text='可用排序字段,数组,每个项是字符串. eg:["id", "name"]', null=True)),
                ('ordering', django_rest_admin.model_field.JSONField(blank=True, help_text='默认排序字段,数组,每个项是字符串. eg:["id", "name"]', null=True)),
                ('import_py_code', models.TextField(blank=True, help_text='needed if the model need to import from somewhere else. eg:from django.auth import User', null=True)),
                ('foreign_key_ro', django_rest_admin.model_field.JSONField(blank=True, help_text='外键关联的只读内容. eg: {"article_name":"article.name"}', null=True)),
                ('no_need_login', models.IntegerField(blank=True, help_text='访问是否需要登陆。1,0. eg: 0 ', null=True)),
                ('search_fields', django_rest_admin.model_field.JSONField(blank=True, help_text='可用排序字段,数组,每个项是字符串. eg: ["name", "zhujima"]', null=True)),
                ('model_object_list', django_rest_admin.model_field.JSONField(blank=True, help_text='可显示字段. eg: ["id", "name", "daima"]', null=True)),
                ('filter_keys', django_rest_admin.model_field.JSONField(blank=True, help_text='过滤项. eg: [{"filter_name": "name", "field_name": "name", "filter_type": "text", "lookup_expr": "icontains"}]', null=True)),
                ('foreign_slug_kf', django_rest_admin.model_field.JSONField(blank=True, help_text='一对多外键关联的字段列表. eg: {"StatKucun_wuliao": ["kucun", "jinjia"]}', null=True)),
                ('foreign_key_id', django_rest_admin.model_field.JSONField(blank=True, help_text='此属性只读,无需修改。 外键定义.关联删除属性:', null=True)),
                ('params', django_rest_admin.model_field.JSONField(blank=True, help_text='AUTO-GENERATED FIELD. DO NOT EDIT THIS MANUALLY.', null=True)),
                ('desc', models.TextField(blank=True, help_text='表功能描述. eg: this is the rest description', null=True)),
            ],
            options={
                'verbose_name_plural': 'table-REST-CRUD',
                'db_table': 'rest_admin_table_crud',
                'managed': True,
            },
        ),
        # Per-route code snippets; cascade-deleted with their RouteExec.
        migrations.CreateModel(
            name='ComputedField',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('func_text', django_rest_admin.model_field.CodeField(blank=True, help_text='py函数.eg: def jine(self):return 0', null=True)),
                ('route_exec', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_rest_admin.routeexec')),
            ],
            options={
                'verbose_name_plural': '计算函数字段',
                'db_table': 'rest_admin_table_crud_computed_field',
                'managed': True,
            },
        ),
    ]
```
#### File: django_rest_admin/django_rest_admin/my_rest_api.py
```python
__author__ = "songjiangshan"
__copyright__ = "Copyright (C) 2021 songjiangshan \n All Rights Reserved."
__license__ = ""
__version__ = "1.0"
import json
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework import viewsets
from rest_framework import serializers
from rest_framework import routers
from django_filters import rest_framework as filters
from rest_framework import filters as rest_framework_filter
import django_filters
import django_filters.rest_framework
'''
封装 对表的增删改查 rest_api
'''
# curr_view = my_rest_apiB(PickNumber, 'PickNumber', ["id", 'team_name', 'pick_number', 'team'], {'team_name':'team.name'}, {'team':Team})
def my_rest_viewsetB(model_object, model_class_name, model_obj_list='__all__', foreign_key_ro={}, foreign_key_id={},
                     filter_fieldsA=(), no_need_login=False, optional_fields1=[], search_fieldsA=[], orderingA=[],
                     ordering_fieldsA=[], filter_keys=[], foreign_slug_kf={}):
    """Build and return a DRF ``ModelViewSet`` class for *model_object*.

    Dynamically assembles a serializer (with optional read-only FK display
    fields, writable FK id fields and read-only nested reverse relations), a
    django-filter ``FilterSet`` from ``filter_keys``, and a view class wiring
    them together with search/ordering backends.

    :param model_object: the Django model class to expose.
    :param model_class_name: name used when renaming the generated classes
        (usually the model's name as a string).
    :param model_obj_list: visible field names, e.g. ``['id', 'name']``, or
        ``'__all__'``.
    :param foreign_key_ro: read-only FK display fields,
        e.g. ``{'team_name': 'team.name'}``.
    :param foreign_key_id: writable FK fields, e.g. ``{'team': Team}``.
    :param filter_fieldsA: legacy filter field list (currently unused).
    :param no_need_login: if truthy, disable authentication and permissions
        on the generated view.
    :param optional_fields1: serializer fields marked optional/nullable for
        writes.
    :param search_fieldsA: fields for DRF's SearchFilter.
    :param orderingA: default ordering.
    :param ordering_fieldsA: fields accepted in ``?ordering=``.
    :param filter_keys: filter specs, e.g.
        ``[{'filter_name': 'name', 'field_name': 'name',
        'filter_type': 'text', 'lookup_expr': 'icontains'}]``.
    :param foreign_slug_kf: reverse one-to-many relations to nest read-only,
        e.g. ``{'StatKucun_wuliao': ['kucun', 'jinjia']}``.
    :return: the generated ``ModelViewSet`` subclass.

    NOTE(review): the dict/list parameter defaults are shared mutable
    objects, and ``model_obj_list`` is appended to in place -- callers
    passing their own list will see it grow.
    """
    if filter_keys is None:
        filter_keys = []
    # Ensure the FK display fields are part of the visible field list.
    if (foreign_key_ro is not None) and (len(foreign_key_ro) > 0) and (model_obj_list is not None) and (model_obj_list != '__all__'):
        for i in foreign_key_ro:
            if i not in model_obj_list:
                model_obj_list.append(i)
    # Same for the writable FK id fields.
    if (foreign_key_id is not None) and (len(foreign_key_id) > 0) and (model_obj_list is not None) and (
            model_obj_list != '__all__'):
        for i in foreign_key_id:
            if i not in model_obj_list:
                model_obj_list.append(i)

    def add_foreign_serializer(ModName, fields_list):
        # Nested read-only serializer for the related model *ModName*; the
        # model class is looked up in this module's globals().
        class TrackSerializer(serializers.ModelSerializer):
            class Meta:
                model = globals()[ModName]
                fields = fields_list
        TrackSerializer.__name__ = ModName + "_in_" + model_class_name
        return TrackSerializer

    def AddVarFunc(self, foreign_key_ro, foreign_key_id, foreign_slug_kf):
        # *self* is the class-body namespace dict (locals() of the serializer
        # class below); assigning into it injects serializer fields as if
        # they had been declared inline in the class body.
        if foreign_key_ro is not None:
            for i in foreign_key_ro:
                #setattr(self, i, serializers.CharField(source=foreign_key_ro[i], read_only=True))
                self[i] = serializers.CharField(source=foreign_key_ro[i], read_only=True)
        if foreign_key_id is not None:
            for i in foreign_key_id:
                #setattr(self, i,
                #        serializers.PrimaryKeyRelatedField(queryset=foreign_key_id[i].objects.all(), read_only=False))
                self[i] = serializers.PrimaryKeyRelatedField(queryset=foreign_key_id[i].objects.all(), read_only=False, required=False, allow_null=True)
        if foreign_slug_kf is not None:
            for i in foreign_slug_kf:
                # Key convention '<RelatedModel>_<fk_field>': the part before
                # the first '_' is the related model's name.
                self[i] = add_foreign_serializer(i.split('_')[0], foreign_slug_kf[i])(many=True, read_only=True)
                #self[i] = serializers.SlugRelatedField( many=True,
                #                                        read_only=True,
                #                                        slug_field=foreign_slug_kf[i])
        return 'A'

    class TeamPickNumberSerializer(serializers.ModelSerializer):
        # Dummy assignment executed for its side effect: AddVarFunc mutates
        # this class body's locals() to add the dynamic fields.
        useless_cc = AddVarFunc(locals(), foreign_key_ro, foreign_key_id, foreign_slug_kf)
        #team_name = serializers.CharField(source='team.name', read_only=True)  # team_name: text shown on the page
        #team = serializers.PrimaryKeyRelatedField(queryset=model_object.objects.all(), read_only=False)  # team id used for writes

        class Meta:
            model = model_object
            fields = model_obj_list
            #fields = '__all__'
            extra_kwargs = {i: {"required": False, "allow_null": True} for i in optional_fields1}

    class GoodsFilter(django_filters.rest_framework.FilterSet):
        # Runs at class-creation time: each well-formed spec in filter_keys
        # becomes a filter attribute via the class body's locals() dict.
        for i in filter_keys:
            if ('filter_type' not in i) or ('filter_name' not in i) or ('field_name' not in i) or (
                    'lookup_expr' not in i):
                print('WARN:filter no type will not effect', i, model_class_name)
                continue
            if i['filter_type'] == 'number':
                locals()[i['filter_name']] = django_filters.NumberFilter(field_name=i['field_name'], lookup_expr=i['lookup_expr'])
            elif i['filter_type'] == 'text':
                locals()[i['filter_name']] = django_filters.CharFilter(field_name=i['field_name'], lookup_expr=i['lookup_expr'])
            elif i['filter_type'] == 'bool':
                locals()[i['filter_name']] = django_filters.BooleanFilter(field_name=i['field_name'], lookup_expr=i['lookup_expr'])
            elif i['filter_type'] == 'date_range':
                locals()[i['filter_name']] = django_filters.DateFromToRangeFilter(field_name=i['field_name'], lookup_expr='range')
            elif i['filter_type'] == 'time_range':
                locals()[i['filter_name']] = django_filters.TimeRangeFilter(field_name=i['field_name'],
                                                                            lookup_expr='range')
            elif i['filter_type'] == 'date':
                locals()[i['filter_name']] = django_filters.DateFilter(field_name=i['field_name'], lookup_expr=i['lookup_expr'])
            elif i['filter_type'] == 'time':
                locals()[i['filter_name']] = django_filters.TimeFilter(field_name=i['field_name'], lookup_expr=i['lookup_expr'])
            elif i['filter_type'] == 'datetime':
                locals()[i['filter_name']] = django_filters.DateTimeFilter(field_name=i['field_name'], lookup_expr=i['lookup_expr'])
            elif i['filter_type'] == 'isodatetime':
                locals()[i['filter_name']] = django_filters.IsoDateTimeFilter(field_name=i['field_name'],
                                                                              lookup_expr=i['lookup_expr'])
            else:
                print('WARN: unknown filter type:', i['filter_type'], model_class_name, i)

        class Meta:
            model = model_object
            fields = [fk1['filter_name'] for fk1 in filter_keys]

    GoodsFilter.__name__ = model_class_name + 'SFilter'

    class TeamPickNumberView(viewsets.ModelViewSet):
        filterset_class = GoodsFilter
        #filter_fields = filter_fieldsA
        queryset = model_object.objects.all()
        serializer_class = TeamPickNumberSerializer
        filter_backends = [django_filters.rest_framework.DjangoFilterBackend, rest_framework_filter.SearchFilter, rest_framework_filter.OrderingFilter]
        search_fields = search_fieldsA
        ordering = orderingA
        ordering_fields = ordering_fieldsA

    if no_need_login:
        # print('no_need_login for:', model_class_name)
        TeamPickNumberView.authentication_classes = ()
        TeamPickNumberView.permission_classes = ()
    # Rename the generated classes so debug output identifies the model.
    TeamPickNumberView.__name__ = model_class_name + 'View'
    TeamPickNumberSerializer.__name__ = model_class_name + 'Serial'
    return TeamPickNumberView
from rest_framework.renderers import JSONRenderer
class EmberJSONRenderer(JSONRenderer):
    """JSON renderer that wraps every response payload in the amis-style
    envelope ``{"status": 0, "msg": "", "data": {"items": <payload>}}``
    before delegating to the stock DRF JSONRenderer.
    """

    def render(self, data, accepted_media_type=None, renderer_context=None):
        # Build the envelope first, then let the parent class serialize it.
        envelope = {'status': 0, 'msg': '', 'data': {'items': data}}
        return super().render(envelope, accepted_media_type, renderer_context)
```
#### File: django_rest_admin/django_rest_admin/update_models.py
```python
__author__ = "songjiangshan"
__copyright__ = "Copyright (C) 2021 songjiangshan \n All Rights Reserved."
__license__ = ""
__version__ = "1.0"
import io
from .models import RouteExec, ComputedField
import json
from django.http import HttpResponse
def parse_params(params_str):
    """Decode a JSON parameter blob into a Python object.

    Accepts ``None`` or an empty string (both yield ``{}``), an
    already-parsed dict (returned unchanged), or a JSON string.
    Malformed JSON is reported on stdout and yields ``{}`` instead of
    raising, so callers never have to guard the parse.
    """
    if params_str is None:
        return {}
    if len(params_str) == 0:
        return {}
    if isinstance(params_str, dict):
        # Already parsed upstream; pass through untouched.
        return params_str
    try:
        return json.loads(params_str)
    except Exception as exc:
        # Best-effort: report and fall back to an empty dict.
        print(exc)
        print('param parse error')
        print(params_str)
        return {}
def foreign_key_gen(table_name, related_name, del_type='CASCADE'):
    """Render Django ``models.ForeignKey(...)`` source text for code generation.

    :param table_name: target model name (or ``"self"`` for self-reference).
    :param related_name: value for the ForeignKey ``related_name`` argument.
    :param del_type: on_delete policy name — CASCADE, SET_NULL or DO_NOTHING.
    """
    return (
        "models.ForeignKey(to={0}, db_constraint=False, on_delete=models.{1}, "
        "blank=True, null=True, related_name='{2}')"
    ).format(table_name, del_type, related_name)
def str_to_obj(obj):
    """Parse a JSON string into a Python object; ``None`` passes through."""
    return None if obj is None else json.loads(obj)
def params_foreign_key_id_update(one_r):
    """Normalize legacy ``foreign_key_id`` entries on a route record, in place.

    Older records stored each foreign key as a bare ``"TableName"`` string;
    the current format is ``["TableName", on_delete]``.  Bare strings are
    upgraded to ``[value, "CASCADE"]`` and the JSON blob is re-serialized
    only when something actually changed.  Returns the (mutated) record.
    """
    raw = one_r['foreign_key_id']
    if raw is None or raw == '':
        # Nothing to normalize.
        return one_r
    fk = str_to_obj(raw)
    changed = False
    for key in fk:
        if isinstance(fk[key], str):
            # Legacy format: promote to [table, on_delete] with the default policy.
            fk[key] = [fk[key], 'CASCADE']
            changed = True
    if changed:
        one_r['foreign_key_id'] = json.dumps(fk, indent=2)
    return one_r
def params_update_list(all_one_list):
    """Run :func:`params_updata` over every record, returning a new list."""
    return [params_updata(record) for record in all_one_list]
def params_updata(one_r: dict):
    """Fold a record's individual JSON columns into its ``params`` blob.

    First normalizes the legacy ``foreign_key_id`` format, then parses each
    configuration column, drops ``None`` entries, and stores the combined
    dict as JSON under ``one_r['params']``.  Returns the mutated record.
    """
    one_r = params_foreign_key_id_update(one_r)
    # Keys folded into 'params'; order here fixes the JSON key order.
    key_order = ('foreign_key_id', 'foreign_key_ro', 'foreign_slug_kf',
                 'ordering_fields', 'ordering', 'no_need_login',
                 'search_fields', 'filter_keys', 'model_object_list')
    params = {}
    for key in key_order:
        # 'no_need_login' is stored as a plain value, not a JSON string.
        value = one_r[key] if key == 'no_need_login' else str_to_obj(one_r[key])
        if value is not None:
            params[key] = value
    one_r['params'] = json.dumps(params)
    return one_r
def update_models(all_rest_dict_list):
    """
    Regenerate the target app's models.py from the database schema.

    Steps:
    1. Collect table names from the RouteExec records and run Django's
       ``inspectdb`` command to produce default model source.
    2. Rewrite ``*_id`` columns listed in each record's ``foreign_key_id``
       params into proper ForeignKey fields.
    3. Comment out any explicit ``id`` field — a leftover ``id`` column in
       some exported tables would otherwise break Django.

    :param all_rest_dict_list: list of route records (dicts) describing the
        tables to inspect and how to post-process their generated models.
    :return: the literal string 'ok'.
    """
    from django.core.management import call_command
    from django.conf import settings
    import os
    path1 = os.path.join(settings.BASE_DIR, settings.DJANGO_REST_ADMIN_TO_APP)
    file_name = os.path.join(path1,'models.py')
    print(file_name)
    # Capture inspectdb output into a StringIO buffer.
    f = io.StringIO()
    f.write("from django.contrib.auth.models import User\n")
    #f.write("from AmisNavigationBar.models import AmisNavigation\n")
    table_list = []
    for i in all_rest_dict_list:
        if i['inspected_from_db'] !=1:
            continue
        table_list.append(i['table_name'])
        if i['import_py_code'] is not None:
            f.write(i['import_py_code']+'\n')
    # Drop duplicate table names.
    table_list = list(set(table_list))
    if len(table_list) > 0:
        call_command("inspectdb", table_list, stdout=f)
    else:
        f.write('#no table exist in route_exec\n')
    # Read everything generated so far into one string for line-by-line rewriting.
    models_new = f.getvalue()
    f.close()
    # Lookup used below when rewriting foreign-key fields:
    # key:   class name
    # value: { "<field>_id" key from foreign_key_id : [target table, on_delete] }
    foreign_key_dict2 = {}
    for one_r in all_rest_dict_list:
        # if one_r.re_type == 'table':
        #one_r = params_updata(one_r)
        params = parse_params(one_r['params'])
        if one_r['inspected_from_db'] != 1:
            continue
        if 'foreign_key_id' in params:
            foreign_key_dict2[one_r['table_big_name']] = {}
            for k in params['foreign_key_id']:
                foreign_key_dict2[one_r['table_big_name']][k] = params['foreign_key_id'][k]
    print('update_models foreign_key_dict2:', foreign_key_dict2)
    all_rest_dict_dict={i['table_big_name']:i for i in all_rest_dict_list}
    # Name of the model class currently being processed.
    curr_class_name = ''
    f2 = open(file_name, 'w+')
    for one_line in models_new.split('\n'):
        # Analyze and rewrite the generated source one line at a time.
        if len(one_line.strip()) == 0:
            # Blank line: copy through unchanged.
            f2.write(one_line + "\n")
            continue
        one_line_start_space = len(one_line) - len(one_line.lstrip())
        one_line_striped = one_line.strip()
        if one_line_striped[0] == '#':
            # Comment line: copy through unchanged.
            f2.write(one_line + "\n")
            continue
        spt = one_line_striped.split(' ')
        if len(spt) == 0:
            # No spaces — unrecognized line; copy through unchanged.
            f2.write(one_line + "\n")
            continue
        if (one_line_start_space==0) and (spt[0] == 'class'):
            # Top-level class: remember the model class name.
            curr_class_name = spt[1].split('(')[0]
            f2.write(one_line + "\n")
            continue
        elif spt[0] == 'class':
            # Inner class (Meta): insert any computed-field code just before it.
            re1 = RouteExec.objects.filter(table_big_name=curr_class_name, inspected_from_db=1).all()
            if len(re1)<1:
                print('skip this table',curr_class_name, len(re1))
                f2.write(one_line + "\n")
                continue
            cf = ComputedField.objects.filter(route_exec=re1[0]).all()
            for cfi in cf:
                f2.write(' ' * one_line_start_space+cfi.func_text.replace('\r\n','\n'))
                f2.write(' ' * one_line_start_space+"\n")
            f2.write(one_line + "\n")
            continue
        curr_field_name = spt[0]
        if curr_field_name=='id':
            # Comment out explicit 'id' fields to avoid Django errors.
            f2.write(' ' * one_line_start_space )
            f2.write('#' + one_line)
            f2.write('\n')
            continue
        # Handle the Meta 'managed = ...' line.
        if curr_field_name == 'managed' and len(spt) == 3:
            if curr_class_name not in all_rest_dict_dict:
                # Class not found in route records: copy through unchanged.
                print('django_rest_admin:skip class:', curr_class_name)
                f2.write(one_line + "\n")
                continue
            if 'is_managed' not in all_rest_dict_dict[curr_class_name]:
                # No is_managed setting for this class: copy through unchanged.
                print('django_rest_admin:skip class2:', curr_class_name)
                f2.write(one_line + "\n")
                continue
            curr_param = all_rest_dict_dict[curr_class_name]['is_managed']
            if curr_param=='True':
                # Force managed = True per the route record.
                f2.write(' ' * one_line_start_space + "managed = True\n")
            else:
                # Force managed = False per the route record.
                f2.write(' ' * one_line_start_space + "managed = False\n")
            continue
        if curr_class_name not in foreign_key_dict2:
            # Current class has no foreign keys: copy through unchanged.
            f2.write(one_line + "\n")
            continue
        # All configured foreign keys for the current class.
        foreign_key_dict = foreign_key_dict2[curr_class_name]
        # Is the current field one of them?
        if curr_field_name in foreign_key_dict:
            # Replace this line with generated ForeignKey code.
            f2.write(' ' * one_line_start_space)
            f2.write(curr_field_name.replace('_id', ''))
            f2.write(' = ')
            print(curr_class_name, curr_field_name, foreign_key_dict[curr_field_name][0])
            if foreign_key_dict[curr_field_name][0] == curr_class_name:
                # Self-referential foreign key.
                itable_name = '"self"'
            else:
                itable_name = foreign_key_dict[curr_field_name][0]
            irelated_name = curr_class_name +'_'+ curr_field_name.replace('_id', '')
            print('irelated_name', irelated_name)
            idel_typ = foreign_key_dict[curr_field_name][1]
            f2.write(foreign_key_gen(itable_name, irelated_name, idel_typ))
            f2.write('\n')
            continue
        f2.write(one_line + "\n")
        continue
    # Model source written; append the receiver module if one exists.
    file_name_receiver = os.path.join(path1,'models_receiver.py')
    if os.path.exists(file_name_receiver):
        f_recv = open(file_name_receiver, 'r')
        f2.write(f_recv.read())
    f2.close()
    return 'ok'
``` |
{
"source": "Jiangshan00001/easyeda_to_pads",
"score": 2
} |
#### File: easyeda_to_pads/src/easy_to_pads.py
```python
__author__ = "songjiangshan"
__copyright__ = "Copyright (C) 2021 songjiangshan \n All Rights Reserved."
__license__ = ""
__version__ = "1.0"
import datetime
from pads_ascii import PadsAscii
def decl_add_shape_text(dshape, easy, pads: PadsAscii):
    """Copy a TEXT shape onto the PADS decal's top silkscreen layer.

    Shapes on any other EasyEDA layer are ignored.  The stroke width is
    capped at 50 (presumably a PADS text-width limit — behavior inherited
    from the original implementation).
    """
    if dshape['layer_id'] != easy.TOP_SILK_LAYER:
        return
    decal = easy.pDetail['decl_name']
    stroke = min(dshape['font_size'], 50)
    pads.add_txt(decal, text=dshape['text'], x=dshape['x'], y=dshape['y'],
                 rotation=dshape['rotation'], layer=pads.TOP_SILK_LAYER,
                 height=dshape['font_size'], width=stroke,
                 mirror=dshape['mirror'], fontinfo="\"Regular 宋体\"")
def decl_add_shape_rect(dshape, easy, pads):
    """Draw a RECT shape as a closed 5-point outline on the top silkscreen.

    EasyEDA gives the rectangle's top-left corner plus width/height with
    the y axis already flipped, so the opposite corner is (x+w, y-h).
    Shapes on other layers are ignored.
    """
    if dshape['layer_id'] != easy.TOP_SILK_LAYER:
        return
    decal = easy.pDetail['decl_name']
    left = dshape['x']
    top = dshape['y']
    right = round(dshape['x'] + dshape['width'], 5)
    bottom = round(dshape['y'] - dshape['height'], 5)
    # Closed polyline: repeat the starting corner at the end.
    outline = [[left, top],
               [left, bottom],
               [right, bottom],
               [right, round(top, 5)],
               [left, top]]
    pads.add_pieces(decal, type='CLOSED', numcoord=5,
                    width=dshape['stroke_width'],
                    layer=pads.TOP_SILK_LAYER, linestyle=-1,
                    coord_list=outline)
def decl_add_shape_circle(dshape, easy, pads):
    """Draw a CIRCLE shape on the top silkscreen layer.

    PADS encodes a circle piece with two coordinates: the east-most and
    west-most points of the circle (cx±r at cy).  Other layers are ignored.
    """
    if dshape['layer_id'] != easy.TOP_SILK_LAYER:
        return
    decal = easy.pDetail['decl_name']
    cy = dshape['cy']
    east = round(dshape['cx'] + dshape['r'], 5)
    west = round(dshape['cx'] - dshape['r'], 5)
    pads.add_pieces(decal, type='CIRCLE', numcoord=2,
                    width=dshape['stroke_width'],
                    layer=pads.TOP_SILK_LAYER, linestyle=-1,
                    coord_list=[[east, cy], [west, cy]])
def decl_add_shape_arc(dshape, easy, pads):
    """Stub: ARC shape conversion is not implemented yet (TODO).

    The notes below record the EasyEDA SVG-arc input format and the PADS
    arc-piece output format needed to implement it.
    """
    package_decl_name = easy.pDetail['decl_name']
    if dshape['layer_id'] == easy.TOP_SILK_LAYER:  # top silkscreen layer
        pass
        # easy:
        # path_string --M329,274 A26.95,26.95 0 0 1 370,309
        # arc (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+
        # Draws an elliptical arc from the current point to (x, y).
        # The size and orientation of the ellipse are defined by two radii (rx, ry)
        # and an x-axis-rotation, which indicates how the ellipse as a whole is
        # rotated relative to the current coordinate system.
        # The center (cx, cy) of the ellipse is calculated automatically
        # to satisfy the constraints imposed by the other parameters.
        # large-arc-flag and sweep-flag contribute to the automatic
        # calculations and help determine how the arc is drawn.

        # pads format:
        # Start_point start_angle*10 delta_angle*10 circle_xy1, circle_xy2
        # End_point
        # OPEN 2 10 26 -1
        # -488.19  889.76  1669 -2438 -496.47 499.59 139.63 1135.69
        # -106.3   507.87
        #
        #
        # pads.add_pieces(package_decl_name, type='OPEN', numcoord=2,
        #                width=dshape['stroke_width'],
        #                layer=26, linestyle=-1, coord_list=[[round(dshape['cx'] + dshape['r'], 5), dshape['cy']],
        #                                                    [round(dshape['cx'] - dshape['r'], 5), dshape['cy']]])
def decl_add_shape_track(dshape, easy, pads):
    """Draw a TRACK (open polyline) shape on the top silkscreen layer.

    Shapes on other EasyEDA layers are ignored.
    """
    if dshape['layer_id'] != easy.TOP_SILK_LAYER:
        return
    decal = easy.pDetail['decl_name']
    # Consistency fix: use the named silk-layer constant like the sibling
    # rect/circle converters instead of a magic 26.  The dumped value is
    # identical ('26') because contractS stringifies every field.
    pads.add_pieces(decal, type='OPEN', numcoord=len(dshape['points']),
                    width=dshape['stroke_width'],
                    layer=pads.TOP_SILK_LAYER, linestyle=-1,
                    coord_list=dshape['points'])
def decl_add_shape_pad(dshape, easy, pads):
    """Convert a PAD shape into a PADS terminal plus pad-stack definition.

    Maps EasyEDA pad shapes onto PADS pad-stack layer records:
    RECT -> rectangular finger ('RF'), ELLIPSE -> round ('R'),
    OVAL -> oval finger ('OF'), POLYGON -> approximated round pad plus a
    COPCLS copper piece; anything else falls back to a rectangular record.
    Finger pads whose width is smaller than their height are swapped and
    rotated by ~90 degrees (89.998 — presumably to stay under PADS' 180
    degree orientation limit; TODO confirm).
    """
    package_decl_name = easy.pDetail['decl_name']
    a = pads
    # Terminal at the pad center; the label position reuses the same point.
    a.add_terminal(package_decl_name, dshape['number'], dshape['cx'], dshape['cy'], dshape['cx'], dshape['cy'])
    numberlayers = 3
    layer_list = None
    if dshape['shape'] == 'RECT':
        # rectangular finger pad
        rotation = float(dshape['rotation'])
        layer_shape = 'RF'
        # NOTE(review): wh_swap is set but never read — looks like dead code.
        wh_swap = 0
        ww = dshape['width']
        hh = dshape['height']
        if ww < hh:
            # Normalize so length >= width, compensating with a ~90° rotation.
            ww, hh = hh, ww
            wh_swap = 1
            rotation = rotation + 89.998
        if rotation > 180.0:
            rotation -= 180.0
        if rotation == 180.0:
            rotation = 179.998
        # layer width shape corner ori length offset
        layer_one = [-2, hh,
                     layer_shape, 0, round(rotation, 3),
                     ww, 0]
    elif dshape['shape'] == 'ELLIPSE':
        # round pad
        layer_shape = 'R'
        # layer width shape
        layer_one = [-2, dshape['height'], layer_shape]
    elif dshape['shape'] == 'OVAL':
        layer_shape = 'OF'
        rotation = float(dshape['rotation'])
        ww = dshape['width']
        hh = dshape['height']
        # NOTE(review): wh_swap is set but never read — looks like dead code.
        wh_swap = 0
        if ww < hh:
            wh_swap = 1
            ww, hh = hh, ww
            rotation = rotation + 89.998
        if rotation > 180.0:
            rotation -= 180.0
        if rotation == 180.0:
            rotation = 179.998
        # layer width shape ori length offset
        layer_one = [-2, hh, layer_shape,
                     round(rotation, 3), ww, 0]
    elif dshape['shape'] == 'POLYGON':
        # TODO: support odd-shaped (polygon) pads properly,
        # e.g. DFN-5_L3.0-W3.0-P0.65-BL-MDV1595SU
        # print('decl_add_shape_pad:polygon not supported:', dshape['shape'])
        layer_shape = 'R'
        hh = dshape['height']
        ww = dshape['width']
        # Approximate with a round pad; outline added as a COPCLS piece below.
        # layer width shape
        layer_one = [-2, round((ww + hh) / 20, 5), layer_shape]
        # layer width shape corner ori length offset
        # layer_one = [-2, hh, layer_shape, 0, rotation, ww, 0]
        pads.add_pieces(package_decl_name, 'COPCLS', len(dshape['points']), '10', 1, int(dshape['number']) - 1,
                        dshape['points'])
    else:
        print('decl_add_shape_pad:unknown shape', dshape['shape'])
        layer_shape = 'R'
        layer_one = [-2, dshape['height'], layer_shape, 0, 0, dshape['width'], 0]
    if easy.is_top_layer(dshape):
        # SMD pad on the top layer only: no copper on inner/bottom layers.
        # -2 is the top layer
        # -1 is all inner layers
        # -0 is the bottom layer
        layer_list = [layer_one,
                      [-1, 0, 'R'],
                      [0, 0, 'R']]
    else:
        # Through-hole pad: identical pad record on every layer.
        layer_list = [layer_one,
                      [-1] + layer_one[1:],
                      [0] + layer_one[1:]]
    if None in layer_list:
        print('layer_list', layer_list)
    a.add_pad_stack(package_decl_name, pin_number=dshape['number'], numberlayers=numberlayers,
                    layer_list=layer_list, plated=('P' if dshape['plated'] == 'Y' else 'N'),
                    drill=2 * dshape['hole_radius'], drlori='', drllen='', drloff='')
def decl_add_shape_solidregion(dshape, easy, pads):
    """Draw a SOLIDREGION from the EasyEDA document layer as an open
    silkscreen outline with a fixed 5-unit line width.

    EasyEDA layer ids of interest: 12 = document, 99 = component shape,
    100 = lead shape; only the document layer is converted here.
    """
    if dshape['layer_id'] != easy.DOCUMENT_LAYER:
        return
    decal = easy.pDetail['decl_name']
    # Consistency fix: named silk-layer constant instead of magic 26; the
    # dumped value is identical because contractS stringifies every field.
    pads.add_pieces(decal, type='OPEN', numcoord=len(dshape['points']),
                    width=5,
                    layer=pads.TOP_SILK_LAYER, linestyle=-1,
                    coord_list=dshape['points'])
def easy_to_pads(easy, part_name, part_time, pads: PadsAscii):
    """Convert one parsed EasyEDA footprint (and optional part type) into pads.

    :param easy: EasyEda instance whose pDetail has already been parsed.
    :param part_name: part-type name to register, or None for decal-only output.
    :param part_time: part creation time (unix timestamp) for the TIMESTAMP line.
    :param pads: PadsAscii accumulator; returned for chaining.
    :return: the same PadsAscii instance.
    """
    package_decl_name = easy.pDetail['decl_name']
    # NOTE: the original also copied easy.packageDetailRaw into an unused
    # local; removed as dead code.
    if part_name is not None:
        if not pads.has_part(part_name):
            pads.add_pcb_part(name=part_name, decl_name=package_decl_name, unit='I',
                              dt=datetime.datetime.fromtimestamp(part_time))
        else:
            print('part exist, skip', part_name)
    if pads.has_decl(package_decl_name):
        # Decal already converted earlier in this run — nothing more to do.
        return pads
    pads.add_pcb_decal(name=easy.pDetail['decl_name'], unit='I',
                       originx=easy.pDetail['orgx'], originy=easy.pDetail['orgy'],
                       dt=easy.pDetail['updateTime'])
    # Standard attribute labels every decal carries: Comment (flags=33) and
    # REF-DES (flags=34), both at the origin on the silkscreen layer.
    for label_text, label_flags in (('Comment', 33), ('REF-DES', 34)):
        pads.add_pcb_decal_attrib_label(decal_name=easy.pDetail['decl_name'], attr_name='',
                                        rel_x=0, rel_y=0,
                                        rotation=0, mirror=0, height=50, width=5, layer=26,
                                        just=0, flags=label_flags,
                                        fontinfo='Regular <Romansim Stroke Font>',
                                        textstring=label_text)
    # Dispatch each parsed shape to its converter; unknown types are skipped.
    for dshape in easy.pDetail['shape']:
        stype = dshape['type']
        if stype == 'CIRCLE':
            decl_add_shape_circle(dshape, easy, pads)
        elif stype == 'TRACK':
            decl_add_shape_track(dshape, easy, pads)
        elif stype == 'PAD':
            decl_add_shape_pad(dshape, easy, pads)
        elif stype == 'SOLIDREGION':
            decl_add_shape_solidregion(dshape, easy, pads)
        elif stype == 'ARC':
            # TODO: ARC conversion is still a stub.
            decl_add_shape_arc(dshape, easy, pads)
        elif stype == 'RECT':
            decl_add_shape_rect(dshape, easy, pads)
        elif stype == 'TEXT':
            decl_add_shape_text(dshape, easy, pads)
        else:
            pass
            # print('unknown shape to pads', dshape)
    return pads
```
#### File: easyeda_to_pads/src/pads_ascii.py
```python
__author__ = "songjiangshan"
__copyright__ = "Copyright (C) 2021 songjiangshan \n All Rights Reserved."
__license__ = ""
__version__ = "1.0"
class PadsAscii:
def __init__(self):
self.m_format = 'pcb_decals'
self.m_pcb_decals = {}
self.m_pad_parts = {}
self.TOP_LAYER = '1'
self.BOTTOM_LAYER = '2'
self.TOP_SILK_LAYER = '26'
self.BOTTOM_SILK_LAYER = '29'
self.TOP_PASTEM_LAYER = '23'
self.BOTTOM_PASTEM_LAYER = '22'
self.TOP_SOLDERM_LAYER = '21'
self.BOTTOM_SOLDERM_LAYER = '28'
# self.BOARDOUTLINE_LAYER=10
# self.MULTI_LAYER=11
# self.DOCUMENT_LAYER = 12
# self.LEAD_SHAPE_LAYER = 100
# self.COMP_SHAPE_LAYER = 101
self.MOUNT_LAYER = '-2'
self.INNER_LAYER = '-1'
self.OPPOSITE_LAYER = '0'
def set_format(self, format):
"""
:param format: line_items, sch_decals, pcb_decals, part_types
:return:
"""
self.m_format = format
def limit_decl_name(self, name):
"""
decl_name max 40 char
:param name:
:return:
"""
if len(name) > 40:
name = name[0:39]
name = name + '_'
return name
def limit_part_name(self, name: str):
# *?:@,
name = name.replace(',', '_').replace('@', '_').replace(':', '_').replace('?', '_').replace('*', '_').replace(' ', '_').replace('Ω±', '_').replace('%', '_')
name=name.replace('±','_').replace('(','_').replace(')','_').replace('Ω', '_').replace('±', '_')
return name
def get_start_of_file(self):
if self.m_format == 'line_items':
return '*PADS-LIBRARY-LINE-ITEMS-V9*'
elif self.m_format == 'sch_decals':
return '*PADS-LIBRARY-SCH-DECALS-V9*'
elif self.m_format == 'pcb_decals':
return '*PADS-LIBRARY-PCB-DECALS-V9*'
elif self.m_format == 'part_types':
return '*PADS-LIBRARY-PART-TYPES-V9*'
else:
return 'unknown format'
def get_eof(self):
return '*END*'
def add_pcb_part(self, name, decl_name, unit, dt, logfam='UND', attrs=0, gates=0, sigpins=0, pinmap=0, flag=0):
"""
:param name:Part type name. Values can be up to 40 alphanumeric characters.
:param decl_name:List of alternate PCB decal names, separated by colons name:name:…
A PCB decal name can be up to 40 alphanumeric characters. The list
may have a maximum of 16 alternates.
:param unit:Coordinate units type
Can be either Imperial (mils) or Metric (mm), expressed as a single
letter: I or M
:param logfam:Logic Family type
Values can be any three alphanumeric characters.
:param attrs:Number of part attributes defined
:param gates:Number of gates in the part
Values range from 0 to 702
:param sigpins:Number of standard signals predefined in the part, which is typically,
but not exclusively, power and ground.
Values range from 0 to 1024.
:param pinmap:Number of alphanumeric pins defined in the part pin mapping.
Values range from 0 to 32767.
:param flag:Decimal value of an eight-bit binary bit string:
Bits 0–1 taken as a two-bit number define the type of part:
0 = normal part
1 = connector
2 = off-page reference.
Bit 2 is a flag that is set for a non-ECO registered part type.
Bit 5 is a flag that is set for a flip chip part ( used in advanced packaging
toolkit)
Bit 6 is a flag that is set for a die part ( used in advanced packaging
toolkit)
Bit 7 is a flag that is set to indicate an incomplete or inconsistent part
type.
:return:
"""
# name pcbdecals u logfam attrs gates sigpins pinmap flag
# TIMESTAMP year.month.day.hour.minute.second
if self.has_part(name):
print('错误:part type已经存在.', name, self.m_pad_parts)
return
self.m_pad_parts[name] = {'name': name, 'decl_name': decl_name, 'unit': unit,
'logfam': logfam, 'attrs': attrs, 'gates': gates, 'sigpins': sigpins,
'pinmap': pinmap, 'flag': flag, 'dt': dt}
def has_decl(self, decl_name):
return decl_name in self.m_pcb_decals
def has_part(self, part_name):
return part_name in self.m_pad_parts
def add_pcb_decal(self, name, unit, originx, originy, dt):
"""
:param name:User-defined decal name. Values can be up to 40 alphanumeric characters.
:param unit: Can be either Imperial (mils) or Metric (mm), expressed as a single letter: I or M.
:param originx: Coordinates of the symbol origin. Expressed in mils.
:param originy:Coordinates of the symbol origin. Expressed in mils.
:return:
"""
if name in self.m_pcb_decals:
print('错误:封装已经存在.', name, self.m_pcb_decals)
return False
self.m_pcb_decals[name] = {'name': name, 'unit': unit,
'x': originx, 'y': originy, 'attrs': {}, 'labels': [], 'terminals': {}, 'stacks': [],
'pieces': [],
'txt': [], 'dt': dt}
return True
def add_txt(self, decl_name, text, x, y, rotation, layer, height, width, mirror, fontinfo, just=0, drwnum=0,
field=0):
"""
:param decl_name:
:param text:Text string
Up to 255 characters, spaces allowed.
:param x: Coordinates of the text string location relative to the origin of the schematic
:param y:
:param rotation:Orientation of the text in degrees
:param layer:Numeric layer number for use in PADS Layout.
Values range from 0 to 250. A layer value of zero means all layers. The layer
number is ignored in PADS Logic.
:param height:Height of text
Values range from 0.01 to 1.0 inches, expressed in the selected units type.
:param width:Width of text in mils. Values range from 0.001 to 0.050 inches, expressed in the selected units type.
:param mirror:Flag indicating text mirroring in PADS Layout. 0 = not mirrored, 1 = mirrored about the y-axis when viewed with zero
:param just:Text string justification
Value is the decimal equivalent of a bit string as follows:
Bits 0 to 3 encode a four-bit value for horizontal justification with the following
values:
0 = Left justified
1 = Center justified
2 = Right justified
Bits 4 to 7 encode a four-bit value for vertical justification with the following
values:
0 = Bottom justified
1 = Middle justified
2 = Top justified.
Allowed values for justification are as follows:
Bottom left = 0
Bottom center = 1
Bottom right = 2
Middle left = 16
Middle center= 17
Middle right = 18
Top left = 32
Top center = 33
Top right = 34
:param drwnum:For auto-dimensioning text, this is the PCB drawing number. For other text, the
value is zero.
:param field: A flag to indicate that the text item is a PADS Logic field label.
:param fontinfo:Font information string, as described in the Font Information Definition section.
:return:
"""
txt_elem = {
'text': text, 'x': x, 'y': y, 'rotation': rotation, 'layer': layer, 'height': height, 'width': width,
'mirror': mirror,
'just': just, 'drwnum': drwnum, 'field': field, 'fontinfo': fontinfo
}
self.m_pcb_decals[decl_name]['txt'].append(txt_elem)
def add_pcb_decal_attrib(self, name, attr_name, attr_value):
"""
"Geometry.Height" 19499961dbunit
:param name:
:param attr_name:
:param attr_value:
:return:
"""
if name not in self.m_pcb_decals:
print('错误:封装不存在.', name, self.m_pcb_decals)
return
self.m_pcb_decals[name]['attrs'][attr_name] = attr_value
def add_pcb_decal_attrib_label(self, decal_name, attr_name, rel_x, rel_y, rotation,
mirror, height, width, layer, just, flags, fontinfo, textstring):
"""
x y rotation mirror height width layer just flags fontinfo textstring
:param decal_name:
:param attr_name:
:param rel_x:Coordinates of the text string location relative to the origin of the schematic
:param rel_y:
:param rotation:Orientation of the text in degrees
:param mirror:Flag indicating text mirroring in PADS Layout.0 = not mirrored, 1 = mirrored about the y-axis when viewed with zero
orientation.
:param height:Height of text Values range from 0.01 to 1.0 inches, expressed in the selected units type
:param width:Width of text in mils Values range from 0.001 to 0.050 inches, expressed in the selected units type
:param layer: Numeric layer number for use in PADS Layout.Values range from 0 to 250. A layer value of zero means all layers.
:param just:Justification of the attribute text string
Value is the decimal equivalent of a bit string as follows:
Bits 0 to 3 encode a four-bit value for horizontal justification with the following
values:
0 = Left justified
1 = Center justified
2 = Right justified
Bits 4 to 7 encode a four-bit value for vertical justification with the following
values:
0 = Bottom justified
1 = Middle justified
2 = Top justified.
Allowed values for 0 and 90 degree rotation are as follows:
Bottom left = 0
Bottom center = 1
Bottom right = 2
Middle left = 16
Middle center= 17
Middle right = 18
Top left = 32
Top center = 33
Top right = 34
:param flags:Type of label, name/value visibility, and right reading status
Values are the decimal equivalent of an eight-bit binary value with bit fields
defined as follows:
Bits 0 to 2 contain a numeric value to define the label type:
0 = General attribute label
1 = Reference designator
2 = Part type
Bit 3 set indicates the label is right reading and displayed at the nearest 90-degree
orientation.
Bit 4 set indicates label is right reading but display is not constrained to a 90-
degree orientation.
Bit 5 set indicates that the attribute value is displayed.
Bit 6 set indicates that the short version of the attribute name is displayed.
Bit 7 set indicates that the full structured attribute name is displayed.
:param fontinfo:Font information for the attribute label text
:param textstring: Name of the attribute whose location is being defined
The reserved names “REF-DES” and “PARTTYPE” refer to reference
designator and part type labels
Up to 255 characters, spaces allowed.
:return:
"""
self.m_pcb_decals[decal_name]['labels'].append({'x': rel_x, 'y': rel_y, 'rotation': rotation,
'mirror': mirror, 'height': height, 'width': width,
'layer': layer, 'just': just, 'flags': flags,
'fontinfo': fontinfo,
'textstring': textstring})
def add_terminal(self, decl_name, pin_number, rx, ry, label_rx, label_ry):
term = {'x': rx, 'y': ry, 'lx': label_rx, 'ly': label_ry, 'pin_number': pin_number}
self.m_pcb_decals[decl_name]['terminals'][pin_number] = term
return
def add_pad_stack(self, decl_name, pin_number, numberlayers, layer_list, plated, drill, drlori, drllen, drloff):
"""
:param decl_name:
:param pin_number:Pin number to which the pad stack applies
If the pin number is zero, then the pad stack applies to all pins that do not have a
specific pad stack
:param numberlayers:Number of pad stack layer lines that follow the header line.
:param plated:Either the keyword P for plated drill hole or N for nonplated drill hole.
:param drill:Drill diameter for the pad
Value of zero indicates that there is no drill hole
:param drlori:Orientation of a slotted hole
Valid values range from 0 to 179.999 degrees.
:param drllen:Slotted hole length
:param drloff: Slot offset
:param layer_list:
Each layer line can have one of the following formats:
layer width shape
(Round normal pad or round and square anti-pads)
layer width shape corner
(square normal pads)
layer width shape intd
(Annular pads)
layer width shape ori length offset
(Oval pads)
layer width shape corner ori length offset
(rectangular pads)
layer width shape ori intd spkwid numspk
layer Layer number
Valid values range from 1 to 250.
or
Layer code of the pin
Layer codes are defined as follows:
-2 is the top layer
-1 is all inner layers
-0 is the bottom layer
width Width of a finger pad or the external diameter of all other pad shapes
shape Shape can be one of the following values:
R—round pad
S—square pad
RA—round anti-pad
SA—square anti-pad
A—annular pad
OF—oval finger pad
RF—rectangular finger pad
RT—round thermal pad
ST—square thermal pad
corner This field stores the numerical “corner radius” value and is used to support pads
with rounded and chamfered corners. It only exists for square (S) pads and
rectangular finger (RF) pad shapes. Zero value is used for 90 degree (nonrounded) pad corners; a positive value is used for pads with rounded corners; a
negative value is used for pads with chamfered corners.
intd Internal diameter of an annular or thermal pad
ori Orientation of a finger pad or the thermal spokes
Valid values range from 0 to 179.999 degrees.
length Finger pad length
offset Finger pad offset
spkwid Thermal pad spoke width
numspk Number of thermal pad spokes
:return:
"""
# PAD pin numlayers plated drill [drlori drllen drloff]
self.m_pcb_decals[decl_name]['stacks'].append({'pin_number': pin_number, 'numlayers': numberlayers,
'plated': plated,
'drill': drill, 'drlori': drlori,
'drllen': drllen, 'drloff': drloff, 'layer_list': layer_list})
def add_pieces(self, decl_name, type, numcoord, width, layer, linestyle, coord_list):
"""
type numcoord width layer linestyle
x y (format for line segment)
x y ab aa ax1 ay1 ax2 ay2 (format for arcs)
:param decl_name:
:param type:
:param numcoord:
:param width:
:param layer:
:param linestyle:
linestyle System flag for type of line or keepout restrictions
A value of 1 indicates a solid line; a value of 0 indicates an old Logic
style dotted line. Negative values indicate line styles introduced in
PADS 9.4 (for piece types OPEN, CLOSED, CIRCLE only):
-1 — solid
-2 — dashed
-3 — dotted
-4 — dash dotted
-5 — dash double-dotted
Positive values indicate Keepout Restrictions (for piece types
KPTCLS, KPTCIR only):
Bit 0: (0x01) Placement
Bit 1: (0x02) Trace and Copper
Bit 2: (0x04) Copper Pour and Plane Area
Bit 3: (0x08) Via and Jumper
Bit 4: (0x10) Test Point
Bit 5 : (0x20) Component Drill
Bit 6: (0x40) Accordion
Since TAGs have no graphics, the linestyle value for TAGs (typically
-1) is non-significant.
:param coord_list:
:return:
"""
self.m_pcb_decals[decl_name]['pieces'].append(
{'type': type, 'numcoord': numcoord, 'width': width, 'layer': layer, 'linestyle': linestyle,
'coord_list': coord_list})
def contractS(self, cont_list, sps):
return sps.join([str(i) for i in cont_list])
def dump_part_types(self):
"""
Each part type entry consists of the following parts:
• Part type header lines
• Attribute information (optional)
• Gate information (optional)
• Signal pin information (optional)
• Alphanumeric pins (optional)
:return:
"""
# def add_pcb_part(self, name, decl_name, unit, dt, logfam='UND', attrs=0, gates=0, sigpins=0, pinmap=0, flag=0):
out_str = ''
for part_name in self.m_pad_parts:
part = self.m_pad_parts[part_name]
out_str += self.contractS(
[self.limit_part_name(part_name), self.limit_decl_name(part['decl_name']), part['unit'], part['logfam'],
part['attrs'], part['gates'], part['sigpins'],
part['pinmap'], part['flag'], '\n'], ' ')
out_str += "TIMESTAMP " + self.contractS(
[part['dt'].year, part['dt'].month, part['dt'].day, part['dt'].hour, part['dt'].minute,
part['dt'].second], '.') + '\n'
return out_str
def dump_pcb_decal(self):
"""
A PCB decal consists of the following parts:
• Header line
• Decal attributes
• Attribute label locations
• Piece definitions
• Text definitions
Terminal definitions
• Pad-stack definitions
• Maximum layers designation
:return:
"""
out_str = ''
for decl_name in self.m_pcb_decals:
decl = self.m_pcb_decals[decl_name]
##############Header line
# name u x y attrs labels pieces txt terminals stacks maxlayers
out_str += self.contractS([self.limit_decl_name(decl['name']), decl['unit'], decl['x'], decl['y'],
len(decl['attrs']), len(decl['labels']), len(decl['pieces']), len(decl['txt']),
len(decl['terminals']), len(decl['stacks']), 0, '\n'], ' ')
out_str += "TIMESTAMP " + self.contractS(
[decl['dt'].year, decl['dt'].month, decl['dt'].day, decl['dt'].hour, decl['dt'].minute,
decl['dt'].second], '.') + '\n'
##############Decal attributes
for i in decl['attrs']:
out_str += '"' + i + '" ' + decl['attrs'][i] + '\n'
##############Attribute Labels Format
for i in decl['labels']:
# 0 0 0 0 1.27 0.127 1 0 34 "Regular <Romansim Stroke Font>"
# x y rotation mirror height width layer just flags fontinfo textstring
out_str += self.contractS(
[i['x'], i['y'], i['rotation'], i['mirror'], i['height'], i['width'], i['layer'], i['just'],
i['flags'], '\"' + i['fontinfo'] + '\"'], ' ')
out_str += '\n'
out_str += i['textstring'] + '\n'
for i in decl['pieces']:
out_str += self.contractS([i['type'], i['numcoord'], i['width'], i['layer'], i['linestyle']], ' ')
out_str += '\n'
for j in i['coord_list']:
out_str += self.contractS(j, ' ')
out_str += '\n'
for i in decl['txt']:
# x y rotation layer height width mirror just drwnum field fontinfo
out_str += self.contractS(
[i['x'], i['y'], i['rotation'], i['layer'], i['height'], i['width'], i['just'],
i['drwnum'], i['field'], i['fontinfo']], ' ')
out_str += '\n'
out_str += i['text']
out_str += '\n'
for i in decl['terminals']:
# Tx1 y1 x2 y2 pin
out_str += 'T' + self.contractS(
[decl['terminals'][i]['x'], decl['terminals'][i]['y'], decl['terminals'][i]['lx'],
decl['terminals'][i]['ly'], decl['terminals'][i]['pin_number']], ' ')
out_str += '\n'
for i in decl['stacks']:
# PAD pin numlayers plated drill [drlori drllen drloff]
out_str += "PAD " + self.contractS(
[i['pin_number'], i['numlayers'], i['plated'], i['drill'], i['drlori'], i['drllen'], i['drloff']],
' ')
out_str += '\n'
for j in i['layer_list']:
out_str += self.contractS(j, ' ') + '\n'
return out_str
def dump(self):
    """Serialize the library in the format selected via set_format().

    Layout: start-of-file header, optional section body, end-of-file
    marker — each terminated with a newline.
    """
    body = ''
    if self.m_format == 'pcb_decals':
        body = self.dump_pcb_decal()
    elif self.m_format == 'part_types':
        body = self.dump_part_types()
    return self.get_start_of_file() + '\n' + body + self.get_eof() + '\n'
```
#### File: easyeda_to_pads/src/szlc_to_pads.py
```python
__author__ = "songjiangshan"
__copyright__ = "Copyright (C) 2021 songjiangshan \n All Rights Reserved."
__license__ = ""
__version__ = "1.0"
import sys
import time
import codecs
import requests # 导入requests包
import json
from pads_ascii import PadsAscii
from easyeda import EasyEda
from szlc_read import get_comp_uuid_list, get_one_decl
from easy_to_pads import easy_to_pads
import datetime
from line_profiler import LineProfiler
def lc_get_comp_decl(comp_uuid):
    """Fetch one component document from the lceda.cn web API.

    :param comp_uuid: component uuid as returned by lc_search
    :return: (packageDetail json, schematic 'head' dict)
    No error handling: a bad uuid or offline host raises from requests/json.
    """
    # NOTE(review): `url` is built but never used — presumably a leftover.
    url = 'https://lceda.cn/api/components?version=6.4.20.2&docType=2&uid=0819f05c4eef4c71ace90d822a990e87&type=3'
    url2 = 'https://lceda.cn/api/components/' + comp_uuid + '?version=6.4.20.2&uuid=' + comp_uuid + '&datastrid='
    strhtml = requests.get(url2)  # GET the component document
    comp_list_ret = json.loads(strhtml.text)
    packageDetail = comp_list_ret['result']['packageDetail']
    return packageDetail, comp_list_ret['result']['dataStr']['head']
def lc_search(user_id, keyword):
    """Search lceda.cn for *keyword* and return the first 'lcsc' hit.

    :param user_id: lceda account uid used by the search endpoint
    :param keyword: package/footprint name to search for
    :return: (uuid, title) of the first result, or (None, None) when the
        API reports failure.
    """
    url = 'https://lceda.cn/api/components/search'
    param = {'type': 3, 'doctype[]': 2, 'uid': user_id, 'returnListStyle': 'classifyarr', 'wd': keyword,
             'version': '6.4.20.2'}
    ret = requests.post(url, param)
    ret_j = json.loads(ret.text)
    if ret_j['success'] is not True:
        print('some error:', ret_j['message'])
        return None, None
    # Only the first match is used; raises IndexError if the result list
    # is empty even though success was reported.
    pkt_uuid = ret_j['result']['lists']['lcsc'][0]['uuid']
    pkt_title = ret_j['result']['lists']['lcsc'][0]['title']
    return pkt_uuid, pkt_title
def etopads(ddetail_json: dict, partdetail_json: dict, a: PadsAscii):
    """Convert one EasyEDA footprint into the PADS accumulator *a*.

    :param ddetail_json: EasyEDA package-detail json for the footprint
    :param partdetail_json: schematic 'head' dict, or None for bare decals;
        supplies the part name and modification time
    :param a: PadsAscii accumulator the converted part is appended to
    :return: (a, [easyeda_parse_seconds, pads_convert_seconds])
    """
    easy = EasyEda()
    t1 = time.time()
    packageDetail = ddetail_json
    # Normalization pipeline: parse, move origin to zero, mirror Y
    # (presumably EasyEDA vs PADS axis convention — TODO confirm),
    # convert holes to pads, renumber and re-sort pins.
    easy.parse_decl_json(packageDetail)
    easy.org_to_zero()
    easy.y_mirror()
    easy.hole_to_pad()
    easy.pin_renumber()
    easy.pin_resort()
    t2 = time.time()
    # NOTE(review): package_decl_name is computed but never used.
    package_decl_name = easy.pDetail['decl_name']
    if partdetail_json is not None:
        part_name = partdetail_json['c_para']['name']
        part_time = partdetail_json.get('utime')
        if (part_time == '') or (part_time is None):
            part_time = time.time()  # fall back to "now" when utime is absent
    else:
        part_time = 0
        part_name = None
    part_time = int(part_time)
    a = easy_to_pads(easy, part_name, part_time, a)
    t3 = time.time()
    return a, [t2 - t1, t3 - t2]
def save_to_file(stri, file_name):
    """Serialize *stri* as pretty-printed JSON into *file_name*.

    Despite the parameter name, *stri* may be any JSON-serializable
    object; it is passed straight to the json module.
    """
    # `with` guarantees the handle is closed even if serialization fails
    # (the original left the file open on error).
    with open(file_name, 'w') as f:
        json.dump(stri, f, indent=4)
def pull_one_comp():
    """Download a fixed list of packages from lceda and dump them to the
    PADS ASCII files out.d (pcb decals) and out.p (part types)."""
    a = PadsAscii()
    user_id = '0819f05c4eef4c71ace90d822a990e87'
    keywords = ['SMA-TH_SMA-KWE903', 'ANT-SMD_KH-IPEX-K501-29', 'MICRO-SIM-SMD_SIM-002-A6',
                'LCC-LGA-58_L17.7-W15.8-P1.1-TL-BC260Y-CN']
    for kw in keywords:
        puuid, ptitle = lc_search(user_id, kw)
        ddetail, partdetail = lc_get_comp_decl(puuid)
        # BUG FIX: etopads returns (PadsAscii, timing-list); the old code
        # assigned the whole tuple to `a`, so a.set_format() below crashed
        # with AttributeError on the second use.
        a, _ = etopads(ddetail, partdetail, a)
    with open('out.d', 'w') as f:
        a.set_format('pcb_decals')
        f.write(a.dump())
    with open('out.p', 'w') as f:
        a.set_format('part_types')
        f.write(a.dump())
from szlc_read import get_decl_list
def szlc_to_pads_decl_list(decl_title_list):
    """Export the footprints named in *decl_title_list* to out.d / out.p.

    :param decl_title_list: list of footprint (decal) titles to export
    """
    a = PadsAscii()
    decl_list = get_decl_list(decl_title_list)
    for cnt, record in enumerate(decl_list):
        # record[3] is the decal json; pure decals have no schematic head.
        a, _ = etopads(record[3], None, a)
        print('\r', cnt, end='')
    # `with` replaces the original's unclosed open/close pairs.
    with open('out.d', 'w') as f:
        a.set_format('pcb_decals')
        f.write(a.dump())
    with open('out.p', 'w') as f:
        a.set_format('part_types')
        f.write(a.dump())
def szlc_to_pads_2k():
    """Bulk-convert components from the local store into out.d / out.p.

    (Original note, translated) Most of the runtime is spent looking up
    decl json data by uuid; an index is needed to speed this up.
    """
    a = PadsAscii()
    cnt = 0
    comp_list = get_comp_uuid_list()
    # accumulated timings: DB read, EasyEDA parse, PADS convert
    t_read = 0
    t_easy = 0
    t_pads = 0
    for i in comp_list:
        t1 = time.time()
        decl_data = get_one_decl(i[2])  # i[2] presumably the decal uuid — verify against get_comp_uuid_list
        t2 = time.time()
        a, time_list = etopads(decl_data[1], i[3]['head'], a)
        t_read += t2 - t1
        t_easy += time_list[0]
        t_pads += time_list[1]
        print('\r', cnt, t_read, t_easy, t_pads, end='')
        if cnt % 2000 == 0:
            print('\r', t2 - t1, time_list, cnt, len(comp_list), end='')
        # NOTE(review): this stops after the second component — looks like
        # a debugging leftover; remove it to process the full list.
        if cnt > 0:
            break
        cnt += 1
    f = open('out.d', 'w+')
    a.set_format('pcb_decals')
    f.write(a.dump())
    f.close()
    f = open('out.p', 'w+')
    a.set_format('part_types')
    f.write(a.dump())
    f.close()
from szlc_read import get_comp_tags
def comp_save_by_tags():
    # Export components grouped by tag, one PADS file pair per tag,
    # written under ./lc_pads/. (Original note, translated:) some value
    # around record 7637 is problematic.
    comp_list = get_comp_tags()
    tag_list=list(set([ i[4] for i in comp_list]))  # i[4] is the tag string
    tag_list.sort()
    print(tag_list)
    # The commented slices below were used to export in batches manually.
    #tag_list = tag_list[0:10]
    #tag_list = tag_list[10:20]
    #tag_list = tag_list[20:30]
    #tag_list = tag_list[30:50]
    #tag_list = tag_list[50:70]
    #tag_list = tag_list[70:90]
    #tag_list = tag_list[90:110]
    #tag_list = tag_list[110:150]
    #tag_list = tag_list[150:200]
    #tag_list = tag_list[200:300]
    tag_list = tag_list[300:]  # current batch: everything from index 300 on
    tags_pads = {}  # tag -> PadsAscii accumulator
    cnt = 0
    for i in comp_list:
        # record layout assumed: i[0] label, i[1] comp uuid, i[2] decal
        # uuid, i[3] schematic json, i[4] tag — TODO confirm in szlc_read.
        curr_tag = i[4]
        comp_uuid = i[1]
        decl_uuid = i[2]
        sch = i[3]
        if curr_tag not in tag_list:
            continue
        print('\r', i[0], end='')
        if curr_tag not in tags_pads:
            tags_pads[curr_tag] = PadsAscii()
        decl_data = get_one_decl(decl_uuid)
        tags_pads[curr_tag], time_list = etopads(decl_data[1], sch['head'], tags_pads[curr_tag])
        cnt += 1
    print('to pads ready.')
    cnt = 0
    for tag_title in tags_pads:
        # Sanitize the tag into a GBK-safe file name (tags may contain
        # full-width punctuation and path separators).
        tag_title_file_name = tag_title.encode('gbk', 'ignore').decode('gbk', 'ignore').replace('[', '').replace(']', '').replace('"', '').replace('/', '_').replace(
            ' ', '_').replace(',', '_').replace('\\uff0c', '_').replace('\\uff08', '').replace('\\uff09', '').replace('\\u4f5c', '').replace('\\u5e9f', '')
        f = open('./lc_pads/'+tag_title_file_name + '.d', 'wb+')
        tags_pads[tag_title].set_format('pcb_decals')
        f.write(tags_pads[tag_title].dump().encode('gbk', 'ignore'))
        f.close()
        f = open('./lc_pads/'+tag_title_file_name + '.p', 'wb+')
        tags_pads[tag_title].set_format('part_types')
        f.write(tags_pads[tag_title].dump().encode('gbk', 'ignore'))
        f.close()
        print('\r', cnt, end='')
        cnt += 1
if __name__ == '__main__':
    # Current entry point: export everything grouped by tag.
    comp_save_by_tags()
    sys.exit(0)
    # --- everything below is dead code (after sys.exit) kept as one-off
    # profiling / export recipes ---
    lp = LineProfiler()
    lp_wrapper = lp(comp_save_by_tags)
    lp_wrapper()
    lp.print_stats()
    sys.exit(0)
    # szlc_to_pads_decl_list(['SMA-SMD_BWSMA-KE-P001', 'IND-SMD_L3.6-W2.9', 'SOT-363_L2.0-W1.3-P0.65-LS2.1-TL', 'SOT-23-3_L2.9-W1.3-P1.90-LS2.4-BR'])
    szlc_to_pads_decl_list(['CAP-SMD_L7.3-W4.3-R-RD'])
    sys.exit(0)
    lp = LineProfiler()
    lp.add_function(get_comp_uuid_list)  # add additional function to profile
    lp.add_function(get_one_decl)
    lp_wrapper = lp(szlc_to_pads_2k)
    lp_wrapper()
    lp.print_stats()
``` |
{
"source": "Jiangshan00001/lanenet_test",
"score": 2
} |
#### File: Jiangshan00001/lanenet_test/run.py
```python
from DiscriminativeLoss import DiscriminativeLoss
from LaneDataset import LaneDataset
import torch
import cv2
import numpy as np
from ENet import ENet
from matplotlib import pyplot as plt
from sklearn.cluster import DBSCAN, OPTICS
from LaneNetCluster import LaneNetCluster
import sys
DEFAULT_SIZE = (256, 512)  # network input size as (height, width)


def run_image_once(lane_net, data0):
    """Run one forward pass of the lane net and cluster lane pixels.

    :param lane_net: trained ENet producing binary + instance logit maps
    :param data0: input tensor; assumed (1, 3, H, W) float — TODO confirm
    :return: (mask_image, lane_lines, cluster_index) from LaneNetCluster
    """
    # load dataset
    #######################################
    # calc data
    bin_logits, inst_logits = lane_net.forward(data0)
    ###########################################
    # image show
    img_org = data0.cpu().detach().numpy()
    img_org = np.array(np.transpose(np.squeeze(img_org), (1, 2, 0)), dtype=np.uint8)
    # cv2.imshow('img', img_org)
    # NOTE(review): img_gray is computed but never used below.
    img_gray = np.mean(img_org, axis=2)
    bin_logits = bin_logits.cpu().detach().numpy()[0]
    y212345 = inst_logits.cpu().detach().numpy()[0]
    lnc = LaneNetCluster()
    mask_image, lane_lines, cluster_index = lnc.get_lane_lines(bin_logits, y212345)
    return mask_image, lane_lines, cluster_index
if __name__ == '__main__':
    # Inference script: load a checkpoint, run one image, plot results.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    torch.cuda.empty_cache()
    # argv[1]: model path (optional)
    if len(sys.argv)<2:
        model_path = 'lanenet_epoch_2_iter_550_batch_2.model'
    else:
        model_path = sys.argv[1]
    # argv[2]: image path (optional); image is resized to DEFAULT_SIZE and
    # reshaped to (1, C, H, W) float32.
    if len(sys.argv)<3:
        #data0 = cv2.imread('/data/tusimple/clips/0531/1492626270684175793/1.jpg')
        data0 = cv2.imread('./pics/1.jpg')
        data0 = cv2.resize(data0, (DEFAULT_SIZE[1], DEFAULT_SIZE[0]))
        data0 = np.array(np.transpose(data0, (2, 0, 1)), dtype=np.float32)
        data0 = data0.reshape([1, data0.shape[0], data0.shape[1], data0.shape[2]])
    else:
        data0 = cv2.imread(sys.argv[2])
        data0 = cv2.resize(data0, (DEFAULT_SIZE[1], DEFAULT_SIZE[0]))
        data0 = np.array(np.transpose(data0, (2, 0, 1)), dtype=np.float32)
        data0 = data0.reshape([1, data0.shape[0], data0.shape[1], data0.shape[2]])
    # NOTE(review): .cuda() crashes on CPU-only hosts even though `device`
    # was computed above — should be .to(device).
    data0 = torch.from_numpy(data0).cuda()
    # argv[3]: non-zero to plot (default 1)
    if len(sys.argv)<4:
        is_plot=1
    else:
        is_plot = int(sys.argv[3])
    #######################
    #load model
    lane_net = ENet()
    lane_net.load_state_dict(torch.load(model_path))
    lane_net.to(device)
    mask_image, lane_lines, cluster_index = run_image_once(lane_net, data0)
    if is_plot:
        # one subplot for the original, one for the mask, one per lane
        f, axarr = plt.subplots(len(lane_lines)+2, 1)
        axarr[0].imshow(cv2.imread('./pics/1.jpg')) #FIXME: this should be replace with real data???
        axarr[1].imshow(mask_image)
        for i in range(len(lane_lines)):
            axarr[2+i].plot(lane_lines[i][1],lane_lines[i][0])
            axarr[2+i].set_xlim([0, DEFAULT_SIZE[1]])
            axarr[2 + i].set_ylim([DEFAULT_SIZE[0],0])
            axarr[2 + i].set_aspect('equal')
        plt.savefig('./pics/run_resultimage.png')
        plt.show()
```
#### File: Jiangshan00001/lanenet_test/train.py
```python
import random
import time
import gc
from DiscriminativeLoss import DiscriminativeLoss
from ENet import ENet
from LaneDataset import LaneDataset
DEFAULT_SIZE = (256, 512)  # network input size as (height, width)
import torch
from PIL import Image
import json
import os
import cv2
import numpy as np
import glob
##############################################
##################################################
##################################################
# TODO: Train segmentation and instance segmentation
from torch.autograd import Variable
from torch.utils.data import DataLoader
from matplotlib import pyplot as plt
def train_loop(dataloader, model, loss_fn, optimizer):
    """Train the lane net for 5 epochs over *dataloader*.

    Saves checkpoints every 10 iterations and writes the loss history to
    lanenet_loss.txt.  Uses the module-level `device` and the
    `batch_size` global from the __main__ block (file name only).

    NOTE(review): the *optimizer* argument is immediately shadowed by a
    freshly created Adam optimizer below, so the caller's optimizer (SGD
    in __main__) is never used — confirm whether this is intentional.
    """
    global device
    learning_rate = 5e-4
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(params, lr=learning_rate, weight_decay=0.0002)
    # plotting switches: overview every plot_iter iters, per-loss detail
    # every plot_detail_iter iters
    is_plot_show_detail=False
    is_plot=False
    plot_iter=20
    plot_detail_iter=300
    if is_plot:
        f2, axarrorg = plt.subplots(4, 5)
    if is_plot_show_detail:
        f, axarr = plt.subplots(2, 4)
    # org
    # bin inst labels
    # bin dist
    # bin img
    # inst dist
    # inst img
    loss_record=[]
    for epoch in range(0,5):
        torch.cuda.empty_cache()
        model.train()
        t1=time.time()
        for iter, (X, seg_img, inst_img) in enumerate(dataloader):
            #if (iter %(int(random.randint(1,2))+1)==0):
            #    #skip some to speedup
            #    continue
            X = Variable(X)
            seg_img = Variable(seg_img)
            inst_img = Variable(inst_img)
            if torch.cuda.is_available():
                X=X.cuda()
                seg_img = seg_img.cuda()
                inst_img=inst_img.cuda()
            optimizer.zero_grad()
            # mixed-precision forward pass
            with torch.cuda.amp.autocast():
                # Compute prediction and loss
                #print('model start', time.time()-t1)
                seg_out, inst_out = model(X)
                #print('loss start', time.time() - t1)
                if is_plot_show_detail and (iter % plot_detail_iter == 0):
                    bin_loss, inst_loss = loss_fn(seg_out, seg_img, inst_out, inst_img, plt_bin=axarr[0], plt_inst=axarr[1])
                else:
                    bin_loss, inst_loss = loss_fn(seg_out, seg_img, inst_out, inst_img)
                loss_all = bin_loss + inst_loss
            #print('Backpropagation start', time.time() - t1)
            # Backpropagation
            loss_all.backward()
            optimizer.step()
            # log + checkpoint every 10 iterations
            if iter % 10 == 0:
                print('epoch:{}. iter:{}. bin_loss:{}. inst_loss:{}. loss_all:{}, time:{}'.format(epoch, iter, bin_loss, inst_loss,loss_all,time.time()-t1))
                loss_record.append(loss_all.item())
                torch.save(model.state_dict(),
                           f"lanenet_epoch_{epoch}_iter_{iter}_batch_{batch_size}.model")
            if is_plot and(iter%plot_iter==0):
                axarrorg[0, 0].set_title(f'iter:{iter}')
                img_org = np.array(np.transpose(np.squeeze(X.cpu().detach().numpy()[0]), (1, 2, 0)), dtype=np.uint8)
                axarrorg[0, 0].imshow(img_org)
                cv2.imshow('img_org', img_org)
                img_gray = np.mean(img_org, axis=2)
                axarrorg[1, 0].imshow(img_gray * 0.1 + seg_img.cpu().detach().numpy()[0] * 10)
                axarrorg[1, 1].imshow(img_gray * 0.1 + inst_img.cpu().detach().numpy()[0]*100)
                for i in range(len(seg_out[0])):
                    axarrorg[2, i].imshow(seg_out[0][i].cpu().detach().numpy())
                for i in range(len(inst_out[0])) :
                    axarrorg[3, i].imshow(inst_out[0][i].cpu().detach().numpy())
                #plt.draw()
                #plt.show(block=False)
                plt.pause(1)
        # end-of-epoch checkpoint (iter field fixed to the sentinel 12345)
        torch.save(model.state_dict(),
                   f"lanenet_epoch_{epoch}_iter_{12345}_batch_{batch_size}.model")
    # final checkpoint after all epochs
    torch.save(model.state_dict(),
               f"lanenet_epoch_{12345}_iter_{12345}_batch_{batch_size}.model")
    to_rec=json.dumps(loss_record)
    f=open('lanenet_loss.txt', 'w')
    f.write(to_rec)
    f.close()
if __name__=='__main__':
    # Training entry point: dataset, model, loss, optimizer, then train.
    ldata = LaneDataset('/data/tusimple')
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    batch_size = 2  # read by train_loop via the module namespace
    print('curr device:', device)
    torch.cuda.empty_cache()
    model = ENet()
    #model_path = 'lanenet_epoch_1_iter_10_batch_2.model'
    #model.load_state_dict(torch.load(model_path))
    # (commented) resume from a previously trained checkpoint
    model.to(device)
    loss_fn = DiscriminativeLoss()
    train_dataloader = DataLoader(ldata, batch_size=batch_size, shuffle=True)
    # NOTE(review): this SGD optimizer is discarded — train_loop creates
    # its own Adam optimizer internally.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
    train_loop(train_dataloader, model, loss_fn, optimizer)
``` |
{
"source": "Jiangshan00001/pylibcamera",
"score": 2
} |
#### File: pylibcamera/test/libtest2.py
```python
from pylibcamera import lvid_api as cam_vid
from pylibcamera import ffi_vid as ffi
import time
import numpy as np
import cv2
import sys
def imagesc_show(datab):
    """Display a raw (e.g. 12-bit) image scaled to 8-bit, matlab
    imagesc-style: min-max normalize, downscale 4x, equalize, show.

    :param datab: 2-D (H, W) numpy array of raw pixel values
    """
    print('datab shape:', datab.shape)
    height, width = datab.shape[0:2]
    mmax = np.max(datab)
    mmin = np.min(datab)
    print('max min:', mmax, mmin)
    # debug histogram over the assumed 12-bit range
    hist1, bins = np.histogram(datab.ravel(), 256, [0, 4096])
    print(hist1)
    datab = datab.astype(np.float32)
    # Guard the flat-image case (mmax == mmin) so we do not divide by zero.
    span = (mmax - mmin) or 1
    I_cv2_norm = (datab - mmin) * 255.0 / span
    I_cv2_norm = np.clip(I_cv2_norm, 0, 255)
    I_cv2_norm = I_cv2_norm.astype(np.uint8)
    # BUG FIX: cv2.resize's third positional argument is `dst`, not the
    # interpolation mode — the flag must be passed as a keyword.
    img_uint16_data2 = cv2.resize(I_cv2_norm, (width // 4, height // 4),
                                  interpolation=cv2.INTER_NEAREST)
    print('img shape:', img_uint16_data2.shape, img_uint16_data2.dtype)
    img_uint16_data2 = cv2.equalizeHist(img_uint16_data2)
    cv2.imshow('test', img_uint16_data2)
    cv2.waitKey(1)
if __name__=='__main__':
    # Grab MJPEG frames from the libcamera wrapper for ~3 seconds and
    # dump each frame to test_<n>.jpg while printing the frame rate.
    width = 4056
    height = 3040
    #width = 1920
    #height = 1080
    print('cam_vid start')
    inst=cam_vid.lvid_init()
    print('cam_vid_set_arg start')
    # camera parameters are passed as byte-string key/value pairs
    cam_vid.lvid_set_arg(inst, b'camera', b'0')
    #cam_vid.lvid_set_arg(inst, b'transform', b'0')
    cam_vid.lvid_set_arg(inst, b'height', str(height).encode('utf-8'))
    cam_vid.lvid_set_arg(inst, b'width', str(width).encode('utf-8'))
    cam_vid.lvid_set_arg(inst, b'codec', b'mjpeg')
    cam_vid.lvid_set_arg(inst, b'quality', b'90')
    cam_vid.lvid_set_arg(inst, b'shutter', b'10000')
    cam_vid.lvid_set_arg(inst, b'verbose', b'1')
    cam_vid.lvid_print_parameters(inst)
    print('cam_vid_get_frame_size start')
    frame_size=cam_vid.lvid_get_frame_size(inst)
    print('frame size:', frame_size)
    #buffer=b'\x00'*frame_size
    print('cam_vid_start_camera start')
    cam_vid.lvid_start_camera(inst)
    print('cam_vid_wait_for_frame start')
    t1=time.time()
    cnt=0
    sec_cnt=0
    time_stamp = ffi.new("unsigned long [2]")
    #raw_buf = ffi.new("unsigned char [" + str(int(frame_size)) +']')
    # NOTE(review): size_of_one_color is computed but never used.
    size_of_one_color=int(width*height//2//2)
    # back the C frame buffer with a numpy array (zero-copy cast)
    buffer_np = np.zeros(frame_size, dtype=np.uint8)
    raw_buf = ffi.cast('unsigned char*', buffer_np.ctypes.data)
    file_index=0
    while True:
        siz = cam_vid.lvid_wait_for_frame(inst, raw_buf,frame_size, time_stamp)
        f=open('test_'+str(file_index)+'.jpg', 'wb')
        f.write(ffi.buffer(raw_buf, siz) )
        f.close()
        file_index+=1
        #print(siz)
        cnt+=1
        # once per second: print the frame count and reset the counter
        if time.time()-t1>1.0:
            print('get frame:',cnt)
            cnt=0
            t1=time.time()
            sec_cnt+=1
            if sec_cnt>3.0:
                break
    print('lvid_stop_camera start')
    cam_vid.lvid_stop_camera(inst)
    print('lvid_deinit start')
    cam_vid.lvid_deinit(inst)
``` |
{
"source": "Jiangshan00001/pyuwb",
"score": 3
} |
#### File: pyuwb/pyuwb/anchor_locate_algorithm1.py
```python
from .locate_base import AnchorLocateBase
from .dingwei import Dingwei
class AnchorLocateAlgorithm1(AnchorLocateBase):
    """Derive anchor (base-station) positions from inter-anchor distance
    measurements (translated from the original Chinese docstring)."""
    def __init__(self):
        super().__init__()

    def calc(self):
        """Compute positions for every anchor in self.anchor_list.

        self.anchor_list layout:
        [
            {
                client_id: '1-2-3',
                dist: {'1-2-1': 10.5, ...},
                direction_point: 'N'
            },
            ...
        ]
        :return: [{'client_id': ..., 'pos': ...}, ...]
        """
        # position the anchors from pairwise distances (Dingwei solver)
        anchor_pos_list = Dingwei().calc_anchor_pos(self.anchor_list)
        ret = []
        for i in anchor_pos_list:
            # keep only the fields callers need
            d = {}
            d['client_id'] = i['client_id']
            d['pos'] = i['pos']
            ret.append(d)
        print('基站坐标:', ret)
        return ret
def get_dist(xy1, xy2):
    """Euclidean distance between two points given as {'x': .., 'y': ..} dicts."""
    dx = xy1['x'] - xy2['x']
    dy = xy1['y'] - xy2['y']
    return (dx * dx + dy * dy) ** 0.5
def test_rect1():
    """Self-test: anchors at the four corners of a 10 x 30 rectangle
    (translated from the original Chinese docstring).

    Distances are the exact corner-to-corner values; the solved positions
    must land within 0.1 of the expected corners.
    """
    a = AnchorLocateAlgorithm1()
    anchor_and_dist = [
        {'client_id': '1-2-0', 'direction_point': 'N',
         'dist': {'1-2-0': 0, '1-2-1': 30, '1-2-2': 10 * (10 ** 0.5), '1-2-3': 10}, 'height': 0.5},
        {'client_id': '1-2-1', 'direction_point': 'S',
         'dist': {'1-2-0': 30, '1-2-1': 0, '1-2-2': 10, '1-2-3': 10 * (10 ** 0.5)}, 'height': 1.1},
        {'client_id': '1-2-2', 'direction_point': 'E',
         'dist': {'1-2-0': 10 * (10 ** 0.5), '1-2-1': 10, '1-2-2': 0, '1-2-3': 30}, 'height': 0.5},
        {'client_id': '1-2-3', 'direction_point': None,
         'dist': {'1-2-0': 10, '1-2-1': 10 * (10 ** 0.5), '1-2-2': 30, '1-2-3': 0}, 'height': 1.1}
    ]
    a.set_anchor_info(anchor_and_dist)
    a.calc()
    l = a.get_anchor_pos()
    assert (get_dist(l[0]['pos'], {'x': 0, 'y':30})<0.1)
    assert (get_dist(l[1]['pos'], {'x': 0, 'y':0})<0.1)
    assert (get_dist(l[2]['pos'], {'x': 10, 'y':0})<0.1)
    assert (get_dist(l[3]['pos'], {'x': 10, 'y':30})<0.1)
if __name__ == '__main__':
    # Run the rectangle self-test when executed as a script.
    test_rect1()
```
#### File: pyuwb/pyuwb/client_id_utils.py
```python
__author__ = "songjiangshan"
__copyright__ = "Copyright (C) 2021 songjiangshan \n All Rights Reserved."
__license__ = ""
__version__ = "1.0"
# Device-type codes used in the middle field of a client id
# ("group-type-no").  The OLDn notes are the legacy numbering.
DEVICE_TYPE_TAG=0 #OLD3
DEVICE_TYPE_ANCHOR=1 #OLD2
DEVICE_TYPE_ANCHORZ=2 #OLD1
def client_id_remove_group(client_id):
    """Drop the leading group field of a 'group-type-no' id: '1-2-3' -> '2-3'."""
    fields = client_id.split('-')
    return str(int(fields[-2])) + '-' + str(int(fields[-1]))
def client_id_get_no(client_id: str):
    """Return the device number of a 'group-type-no' client id.

    :param client_id: id string such as '1-2-3'
    :return: the last '-'-separated field as an int
    """
    return int(client_id.rsplit('-', 1)[-1])
def client_id_get_type(client_id):
    """Return the device-type field (second to last) of a client id as an int."""
    return int(client_id.split('-')[-2])
def client_id_get_group(client_id):
    """Return the leading group field of a client id as an int."""
    return int(client_id.partition('-')[0])
def pack_client_id(group_id=0, type_int=DEVICE_TYPE_ANCHOR, no=0):
    """Build a 'group-type-no' client id string from its three fields."""
    return '{}-{}-{}'.format(group_id, type_int, no)
if __name__ == '__main__':
    # Quick manual self-checks for the parsing helpers.
    assert client_id_get_no('1-2-3') == 3
    assert client_id_get_no('1-2-20') == 20
    assert client_id_get_no('1-12-21') == 21
    print(client_id_get_type('1-12-21'))
    # assert client_id_get_type('1-2-3') == 2
    # assert client_id_get_group('1-2-3') == 1
    # assert client_id_get_group('2-2-3') == 2
#### File: pyuwb/pyuwb/tag_locate_algorithm1.py
```python
from .dingwei import Dingwei
from .mylog import logging
logger = logging.getLogger(__name__)
class TagLocateAlgorithm1(Dingwei):
    """Locate a tag from its measured distances to known anchors."""
    def __init__(self):
        super().__init__()

    def calc(self, tag_dist, tag_client_id, anchor_pos=None, using_kalman= None):
        """Given anchor positions and tag-to-anchor distances, return the
        tag position (translated from the original Chinese docstring).

        :param anchor_pos: [{client_id: '1-2-3', pos: {x:, y:, z:}}, ...]
        :param tag_dist: {'1-2-3': 10.4, '1-2-0': 5, ...}
        :param tag_client_id: id string of the tag being located
        :param using_kalman: present in the signature but not forwarded —
            the call below hard-codes using_kalman=None (NOTE: review).
        :return: {x:, y:, z:}
        """
        jizhan_pos_param = anchor_pos
        tag_pos_dict = {}
        # {'client_id': '1-3-1', 'dist': {'1-2-0': 5 * (10 ** 0.5), '1-2-1': 5 * (10 ** 0.5), '1-2-2': 5 * (10 ** 0.5), '1-2-3': 5 * (10 ** 0.5),...}, ...}
        tag_dict = {'dist': tag_dist, 'client_id': tag_client_id}
        tag_pos_one_list = self.dingwei_biaoqian_dist_to_pos(jizhan_pos_param, [tag_dict], using_kalman=None)
        logger.debug('tag_pos_one_list %s', tag_pos_one_list)
        return tag_pos_one_list[0]['pos']
def test_demo1():
    """Smoke test: tag equidistant from four rectangle-corner anchors."""
    anchor_pos = [
        {'client_id': '1-2-0', 'pos': {'x': 0, 'y': 30, 'z': 0.5}},
        {'client_id': '1-2-1', 'pos': {'x': 0, 'y': 0, 'z': 1.1}},
        {'client_id': '1-2-2', 'pos': {'x': 10, 'y': 0, 'z': 0.5}},
        {'client_id': '1-2-3', 'pos': {'x': 10, 'y': 30, 'z': 1.1}}
    ]
    # 5*sqrt(10) to each corner puts the tag at the rectangle center
    tag_dist = {'1-2-0': 5 * (10 ** 0.5), '1-2-1': 5 * (10 ** 0.5), '1-2-2': 5 * (10 ** 0.5), '1-2-3': 5 * (10 ** 0.5)}
    t = TagLocateAlgorithm1()
    t.calc(tag_dist, '1-3-1', anchor_pos)


if __name__ == '__main__':
    # Run the smoke test when executed as a script.
    test_demo1()
``` |
{
"source": "Jiangshan616/gmail-username-available",
"score": 3
} |
#### File: Jiangshan616/gmail-username-available/gmail_username_try.py
```python
# -*- coding: UTF-8 -*-
import os
import sys
import subprocess
import re
import time
import random
import string
from datetime import datetime
from datetime import timedelta
# Part 1: Username dict
# Candidate 4-letter words are harvested from words_raw.txt (a dump
# containing "word":"xxxx" entries); a random subset is tried each run.
USERNAME_LIST_FULL = []
# with open('4_letter_words.txt') as fp:
#     USERNAME_LIST_FULL = fp.read().splitlines()
with open('words_raw.txt') as fp:
    results = fp.read().splitlines()
rr = ''.join(word for word in results)
USERNAME_LIST_FULL = [word.lower() for word in re.findall('"word":"(\w{4})"', rr)]
PREFIX = 'zz'  # prepended to every candidate username
NUM_SELECT = 100  # number of random candidates tried per run
USERNAME_LIST = random.sample(USERNAME_LIST_FULL, NUM_SELECT)
print('There are total %d/%d name available' % (len(USERNAME_LIST_FULL), len(USERNAME_LIST)))
# Part 2: Helper function
# Function used to randomize credentials
def randomize(_length_):
    """Return a random string of *_length_* ASCII letters.

    :param _length_: number of characters; must be positive
    :raises ValueError: if _length_ is not positive.  (The original code
        called the undefined helpers msg()/ext() here, which would have
        crashed with NameError instead.)
    """
    if _length_ <= 0:
        raise ValueError('No valid length specified...')
    # Use the stdlib alphabet constant instead of monkey-patching a
    # `_characters_` attribute onto the `string` module, and join instead
    # of quadratic string concatenation.
    return ''.join(random.choice(string.ascii_letters) for _ in range(_length_))
# Part 3: Main function
def main():
    """Try each candidate gmail address via a user-supplied curl command
    and record the free ones in gmail_okay.txt.

    SECURITY NOTE(review): the user-pasted curl command is executed with
    shell=True — it is trusted input here, but this is shell injection
    if the link ever comes from elsewhere.
    """
    curl_link = ''
    if not curl_link:
        # prompt (Chinese): "please paste your personal curl link"
        curl_link = input('请输入你的专属链接:\n')
    # force curl into silent mode
    curl_link = re.sub(r'curl', r'curl -s', curl_link)
    print(curl_link)
    gmail_address_okay = []
    for ii in range(len(USERNAME_LIST)):
        # gmail_address_try = randomize(6)
        gmail_address_try = PREFIX + USERNAME_LIST[ii]
        print(gmail_address_try)
        # swap the GmailAddress query parameter for the current candidate
        curl_link = re.sub(r'GmailAddress=(\w+)&', r'GmailAddress={}&'.format(gmail_address_try), curl_link)
        response = subprocess.check_output(curl_link, shell=True)
        #print(response)
        if b'This username is already taken' in response:
            print('%d: The username of %s is taken\n' % (ii, gmail_address_try))
        elif b'Your username is' in response:
            print('%d: The username of %s is okay\n' % (ii, gmail_address_try))
            gmail_address_okay.append(gmail_address_try)
        else:
            # message (Chinese): "refresh your link or change IP"
            print('更新您的专属链接或者更换ip')
        # random delay to look less like a bot
        time.sleep(random.randint(1, 3))
    print('Mail okay\n')
    print(gmail_address_okay)
    with open('gmail_okay.txt', 'w') as fp:
        for mail in gmail_address_okay: fp.write(mail + '\n')
``` |
{
"source": "jiangshanmeta/meta",
"score": 3
} |
#### File: src/0690.employee-importance.690/0690.employee-importance.690.py
```python
class Solution:
    def getImportance(self, employees, id):
        """Total importance of employee *id* plus all of its (transitive)
        subordinates, via DFS over the reporting tree.

        :type employees: List[Employee]  # objects with .id/.importance/.subordinates
        :type id: int
        :rtype: int
        """
        # Build the id -> employee map once for O(1) lookups.  The
        # original bound this map to a local named `dict`, shadowing the
        # builtin — fixed here.
        self.dict = {employee.id: employee for employee in employees}
        return self.dfs(id)

    def dfs(self, id):
        """Sum importance over the subtree rooted at employee *id*."""
        node = self.dict[id]
        return node.importance + sum(self.dfs(sub) for sub in node.subordinates)
```
#### File: src/1095.find-in-mountain-array.1185/1095.find-in-mountain-array.1185.py
```python
class Solution:
    def findInMountainArray(self, target: int, mountain_arr: 'MountainArray') -> int:
        """Return an index of *target* in the mountain array (ascending
        side preferred), or -1 when absent.  Three binary searches: peak,
        then the ascending slope, then the descending slope."""
        n = mountain_arr.length()
        # --- locate the peak index ---
        peak = 0
        lo, hi = 0, n - 1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            here = mountain_arr.get(mid)
            if here < mountain_arr.get(mid + 1):
                lo = mid + 1  # still on the ascending slope
                continue
            if here > mountain_arr.get(mid - 1):
                peak = mid  # greater than both neighbours: the peak
                break
            hi = mid - 1  # on the descending slope, move left
        # --- standard search over the ascending part [0, peak-1] ---
        lo, hi = 0, peak - 1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            val = mountain_arr.get(mid)
            if val == target:
                return mid
            if val < target:
                lo = mid + 1
            else:
                hi = mid - 1
        # --- reversed-order search over the descending part [peak, n-1] ---
        lo, hi = peak, n - 1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            val = mountain_arr.get(mid)
            if val == target:
                return mid
            if val < target:
                hi = mid - 1  # descending: smaller values lie to the right
            else:
                lo = mid + 1
        return -1
```
#### File: src/1301.number-of-paths-with-max-score.1234/1301.number-of-paths-with-max-score.1234.py
```python
class Solution:
    def pathsWithMaxScore(self, board) :
        # Dynamic programming from the bottom-right corner ('S') to the
        # top-left ('E'), moving up/left/diagonally:
        # scoreDp[i][j] = max( scoreDp[i+1][j] , scoreDp[i][j+1] , scoreDp[i+1][j+1] )
        # countDp[i][j] counts the number of max-score paths (mod 1e9+7);
        # 'X' cells are obstacles with score 0 and count 0.
        L = len(board)
        modVal = 10**9+7
        # rows are allocated lazily; only rows i and i+1 are live at a time
        scoreDp = [0 for i in range(L)]
        countDp = [0 for i in range(L)]
        # bottom row: seed the start cell, then propagate leftwards
        scoreDp[L-1] = [0 for i in range(L)]
        countDp[L-1] = [0 for i in range(L)]
        countDp[L-1][L-1] = 1
        for j in range(L-2,-1,-1):
            # an 'X' blocks the rest of the bottom row
            if board[L-1][j] == 'X':
                break
            scoreDp[L-1][j] = scoreDp[L-1][j+1]+int(board[L-1][j])
            countDp[L-1][j] = 1
        for i in range(L-2,-1,-1):
            scoreDp[i] = [0 for i in range(L)]
            countDp[i] = [0 for i in range(L)]
            # rightmost column: reachable only from directly below
            if board[i][L-1] == 'X' :
                scoreDp[i][L-1] = 0
            else :
                scoreDp[i][L-1] = scoreDp[i+1][L-1]+int(board[i][L-1])
                if countDp[i+1][L-1]>0 :
                    countDp[i][L-1] = 1
            for j in range(L-2,-1,-1):
                if board[i][j] == 'X':
                    scoreDp[i][j] = 0
                    countDp[i][j] = 0
                    continue
                # 'E' (top-left) contributes no score of its own
                node = 0
                if i>0 or j>0:
                    node = int(board[i][j])
                maxVal = max(scoreDp[i+1][j],scoreDp[i][j+1],scoreDp[i+1][j+1])
                scoreDp[i][j] = maxVal+node
                # sum path counts over every predecessor achieving maxVal
                if scoreDp[i+1][j] == maxVal:
                    countDp[i][j] = (countDp[i][j]+countDp[i+1][j])%modVal
                if scoreDp[i][j+1] == maxVal:
                    countDp[i][j] = (countDp[i][j]+countDp[i][j+1])%modVal
                if scoreDp[i+1][j+1] == maxVal :
                    countDp[i][j] = (countDp[i][j]+countDp[i+1][j+1])%modVal
        # zero count at 'E' means the end is unreachable
        if countDp[0][0] == 0:
            return [0,0]
        else :
            return [scoreDp[0][0],countDp[0][0]]
``` |
{
"source": "JiangShaoYin/MTCNN",
"score": 3
} |
#### File: MTCNN/train_models/train_PNet.py
```python
from mtcnn_model import P_Net
from train import train
def train_PNet(base_dir, prefix, end_epoch, display, lr):
    """Train the P-Net stage of MTCNN; thin wrapper around train().

    :param base_dir: directory containing the prepared image lists
    :param prefix: checkpoint path prefix for saved models
    :param end_epoch: last epoch to train
    :param display: logging interval (iterations)
    :param lr: base learning rate
    """
    net_factory = P_Net
    train(net_factory, prefix, end_epoch, base_dir, display=display, base_lr=lr)
if __name__ == '__main__':
    base_dir = '../prepare_data/imglists/PNet'  # training data (image lists) directory
    model_name = 'MTCNN'
    model_path = '../data/%s_model/PNet_landmark/PNet' % model_name  # checkpoint output prefix
    prefix = model_path
    end_epoch = 30  # final epoch
    display = 1
    lr = 0.01
    train_PNet(base_dir, prefix, end_epoch, display, lr)
``` |
{
"source": "jiangshide/pdk",
"score": 2
} |
#### File: pdk/build/pdk_utils.py
```python
import os, string, sys, shutil, zipfile
def copy_dir(src_top, dest_top, dir_name, cp_option = ""):
    """copy all the files under src_top/dir_name to dest_top/dir_name.

    Python 2 module: shells out to `cp -a` (preserves mode/times/links).
    NOTE(review): rsplit below raises ValueError when dir_name contains
    no '/' — callers must pass at least a two-component path.
    """
    src_full_path = src_top + "/" + dir_name
    # do not create the leaf dir as cp will create it
    [mid_path, leaf_path] = dir_name.rsplit("/", 1)
    dest_full_path = dest_top + "/" + mid_path
    if not os.path.isdir(dest_full_path):
        os.makedirs(dest_full_path)
    print "copy dir ", src_full_path, " to ", dest_full_path
    os.system("cp -a " + " " + cp_option + " " + src_full_path + " " + dest_full_path)
def copy_dir_only_file(src_top, dest_top, dir_name):
    """copy only files directly under the given dir_name

    Non-recursive: subdirectories of src_top/dir_name are skipped.
    """
    src_full_path = src_top + "/" + dir_name
    dest_full_path = dest_top + "/" + dir_name
    if not os.path.isdir(dest_full_path):
        os.makedirs(dest_full_path)
    children = os.listdir(src_full_path)
    for child in children:
        child_full_name = src_full_path + "/" + child
        if os.path.isfile(child_full_name):
            print "copy file ", child_full_name, " to ", dest_full_path
            os.system("cp -a " + child_full_name + " " + dest_full_path)
def copy_files(src_top, dest_top, files_name):
    """copy files from src_top to dest_top.
    Note that files_name can include directories which will be created
    under dest_top

    NOTE(review): like copy_dir, rsplit raises ValueError when
    files_name has no '/' component.
    """
    src_full_path = src_top + "/" + files_name
    # do not create the leaf dir as cp will create it
    [mid_path, leaf_path] = files_name.rsplit("/", 1)
    dest_full_path = dest_top + "/" + mid_path
    if not os.path.isdir(dest_full_path):
        os.makedirs(dest_full_path)
    print "copy files ", src_full_path, " to ", dest_full_path
    os.system("cp -a " + src_full_path + " " + dest_full_path)
def copy_file_if_exists(src_top, dest_top, file_name):
    """copy file src_top/file_name to dest_top/file_name
    returns false if such file does not exist in source."""
    src_full_name = src_top + "/" + file_name
    if not os.path.isfile(src_full_name):
        print "file " + src_full_name + " not found"
        return False
    dest_file = dest_top + "/" + file_name
    # create intermediate directories on the destination side as needed
    dest_dir = os.path.dirname(dest_file)
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    print "copy file ", src_full_name, " to ", dest_file
    os.system("cp -a " + src_full_name + " " + dest_file)
    return True
def copy_file_new_name_if_exists(src_full_name, dest_dir, dest_file):
    """copy src_full_name (including dir + file name) to dest_dir/dest_file
    will be used when renaming is necessary

    Returns False (after printing) when the source file is missing.
    """
    if not os.path.isfile(src_full_name):
        print "file " + src_full_name + " not found"
        return False
    dest_full_name = dest_dir + "/" + dest_file
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    print "copy file ", src_full_name, " to ", dest_full_name
    os.system("cp -a " + src_full_name + " " + dest_full_name)
    return True
def list_files(dir_name, dir_exclusion_filter = ""):
    """Recursively list all files (including symlinks) under *dir_name*.

    Subtrees whose directory path ends with *dir_exclusion_filter* are
    skipped entirely (no filtering when the string is empty).  Returns a
    possibly empty list of full paths.
    """
    collected = []
    if dir_exclusion_filter != "" and dir_name.endswith(dir_exclusion_filter):
        return collected
    for entry in os.listdir(dir_name):
        full_path = dir_name + "/" + entry
        # symlinks count as files and are never followed into
        if os.path.islink(full_path) or os.path.isfile(full_path):
            collected.append(full_path)
        elif os.path.isdir(full_path):
            collected.extend(list_files(full_path, dir_exclusion_filter))
    return collected
def src_newer_than_dest(src, dest):
    """Return True unless *dest* exists with a strictly newer mtime than *src*.

    Equal mtimes count as "src newer"; a missing dest always returns True.
    Raises OSError if *src* itself does not exist.
    """
    src_mtime = os.path.getmtime(src)
    if not (os.path.isfile(dest) or os.path.isdir(dest)):
        return True
    return os.path.getmtime(dest) <= src_mtime
def remove_if_exists(entry):
    # Best-effort recursive delete of a file or directory tree; silently
    # does nothing when the path is absent.  Shells out to `rm -rf`.
    if os.path.exists(entry):
        os.system("rm -rf " + entry)
def list_files_in_zip(zip_file_path, no_directory = True):
    """List all entries inside the archive at *zip_file_path*.

    :param zip_file_path: path to a zip archive
    :param no_directory: when True (default), directory entries
        (names ending in '/') are filtered out
    :return: list of entry names; empty list if the path is not a zip
    """
    if not zipfile.is_zipfile(zip_file_path):
        return []
    # `with` fixes the original's leaked ZipFile handle.
    with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
        return [entry for entry in zip_file.namelist()
                if not no_directory or not entry.endswith("/")]
def save_list(list_to_save, file_name):
    """Write each entry of *list_to_save* on its own line in *file_name*.

    Counterpart of load_list().
    """
    # `with` fixes the unclosed-handle risk if a write fails.
    with open(file_name, "w") as f:
        for entry in list_to_save:
            f.write("%s\n" % entry)
def load_list(file_name):
    """Read *file_name* back into a list of whitespace-stripped lines.

    Returns [] when the file does not exist.  Counterpart of save_list().
    """
    if not os.path.isfile(file_name):
        return []
    # `with` fixes the original's leaked file handle from
    # `for line in open(...)`.
    with open(file_name, "r") as f:
        return [line.strip() for line in f]
def remove_files_listed(top_dir, files_list):
    """Delete every relative path in *files_list* under *top_dir*.

    Uses `rm -f`, so missing files are ignored silently.
    """
    top_dir_ = top_dir + "/"
    for entry in files_list:
        path = top_dir_ + entry
        print "remove " + path
        os.system("rm -f " + path)
def execute_command(command, error_msg):
    """Run *command* through the shell; raise RuntimeError(*error_msg*)
    when the exit status is non-zero."""
    status = os.system(command)
    if status != 0:
        raise RuntimeError(error_msg)
```
#### File: pdk/build/prepare_pdk_tree.py
```python
import os
import re
import sys
import subprocess
class ManifestHandler(object):
    """Extracts (path, groups) attribute pairs from repo-manifest XML lines.

    Two regexes cover both attribute orders (path before groups and the
    reverse); `current` remembers which pattern matched last so the
    common order is tried first on subsequent lines.
    """
    def __init__(self):
        # current pattern
        self.current = 0
        self.patterns = [re.compile('path=\"([^\"]*)\".*groups=\"([^\"]*)\"'), \
                         re.compile('groups=\"([^\"]*)\".*path=\"([^\"]*)\"')]

    def getAttribs(self, line):
        """Return [path, groups] parsed from *line*; [None, None] when
        the line carries neither attribute."""
        attrib = [None, None]  # list of path, groups
        m = self.patterns[self.current].search(line)
        # if match fails, try both pattens and change default one
        # if match founds
        if m is None:
            notCurrent = 1 - self.current
            mOther = self.patterns[notCurrent].search(line)
            if mOther is not None:
                # toggle
                self.current = notCurrent
                m = mOther
        if m is not None:
            # group order depends on which pattern matched
            if (self.current == 0):
                attrib[0] = m.group(1)
                attrib[1] = m.group(2)
            else:
                attrib[0] = m.group(2)
                attrib[1] = m.group(1)
        return attrib
def isInGroups(groupsAttrib, groups):
    """True when any name in *groups* appears in the comma-separated
    *groupsAttrib* string; False for a missing (None) attribute."""
    if groupsAttrib is None:
        return False
    attrib_list = groupsAttrib.split(',')
    return any(group in attrib_list for group in groups)
def getPDKDirs(manifest, groups):
    """Parse *manifest* (repo manifest XML) and return the project paths
    whose groups attribute matches any entry in *groups*."""
    handler = ManifestHandler()
    subdirs = []
    with open(manifest, 'r') as f:
        for line in f:
            path, groupsAttrib = handler.getAttribs(line)
            if isInGroups(groupsAttrib, groups):
                subdirs.append(path)
    return subdirs
def create_symbolic_link(src_top, dest_top, dir_name):
    """(Re)create dest_top/dir_name as a symlink to src_top/dir_name."""
    link_target = src_top + "/" + dir_name
    link_path = dest_top + "/" + dir_name
    # remove any existing entry first to prevent a recursive/stale link
    os.system("rm -rf " + link_path)
    os.system("ln -s " + link_target + " " + link_path)
# The only file not from manifest.
copy_files_list = [ "Makefile" ]  # symlinked (not bind-mounted) into the target tree
MOUNT_FILE = 'mount_pdk.sh'    # generated bind-mount helper script
UMOUNT_FILE = 'umount_pdk.sh'  # generated unmount helper script
SH_HEADER = "#!/bin/bash\n#Auto-generated file, do not edit!\n"
def main(argv):
    """Create a PDK source tree under a target dir using bind mounts.

    Project directories are selected from the repo manifest by group and
    bind-mounted from the current tree into the target directory; helper
    scripts (mount_pdk.sh / umount_pdk.sh) are generated to (un)do the
    mounts.
    """
    manifestFile = ".repo/manifest.xml"
    groups = ["pdk"]
    if len(argv) < 2:
        print "create_pdk_tree.py target_dir [-m manifest] [-a dir_to_add] pdk_groups"
        print "   ex) create_pdk_tree.py ../tmp grouper"
        print "   -a option is to include a directory which does not belong to specified group"
        print "      multiple -a options can be specified like -a frameworks/base -a external/aaa"
        print "   Note that pdk group is included by default"
        print "   Do not create target_dir under the current source tree. This will cause build error."
        sys.exit(1)
    targetDir = argv[1]
    argc = 2
    subdirs = []
    if len(argv) > 2:
        if argv[2] == "-m":
            manifestFile = argv[3]
            argc += 2
    # Remaining args are either "-a extra_dir" additions or plain group names.
    while argc < len(argv):
        if argv[argc] == "-a":
            argc += 1
            subdirs.append(argv[argc])
        else:
            groups.append(argv[argc])
        argc += 1
    sourceDir = os.path.abspath('.')
    targetDir = os.path.abspath(targetDir)
    # Scan "mount" output to detect whether the target is already mounted.
    p = subprocess.Popen("mount", stdout = subprocess.PIPE)
    targetMounted = False
    for line in p.stdout:
        if targetDir in line:
            targetMounted = True
    p.stdout.close()
    if targetMounted:
        print "target dir already mounted"
        if os.path.exists(targetDir + '/' + UMOUNT_FILE):
            print "Use existing file", UMOUNT_FILE, "to unmount"
            sys.exit(1)
        else:
            print "Will create scripts, but may need manual unmount"
    subdirs += getPDKDirs(manifestFile, groups)
    print subdirs
    os.system("mkdir -p " + targetDir)
    # Generate the mount/umount helper scripts alongside the tree.
    mountf = open(targetDir + '/' + MOUNT_FILE, 'w+')
    mountf.write(SH_HEADER)
    umountf = open(targetDir + '/' + UMOUNT_FILE, 'w+')
    umountf.write(SH_HEADER)
    for subdir in subdirs:
        os.system("mkdir -p " + targetDir + '/' + subdir)
        mountf.write("mount --bind " + sourceDir + "/" + subdir + " " + targetDir + "/" + subdir + \
                     "\n")
        umountf.write("umount " + targetDir + "/" + subdir + "\n")
    for file_name in copy_files_list:
        create_symbolic_link(sourceDir, targetDir, file_name)
    mountf.close()
    umountf.close()
    # Only the owner may execute the generated scripts.
    os.system("chmod 700 " + targetDir + '/' + MOUNT_FILE)
    os.system("chmod 700 " + targetDir + '/' + UMOUNT_FILE)
if __name__ == '__main__':
    # Script entry point: forward the raw argv to main().
    main(sys.argv)
``` |
{
"source": "jiangsichu/mobly",
"score": 2
} |
#### File: tests/mobly/utils_test.py
```python
import io
import mock
import os
import platform
import shutil
import socket
import subprocess
import tempfile
import time
from future.tests.base import unittest
import portpicker
import psutil
from mobly import utils
# Port number returned by the mocked portpicker in the tests below.
MOCK_AVAILABLE_PORT = 5
class UtilsTest(unittest.TestCase):
    """This test class has unit tests for the implementation of everything
    under mobly.utils.
    """

    def setUp(self):
        system = platform.system()
        # Windows has no "sleep" binary; "timeout" is the closest built-in.
        self.sleep_cmd = 'timeout' if system == 'Windows' else 'sleep'
        self.tmp_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def test_start_standing_subproc(self):
        # NOTE(review): if start_standing_subprocess itself raises, `p` is
        # unbound and the finally block raises NameError -- confirm intended.
        try:
            p = utils.start_standing_subprocess([self.sleep_cmd, '0.1'])
            p1 = psutil.Process(p.pid)
            self.assertTrue(p1.is_running())
        finally:
            p.stdout.close()
            p.stderr.close()
            p.wait()

    @mock.patch('subprocess.Popen')
    def test_start_standing_subproc_without_env(self, mock_Popen):
        p = utils.start_standing_subprocess(self.sleep_cmd)
        # env=None must be forwarded so the child inherits os.environ.
        mock_Popen.assert_called_with(
            self.sleep_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            env=None,
        )

    @mock.patch('subprocess.Popen')
    def test_start_standing_subproc_with_custom_env(self, mock_Popen):
        mock_env = mock.MagicMock(spec=dict)
        p = utils.start_standing_subprocess(self.sleep_cmd, env=mock_env)
        # The caller-supplied env object must be passed through unchanged.
        mock_Popen.assert_called_with(
            self.sleep_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            env=mock_env,
        )

    def test_stop_standing_subproc(self):
        p = utils.start_standing_subprocess([self.sleep_cmd, '4'])
        p1 = psutil.Process(p.pid)
        utils.stop_standing_subprocess(p)
        self.assertFalse(p1.is_running())

    def test_stop_standing_subproc_wihtout_pipe(self):
        # NOTE(review): "wihtout" typo in the test name; renaming would
        # change reported test ids, so it is only flagged here.
        p = subprocess.Popen([self.sleep_cmd, '4'])
        self.assertIsNone(p.stdout)
        p1 = psutil.Process(p.pid)
        utils.stop_standing_subprocess(p)
        self.assertFalse(p1.is_running())

    def test_create_dir(self):
        new_path = os.path.join(self.tmp_dir, 'haha')
        self.assertFalse(os.path.exists(new_path))
        utils.create_dir(new_path)
        self.assertTrue(os.path.exists(new_path))

    def test_create_dir_already_exists(self):
        # create_dir must be a no-op (not an error) on an existing path.
        self.assertTrue(os.path.exists(self.tmp_dir))
        utils.create_dir(self.tmp_dir)
        self.assertTrue(os.path.exists(self.tmp_dir))

    # NOTE(review): mock.patch decorators apply bottom-up, so in the next two
    # tests the parameter names are swapped relative to the mocks they
    # actually receive. Harmless here (neither mock is used by name in the
    # first test), but worth renaming for clarity.
    @mock.patch(
        'mobly.controllers.android_device_lib.adb.list_occupied_adb_ports')
    @mock.patch('portpicker.PickUnusedPort', return_value=MOCK_AVAILABLE_PORT)
    def test_get_available_port_positive(self, mock_list_occupied_adb_ports,
                                         mock_pick_unused_port):
        self.assertEqual(utils.get_available_host_port(), MOCK_AVAILABLE_PORT)

    @mock.patch(
        'mobly.controllers.android_device_lib.adb.list_occupied_adb_ports',
        return_value=[MOCK_AVAILABLE_PORT])
    @mock.patch('portpicker.PickUnusedPort', return_value=MOCK_AVAILABLE_PORT)
    def test_get_available_port_negative(self, mock_list_occupied_adb_ports,
                                         mock_pick_unused_port):
        # Every candidate port is "occupied", so the lookup must give up.
        with self.assertRaisesRegex(utils.Error, 'Failed to find.* retries'):
            utils.get_available_host_port()

    @mock.patch(
        'mobly.controllers.android_device_lib.adb.list_occupied_adb_ports')
    def test_get_available_port_returns_free_port(
            self, mock_list_occupied_adb_ports):
        """Verifies logic to pick a free port on the host.

        Test checks we can bind to either an ipv4 or ipv6 socket on the port
        returned by get_available_host_port.
        """
        port = utils.get_available_host_port()
        got_socket = False
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                s = socket.socket(family, socket.SOCK_STREAM)
                got_socket = True
                break
            except socket.error:
                continue
        self.assertTrue(got_socket)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind(('localhost', port))
        finally:
            s.close()

    def test_load_file_to_base64_str_reads_bytes_file_as_base64_string(self):
        tmp_file_path = os.path.join(self.tmp_dir, 'b64.bin')
        expected_base64_encoding = u'SGVsbG93IHdvcmxkIQ=='
        with io.open(tmp_file_path, 'wb') as f:
            f.write(b'Hellow world!')
        self.assertEqual(
            utils.load_file_to_base64_str(tmp_file_path),
            expected_base64_encoding)

    def test_load_file_to_base64_str_reads_text_file_as_base64_string(self):
        tmp_file_path = os.path.join(self.tmp_dir, 'b64.bin')
        expected_base64_encoding = u'SGVsbG93IHdvcmxkIQ=='
        with io.open(tmp_file_path, 'w', encoding='utf-8') as f:
            f.write(u'Hellow world!')
        self.assertEqual(
            utils.load_file_to_base64_str(tmp_file_path),
            expected_base64_encoding)

    def test_load_file_to_base64_str_reads_unicode_file_as_base64_string(self):
        tmp_file_path = os.path.join(self.tmp_dir, 'b64.bin')
        # Multi-byte UTF-8 content must round-trip through base64 as well.
        expected_base64_encoding = u'6YCa'
        with io.open(tmp_file_path, 'w', encoding='utf-8') as f:
            f.write(u'\u901a')
        self.assertEqual(
            utils.load_file_to_base64_str(tmp_file_path),
            expected_base64_encoding)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
``` |
{
"source": "jiangsinan/swiper",
"score": 2
} |
#### File: swiper/common/middleware.py
```python
from django.utils.deprecation import MiddlewareMixin
from common import errors
from common.errors import LogicException, LogicError
from lib.http import render_json
from user.models import Users
class AuthMiddleware(MiddlewareMixin):
    """Require a logged-in session for every API view except the whitelist."""

    # Endpoints reachable without authentication (phone verify / login).
    WHITE_LIST = [
        '/api/user/verify-phone',
        '/api/user/login',
    ]

    def process_request(self,request):
        # Skip the auth check for whitelisted endpoints.
        if request.path in self.WHITE_LIST:
            return None
        uid = request.session.get('uid')
        if uid is None:
            # Short-circuit with a JSON error; the view is never called.
            return render_json(code=errors.LOGIN_REQUIRED)
        # NOTE(review): raises Users.DoesNotExist for a stale session uid --
        # confirm this is caught upstream (e.g. by an exception middleware).
        request.user = Users.objects.get(id=uid)
class LogicExceptionMiddleware(MiddlewareMixin):
    """Convert known business-logic exceptions into JSON error responses."""

    def process_exception(self, request, exception):
        # Only translate the app's own logic errors; anything else falls
        # through to Django's default error handling (returns None).
        if isinstance(exception, (LogicException, LogicError)):
            return render_json(code=exception.code)
```
#### File: swiper/lib/http.py
```python
from django.conf import settings
from django.http import JsonResponse
from common import errors
def render_json(code=errors.OK, data=None):
    """Serialize an API response as a JSON HTTP response.

    Parameters
    ----------
    code : int
        Business status code (defaults to errors.OK).
    data : object or None
        Optional payload; included under the "data" key whenever provided,
        even when it is falsy (empty list/dict, 0, "").
    """
    result = {
        'code': code
    }
    # Fix: the original used "if data:", which silently dropped legitimate
    # falsy payloads such as [], {} or 0.
    if data is not None:
        result['data'] = data
    if settings.DEBUG:
        # Pretty-print responses during development.
        json_dump_params = {'indent': 4, 'ensure_ascii': False}
    else:
        # Compact output in production. Fix: separators is
        # (item_separator, key_separator); the original (':', ',') was
        # reversed and produced malformed JSON.
        json_dump_params = {'separators': (',', ':')}
    return JsonResponse(result, safe=False, json_dumps_params=json_dump_params)
```
#### File: swiper/lib/sms.py
```python
import requests
from common import config
def send(phone_num, code):
    """Send an SMS verification *code* to *phone_num* via the YZX gateway.

    Returns
    -------
    bool
        True only when the gateway confirms delivery (response code
        '000000'); False on any HTTP or gateway-level failure.
    """
    # Copy the template so the shared config dict is never mutated.
    params = config.YZX_SMS_PARAMS.copy()
    params['mobile'] = phone_num
    params['param'] = code
    # Fix: removed leftover debug print statements. A timeout is set so a
    # stalled gateway cannot hang the request worker forever (raises
    # requests.exceptions.Timeout, surfaced like any other send failure).
    resp = requests.post(config.YZX_SMS_URL, json=params, timeout=10)
    if resp.status_code == 200:
        result = resp.json()
        if result.get('code') == '000000':
            return True
    return False
``` |
{
"source": "jiangsutx/SPMC_VideoSR",
"score": 2
} |
#### File: jiangsutx/SPMC_VideoSR/main_videosr_deploy_4x3f.py
```python
import os
import time
import glob
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import control_flow_ops
import scipy.misc
import random
import subprocess
from datetime import datetime
from math import ceil
# from modules import BasicConvLSTMCell
# from modules.model_easyflow import *
from modules.videosr_ops_lite import *
# Pick the GPU with the most free memory by parsing `nvidia-smi` output
# (the third whitespace-separated field of each "Free" line is the MiB value).
os.environ["CUDA_VISIBLE_DEVICES"]=str(np.argmax( [int(x.split()[2]) for x in subprocess.Popen("nvidia-smi -q -d Memory | grep -A4 GPU | grep Free", shell=True, stdout=subprocess.PIPE).stdout.readlines()]))
# Default input locations for test-time inference and training data.
DATA_TEST='./data/test/calendar'
# DATA_TEST='./data/test/hitachi_isee5_001'
DATA_TRAIN='./data/train/'
class VIDEOSR(object):
    """Video super-resolution inference wrapper around a frozen SPMC graph."""

    def __init__(self):
        # Number of consecutive low-res frames fed per SR output frame.
        self.num_frames = 3
        # Upscaling factor of the pretrained model.
        self.scale_factor = 4

    def test(self, dataPath=None, scale_factor=4, num_frames=3):
        """Run SR over the PNG sequence in DATA_TEST and save results.

        Note: dataPath is overwritten with the module-level DATA_TEST, so
        the parameter is currently ignored.
        """
        import scipy.misc
        dataPath = DATA_TEST
        inList = sorted(glob.glob(os.path.join(dataPath, 'input{}/*.png').format(scale_factor)))
        inp = [scipy.misc.imread(i).astype(np.float32) / 255.0 for i in inList]
        # inp = [scipy.misc.imresize(i, [120, 160]) / 255.0 for i in inp]
        # Crop to 120x160 to match the frozen graph's fixed input size
        # (spmc_120_160_4x3f.pb below).
        inp = [i[:120, :160, :] for i in inp]
        print 'Testing path: {}'.format(dataPath)
        print '# of testing frames: {}'.format(len(inList))
        DATA_TEST_OUT = DATA_TEST+'_SR_{}'.format(datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
        os.mkdir(DATA_TEST_OUT)
        cnt = 0
        self.scale_factor = scale_factor
        reuse = False
        for idx0 in xrange(len(inList)):
            cnt += 1
            T = num_frames / 2
            # Build a temporal window centered on idx0, replicating the
            # first/last frame at the sequence boundaries.
            imgs = [inp[0] for i in xrange(idx0 - T, 0)]
            imgs.extend([inp[i] for i in xrange(max(0, idx0 - T), idx0)])
            imgs.extend([inp[i] for i in xrange(idx0, min(len(inList), idx0 + T + 1))])
            imgs.extend([inp[-1] for i in xrange(idx0 + T, len(inList) - 1, -1)])
            dims = imgs[0].shape
            if len(dims) == 2:
                # Grayscale input: add an explicit channel axis.
                imgs = [np.expand_dims(i, -1) for i in imgs]
            h, w, c = imgs[0].shape
            out_h = h * scale_factor
            out_w = w * scale_factor
            # Pad H/W up to multiples of 4 as required by the network.
            padh = int(ceil(h / 4.0) * 4.0 - h)
            padw = int(ceil(w / 4.0) * 4.0 - w)
            imgs = [np.pad(i, [[0, padh], [0, padw], [0, 0]], 'edge') for i in imgs]
            # imgs: (1, num_frames, H, W, C) batch fed to the graph.
            imgs = np.expand_dims(np.stack(imgs, axis=0), 0)
            if idx0 == 0:
                # Build the graph once on the first window; later iterations
                # only run it.
                frames_lr = tf.placeholder(dtype=tf.float32, shape=imgs.shape)
                frames_ref_ycbcr = rgb2ycbcr(frames_lr[:, T:T + 1, :, :, :])
                frames_ref_ycbcr = tf.tile(frames_ref_ycbcr, [1, num_frames, 1, 1, 1])
                with open('spmc_120_160_4x3f.pb', 'rb') as f:
                    graph_def = tf.GraphDef()
                    graph_def.ParseFromString(f.read())
                    output = tf.import_graph_def(graph_def, input_map={'Placeholder:0': frames_lr}, return_elements=['output:0'])
                    output = output[0]
                    print(output.get_shape())
                if len(dims) == 3:
                    # The network predicts luma only; upsample the reference
                    # chroma (Cb/Cr) and recombine to RGB.
                    output_rgb = ycbcr2rgb(tf.concat([output, resize_images(frames_ref_ycbcr,
                                                                            [(h + padh) * scale_factor,
                                                                             (w + padw) * scale_factor],
                                                                            method=2)[:, :, :, :, 1:3]], -1))
                else:
                    output_rgb = output
                # Crop the padding back off the outputs.
                output = output[:, :, :out_h, :out_w, :]
                output_rgb = output_rgb[:, :, :out_h, :out_w, :]
            if cnt == 1:
                sess = tf.Session()
                reuse = True
            case_path = dataPath.split('/')[-1]
            print 'Testing - ', case_path, len(imgs)
            [imgs_hr, imgs_hr_rgb] = sess.run([output, output_rgb], feed_dict={frames_lr: imgs})
            scipy.misc.imsave(os.path.join(DATA_TEST_OUT, 'y_%03d.png'%(idx0)),
                              im2uint8(imgs_hr[0, -1, :, :, 0]))
            if len(dims) == 3:
                scipy.misc.imsave(os.path.join(DATA_TEST_OUT, 'rgb_%03d.png'%(idx0)),
                                  im2uint8(imgs_hr_rgb[0, -1, :, :, :]))
        print 'SR results path: {}'.format(DATA_TEST_OUT)
def main(_):
    # Entry point invoked by tf.app.run(); the parsed-flags argument is unused.
    model = VIDEOSR()
    model.test()
if __name__ == '__main__':
    # Delegate to TensorFlow's app runner, which calls main() above.
    tf.app.run()
``` |
{
"source": "jiangsy/h-baselines",
"score": 2
} |
#### File: hbaselines/utils/train.py
```python
import argparse
from hbaselines.algorithms.off_policy import TD3_PARAMS
from hbaselines.algorithms.off_policy import SAC_PARAMS
from hbaselines.algorithms.off_policy import FEEDFORWARD_PARAMS
from hbaselines.algorithms.off_policy import GOAL_CONDITIONED_PARAMS
from hbaselines.algorithms.utils import is_sac_policy, is_td3_policy
from hbaselines.algorithms.utils import is_goal_conditioned_policy
from hbaselines.algorithms.utils import is_multiagent_policy
def get_hyperparameters(args, policy):
    """Return the hyperparameters of a training algorithm from the parser.

    Parameters
    ----------
    args : argparse.Namespace
        command-line arguments, as parsed by parse_options()
    policy : type
        the policy class to be trained; determines which optional parameter
        groups (TD3, SAC, goal-conditioned, multi-agent) are included

    Returns
    -------
    dict
        keyword arguments for the algorithm constructor, with the
        policy-specific options nested under the "policy_kwargs" key
    """
    algorithm_params = {
        "nb_train_steps": args.nb_train_steps,
        "nb_rollout_steps": args.nb_rollout_steps,
        "nb_eval_episodes": args.nb_eval_episodes,
        "actor_update_freq": args.actor_update_freq,
        "meta_update_freq": args.meta_update_freq,
        "reward_scale": args.reward_scale,
        "render": args.render,
        "render_eval": args.render_eval,
        "save_replay_buffer": args.save_replay_buffer,
        "verbose": args.verbose,
        "num_envs": args.num_envs,
        "_init_setup_model": True,
    }

    # add FeedForwardPolicy parameters
    policy_kwargs = {
        "buffer_size": args.buffer_size,
        "batch_size": args.batch_size,
        "actor_lr": args.actor_lr,
        "critic_lr": args.critic_lr,
        "tau": args.tau,
        "gamma": args.gamma,
        "use_huber": args.use_huber,
        # Colon-separated option names cannot be plain attributes, hence the
        # getattr() lookups below.
        "model_params": {
            "model_type": getattr(args, "model_params:model_type"),
            "layer_norm": getattr(args, "model_params:layer_norm"),
            "ignore_image": getattr(args, "model_params:ignore_image"),
            "image_height": getattr(args, "model_params:image_height"),
            "image_width": getattr(args, "model_params:image_width"),
            "image_channels": getattr(args, "model_params:image_channels"),
            # List-valued CLI options default to None; fall back to the
            # library defaults in FEEDFORWARD_PARAMS in that case.
            "ignore_flat_channels":
                getattr(args, "model_params:ignore_flat_channels") or
                FEEDFORWARD_PARAMS["model_params"]["ignore_flat_channels"],
            "filters":
                getattr(args, "model_params:filters") or
                FEEDFORWARD_PARAMS["model_params"]["filters"],
            "kernel_sizes":
                getattr(args, "model_params:kernel_sizes") or
                FEEDFORWARD_PARAMS["model_params"]["kernel_sizes"],
            "strides":
                getattr(args, "model_params:strides") or
                FEEDFORWARD_PARAMS["model_params"]["strides"],
        }
    }

    # add TD3 parameters
    if is_td3_policy(policy):
        policy_kwargs.update({
            "noise": args.noise,
            "target_policy_noise": args.target_policy_noise,
            "target_noise_clip": args.target_noise_clip,
        })

    # add SAC parameters
    if is_sac_policy(policy):
        policy_kwargs.update({
            "target_entropy": args.target_entropy,
        })

    # add GoalConditionedPolicy parameters
    if is_goal_conditioned_policy(policy):
        policy_kwargs.update({
            "num_levels": args.num_levels,
            "meta_period": args.meta_period,
            "intrinsic_reward_type": args.intrinsic_reward_type,
            "intrinsic_reward_scale": args.intrinsic_reward_scale,
            "relative_goals": args.relative_goals,
            "off_policy_corrections": args.off_policy_corrections,
            "hindsight": args.hindsight,
            "subgoal_testing_rate": args.subgoal_testing_rate,
            "cooperative_gradients": args.cooperative_gradients,
            "cg_weights": args.cg_weights,
        })

    # add MultiActorCriticPolicy parameters
    if is_multiagent_policy(policy):
        policy_kwargs.update({
            "shared": args.shared,
            "maddpg": args.maddpg,
        })

    # add the policy_kwargs term to the algorithm parameters
    algorithm_params['policy_kwargs'] = policy_kwargs

    return algorithm_params
def parse_options(description, example_usage, args):
    """Parse training options user can specify in command line.

    Parameters
    ----------
    description : str
        the description of the script using this parser
    example_usage : str
        an example of the runner script being used
    args : list of str
        command-line arguments

    Returns
    -------
    argparse.Namespace
        the output parser object
    """
    parser = argparse.ArgumentParser(
        description=description, epilog=example_usage)

    # required input parameters
    parser.add_argument(
        'env_name', type=str,
        help='Name of the gym environment. This environment must either be '
             'registered in gym, be available in the computation framework '
             'Flow, or be available within the hbaselines/envs folder.')

    # optional input parameters
    parser.add_argument(
        '--alg', type=str, default='TD3',
        help='The algorithm to use. Must be one of [TD3, SAC].')
    parser.add_argument(
        '--evaluate', action='store_true',
        help='add an evaluation environment')
    parser.add_argument(
        '--n_training', type=int, default=1,
        help='Number of training operations to perform. Each training '
             'operation is performed on a new seed. Defaults to 1.')
    parser.add_argument(
        '--total_steps', type=int, default=1000000,
        help='Total number of timesteps used during training.')
    parser.add_argument(
        '--seed', type=int, default=1,
        help='Sets the seed for numpy, tensorflow, and random.')
    parser.add_argument(
        '--log_interval', type=int, default=2000,
        help='the number of training steps before logging training results')
    parser.add_argument(
        '--eval_interval', type=int, default=50000,
        help='number of simulation steps in the training environment before '
             'an evaluation is performed')
    parser.add_argument(
        '--save_interval', type=int, default=50000,
        help='number of simulation steps in the training environment before '
             'the model is saved')
    parser.add_argument(
        '--initial_exploration_steps', type=int, default=10000,
        help='number of timesteps that the policy is run before training to '
             'initialize the replay buffer with samples')
    parser.add_argument(
        '--dir_name', type=str, default='',
        help='an optional directory to save the current experiment '
             'to or load an existing one from')

    # algorithm-specific hyperparameters, registered by the helpers below
    parser = create_algorithm_parser(parser)
    parser = create_td3_parser(parser)
    parser = create_sac_parser(parser)
    parser = create_feedforward_parser(parser)
    parser = create_goal_conditioned_parser(parser)
    parser = create_multi_feedforward_parser(parser)

    # parse_known_args tolerates extra options so wrapper scripts can extend
    # the command line without breaking this parser.
    flags, _ = parser.parse_known_args(args)

    return flags
def create_algorithm_parser(parser):
    """Add the algorithm hyperparameters to the parser."""
    # (flag, add_argument keyword arguments), kept in the original
    # registration order so the --help listing is unchanged.
    options = [
        ('--nb_train_steps',
         dict(type=int, default=1, help='the number of training steps')),
        ('--nb_rollout_steps',
         dict(type=int, default=1, help='the number of rollout steps')),
        ('--nb_eval_episodes',
         dict(type=int, default=50, help='the number of evaluation episodes')),
        ('--reward_scale',
         dict(type=float, default=1,
              help='the value the reward should be scaled by')),
        ('--render',
         dict(action='store_true',
              help='enable rendering of the environment')),
        ('--render_eval',
         dict(action='store_true',
              help='enable rendering of the evaluation environment')),
        ('--save_replay_buffer',
         dict(action='store_true',
              help='whether to save the data from the replay buffer, at the '
                   'frequency that the model is saved. Only the most recent '
                   'replay buffer is stored.')),
        ('--num_envs',
         dict(type=int, default=1,
              help='number of environments used to run simulations in '
                   'parallel. Each environment is run on a separate CPUS and '
                   'uses the same policy as the rest. Must be less than or '
                   'equal to nb_rollout_steps.')),
        ('--verbose',
         dict(type=int, default=2,
              help='the verbosity level: 0 none, 1 training information, '
                   '2 tensorflow debug')),
        ('--actor_update_freq',
         dict(type=int, default=2,
              help='number of training steps per actor policy update step. '
                   'The critic policy is updated every training step.')),
        ('--meta_update_freq',
         dict(type=int, default=10,
              help='number of training steps per meta policy update step. '
                   'The actor policy of the meta-policy is further updated '
                   'at the frequency provided by the actor_update_freq '
                   'variable. Note that this value is only relevant when '
                   'using the GoalConditionedPolicy policy.')),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser
def create_td3_parser(parser):
    """Add the TD3 hyperparameters to the parser."""
    # Defaults come from TD3_PARAMS so the CLI mirrors the library defaults.
    parser.add_argument(
        "--noise",
        type=float,
        default=TD3_PARAMS["noise"],
        help="scaling term to the range of the action space, that is "
             "subsequently used as the standard deviation of Gaussian noise "
             "added to the action if `apply_noise` is set to True in "
             "`get_action`")
    parser.add_argument(
        "--target_policy_noise",
        type=float,
        default=TD3_PARAMS["target_policy_noise"],
        help="standard deviation term to the noise from the output of the "
             "target actor policy. See TD3 paper for more.")
    parser.add_argument(
        "--target_noise_clip",
        type=float,
        default=TD3_PARAMS["target_noise_clip"],
        help="clipping term for the noise injected in the target actor policy")
    return parser
def create_sac_parser(parser):
    """Add the SAC hyperparameters to the parser."""
    # Defaults come from SAC_PARAMS so the CLI mirrors the library defaults.
    parser.add_argument(
        "--target_entropy",
        type=float,
        default=SAC_PARAMS["target_entropy"],
        help="target entropy used when learning the entropy coefficient. If "
             "set to None, a heuristic value is used.")
    return parser
def create_feedforward_parser(parser):
    """Add the feedforward policy hyperparameters to the parser."""
    # Defaults come from FEEDFORWARD_PARAMS so the CLI mirrors the library
    # defaults. The "model_params:*" options are nested via their colon
    # prefix and later read with getattr() in get_hyperparameters().
    parser.add_argument(
        "--buffer_size",
        type=int,
        default=FEEDFORWARD_PARAMS["buffer_size"],
        help="the max number of transitions to store")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=FEEDFORWARD_PARAMS["batch_size"],
        help="the size of the batch for learning the policy")
    parser.add_argument(
        "--actor_lr",
        type=float,
        default=FEEDFORWARD_PARAMS["actor_lr"],
        help="the actor learning rate")
    parser.add_argument(
        "--critic_lr",
        type=float,
        default=FEEDFORWARD_PARAMS["critic_lr"],
        help="the critic learning rate")
    parser.add_argument(
        "--tau",
        type=float,
        default=FEEDFORWARD_PARAMS["tau"],
        help="the soft update coefficient (keep old values, between 0 and 1)")
    parser.add_argument(
        "--gamma",
        type=float,
        default=FEEDFORWARD_PARAMS["gamma"],
        help="the discount rate")
    parser.add_argument(
        "--use_huber",
        action="store_true",
        help="specifies whether to use the huber distance function as the "
             "loss for the critic. If set to False, the mean-squared error "
             "metric is used instead")
    parser.add_argument(
        "--model_params:model_type",
        type=str,
        default=FEEDFORWARD_PARAMS["model_params"]["model_type"],
        help="the type of model to use. Must be one of {\"fcnet\", \"conv\"}.")
    parser.add_argument(
        "--model_params:layer_norm",
        action="store_true",
        help="enable layer normalisation")
    parser.add_argument(
        "--model_params:ignore_flat_channels",
        type=int,
        nargs="+",
        help="specifies which channels of the observation to ignore")
    parser.add_argument(
        "--model_params:ignore_image",
        action="store_true",
        help="specifies whether the image in the observation "
             "should be ignored and removed")
    parser.add_argument(
        "--model_params:image_height",
        type=int,
        default=FEEDFORWARD_PARAMS["model_params"]["image_height"],
        help="the height of the image observation")
    parser.add_argument(
        "--model_params:image_width",
        type=int,
        default=FEEDFORWARD_PARAMS["model_params"]["image_width"],
        help="the width of the image observation")
    parser.add_argument(
        "--model_params:image_channels",
        type=int,
        default=FEEDFORWARD_PARAMS["model_params"]["image_channels"],
        help="the number of channels of the image observation")
    parser.add_argument(
        "--model_params:filters",
        type=int,
        nargs="+",
        help="specifies the convolutional filters per layer")
    parser.add_argument(
        "--model_params:kernel_sizes",
        type=int,
        nargs="+",
        help="specifies the convolutional kernel sizes per layer")
    parser.add_argument(
        "--model_params:strides",
        type=int,
        nargs="+",
        help="specifies the convolutional strides per layer")
    return parser
def create_goal_conditioned_parser(parser):
    """Add the goal-conditioned policy hyperparameters to the parser."""
    # Defaults come from GOAL_CONDITIONED_PARAMS so the CLI mirrors the
    # library defaults.
    parser.add_argument(
        "--num_levels",
        type=int,
        default=GOAL_CONDITIONED_PARAMS["num_levels"],
        help="number of levels within the hierarchy. Must be greater than 1. "
             "Two levels correspond to a Manager/Worker paradigm.")
    parser.add_argument(
        "--meta_period",
        type=int,
        default=GOAL_CONDITIONED_PARAMS["meta_period"],
        help="meta-policy action period")
    parser.add_argument(
        "--intrinsic_reward_type",
        type=str,
        default=GOAL_CONDITIONED_PARAMS["intrinsic_reward_type"],
        help="the reward function to be used by the lower-level policies. See "
             "the base goal-conditioned policy for a description.")
    parser.add_argument(
        "--intrinsic_reward_scale",
        type=float,
        default=GOAL_CONDITIONED_PARAMS["intrinsic_reward_scale"],
        help="the value that the intrinsic reward should be scaled by")
    parser.add_argument(
        "--relative_goals",
        action="store_true",
        help="specifies whether the goal issued by the higher-level policies "
             "is meant to be a relative or absolute goal, i.e. specific state "
             "or change in state")
    parser.add_argument(
        "--off_policy_corrections",
        action="store_true",
        help="whether to use off-policy corrections during the update "
             "procedure. See: https://arxiv.org/abs/1805.08296")
    parser.add_argument(
        "--hindsight",
        action="store_true",
        help="whether to include hindsight action and goal transitions in the "
             "replay buffer. See: https://arxiv.org/abs/1712.00948")
    parser.add_argument(
        "--subgoal_testing_rate",
        type=float,
        default=GOAL_CONDITIONED_PARAMS["subgoal_testing_rate"],
        help="rate at which the original (non-hindsight) sample is stored in "
             "the replay buffer as well. Used only if `hindsight` is set to "
             "True.")
    parser.add_argument(
        "--cooperative_gradients",
        action="store_true",
        help="whether to use the cooperative gradient update procedure for the"
             " higher-level policy. See: https://arxiv.org/abs/1912.02368v1")
    parser.add_argument(
        "--cg_weights",
        type=float,
        default=GOAL_CONDITIONED_PARAMS["cg_weights"],
        help="weights for the gradients of the loss of the lower-level "
             "policies with respect to the parameters of the higher-level "
             "policies. Only used if `cooperative_gradients` is set to True.")
    return parser
def create_multi_feedforward_parser(parser):
    """Add the multi-agent policy hyperparameters to the parser."""
    # Both options are simple boolean switches (present => True).
    flags = [
        ('--shared', 'whether to use a shared policy for all agents'),
        ('--maddpg',
         'whether to use an algorithm-specific variant of the MADDPG '
         'algorithm'),
    ]
    for flag, help_text in flags:
        parser.add_argument(flag, action='store_true', help=help_text)
    return parser
```
#### File: tests/fast_tests/test_multiagent.py
```python
import unittest
import numpy as np
import tensorflow as tf
from gym.spaces import Box
from hbaselines.utils.tf_util import get_trainable_vars
from hbaselines.multiagent.td3 import MultiFeedForwardPolicy as \
TD3MultiFeedForwardPolicy
from hbaselines.multiagent.sac import MultiFeedForwardPolicy as \
SACMultiFeedForwardPolicy
from hbaselines.multiagent.h_td3 import MultiGoalConditionedPolicy as \
TD3MultiGoalConditionedPolicy
from hbaselines.multiagent.h_sac import MultiGoalConditionedPolicy as \
SACMultiGoalConditionedPolicy
from hbaselines.algorithms.off_policy import SAC_PARAMS
from hbaselines.algorithms.off_policy import TD3_PARAMS
from hbaselines.algorithms.off_policy import MULTIAGENT_PARAMS
from hbaselines.algorithms.off_policy import GOAL_CONDITIONED_PARAMS
class TestMultiActorCriticPolicy(unittest.TestCase):
"""Test MultiActorCriticPolicy in hbaselines/multiagent/base.py."""
def setUp(self):
self.sess = tf.compat.v1.Session()
# Shared policy parameters
self.policy_params_shared = {
'sess': self.sess,
'ac_space': Box(low=-1, high=1, shape=(1,)),
'co_space': Box(low=-2, high=2, shape=(2,)),
'ob_space': Box(low=-3, high=3, shape=(3,)),
'all_ob_space': Box(low=-3, high=3, shape=(10,)),
'verbose': 0,
}
self.policy_params_shared.update(TD3_PARAMS.copy())
self.policy_params_shared.update(MULTIAGENT_PARAMS.copy())
self.policy_params_shared['shared'] = True
# Independent policy parameters
self.policy_params_independent = {
'sess': self.sess,
'ac_space': {
'a': Box(low=-1, high=1, shape=(1,)),
'b': Box(low=-2, high=2, shape=(2,)),
},
'co_space': {
'a': Box(low=-3, high=3, shape=(3,)),
'b': Box(low=-4, high=4, shape=(4,)),
},
'ob_space': {
'a': Box(low=-5, high=5, shape=(5,)),
'b': Box(low=-6, high=6, shape=(6,)),
},
'all_ob_space': Box(low=-6, high=6, shape=(18,)),
'verbose': 0,
}
self.policy_params_independent.update(TD3_PARAMS.copy())
self.policy_params_independent.update(MULTIAGENT_PARAMS.copy())
self.policy_params_independent['shared'] = False
def tearDown(self):
self.sess.close()
del self.policy_params_shared
del self.policy_params_independent
# Clear the graph.
tf.compat.v1.reset_default_graph()
def test_store_transition_1(self):
"""Check the functionality of the store_transition() method.
This test checks for the following cases:
1. maddpg = False, shared = False
2. maddpg = False, shared = True
"""
policy_params = self.policy_params_independent.copy()
policy_params["maddpg"] = False
policy = TD3MultiFeedForwardPolicy(**policy_params)
# Initialize the variables of the policy.
policy.sess.run(tf.compat.v1.global_variables_initializer())
# Run the initialize method.
policy.initialize()
for i in range(4):
action_0 = np.array([i for _ in range(1)])
action_1 = np.array([i for _ in range(2)])
context0_0 = np.array([i for _ in range(3)])
context0_1 = np.array([i for _ in range(4)])
obs0_0 = np.array([i for _ in range(5)])
obs0_1 = np.array([i for _ in range(6)])
reward = i
obs1_0 = np.array([i+1 for _ in range(5)])
obs1_1 = np.array([i+1 for _ in range(6)])
context1_0 = np.array([i for _ in range(3)])
context1_1 = np.array([i for _ in range(4)])
done = False
is_final_step = False
evaluate = False
policy.store_transition(
obs0={"a": obs0_0, "b": obs0_1},
context0={"a": context0_0, "b": context0_1},
action={"a": action_0, "b": action_1},
reward={"a": reward, "b": reward},
obs1={"a": obs1_0, "b": obs1_1},
context1={"a": context1_0, "b": context1_1},
done=done,
is_final_step=is_final_step,
evaluate=evaluate,
env_num=0,
)
# =================================================================== #
# test for agent a #
# =================================================================== #
obs_t = policy.agents["a"].replay_buffer.obs_t
action_t = policy.agents["a"].replay_buffer.action_t
reward = policy.agents["a"].replay_buffer.reward
done = policy.agents["a"].replay_buffer.done
# check the various attributes
np.testing.assert_almost_equal(
obs_t[:4, :],
np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2., 2., 2., 2.],
[3., 3., 3., 3., 3., 3., 3., 3.]])
)
np.testing.assert_almost_equal(
action_t[:4, :],
np.array([[0.], [1.], [2.], [3.]])
)
np.testing.assert_almost_equal(
reward[:4],
np.array([0., 1., 2., 3.])
)
np.testing.assert_almost_equal(
done[:4],
[0., 0., 0., 0.]
)
# =================================================================== #
# test for agent b #
# =================================================================== #
obs_t = policy.agents["b"].replay_buffer.obs_t
action_t = policy.agents["b"].replay_buffer.action_t
reward = policy.agents["b"].replay_buffer.reward
done = policy.agents["b"].replay_buffer.done
# check the various attributes
np.testing.assert_almost_equal(
obs_t[:4, :],
np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
[3., 3., 3., 3., 3., 3., 3., 3., 3., 3.]])
)
np.testing.assert_almost_equal(
action_t[:4, :],
np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
)
np.testing.assert_almost_equal(
reward[:4],
np.array([0., 1., 2., 3.])
)
np.testing.assert_almost_equal(
done[:4],
[0., 0., 0., 0.]
)
def test_store_transition_2(self):
    """Validate store_transition when the two agents share one policy.

    Both agents push into the same ("policy") replay buffer, so each
    time step contributes two interleaved samples.
    """
    params = self.policy_params_shared.copy()
    params["maddpg"] = False
    policy = TD3MultiFeedForwardPolicy(**params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    for step in range(4):
        obs0 = np.array([step] * 2)
        context0 = np.array([step] * 3)
        action = np.array([step] * 1)
        obs1 = np.array([step + 1] * 2)
        context1 = np.array([step] * 3)
        policy.store_transition(
            obs0={"a": obs0, "b": obs0 + 1},
            context0={"a": context0, "b": context0 + 1},
            action={"a": action, "b": action + 1},
            reward={"a": step, "b": step + 1},
            obs1={"a": obs1, "b": obs1 + 1},
            context1={"a": context1, "b": context1 + 1},
            done=0.,
            is_final_step=False,
            evaluate=False,
            env_num=0,
        )

    # extract the attributes
    buf = policy.agents["policy"].replay_buffer

    # Agent "a" stores step i and agent "b" stores step i + 1, hence the
    # interleaved 0, 1, 1, 2, 2, 3, 3, 4 pattern.
    interleaved = [0., 1., 1., 2., 2., 3., 3., 4.]

    # check the various attributes
    np.testing.assert_almost_equal(
        buf.obs_t[:8, :],
        np.array([[v] * 5 for v in interleaved])
    )
    np.testing.assert_almost_equal(
        buf.action_t[:8, :],
        np.array([[v] for v in interleaved])
    )
    np.testing.assert_almost_equal(
        buf.reward[:8],
        np.array(interleaved)
    )
    np.testing.assert_almost_equal(
        buf.done[:8],
        [0.] * 8
    )
class TestTD3MultiFeedForwardPolicy(unittest.TestCase):
"""Test MultiFeedForwardPolicy in hbaselines/multiagent/td3.py."""
def setUp(self):
    """Create one TF session plus shared/independent parameter dicts."""
    self.sess = tf.compat.v1.Session()

    # Shared policy parameters: every agent uses the same spaces.
    shared = {
        'sess': self.sess,
        'ac_space': Box(low=-1, high=1, shape=(1,)),
        'co_space': Box(low=-2, high=2, shape=(2,)),
        'ob_space': Box(low=-3, high=3, shape=(3,)),
        'all_ob_space': Box(low=-3, high=3, shape=(10,)),
        'verbose': 0,
    }
    shared.update(TD3_PARAMS.copy())
    shared.update(MULTIAGENT_PARAMS.copy())
    shared['shared'] = True
    shared["model_params"]["model_type"] = "fcnet"
    self.policy_params_shared = shared

    # Independent policy parameters: per-agent ("a"/"b") spaces.
    independent = {
        'sess': self.sess,
        'ac_space': {
            'a': Box(low=-1, high=1, shape=(1,)),
            'b': Box(low=-2, high=2, shape=(2,)),
        },
        'co_space': {
            'a': Box(low=-3, high=3, shape=(3,)),
            'b': Box(low=-4, high=4, shape=(4,)),
        },
        'ob_space': {
            'a': Box(low=-5, high=5, shape=(5,)),
            'b': Box(low=-6, high=6, shape=(6,)),
        },
        'all_ob_space': Box(low=-6, high=6, shape=(18,)),
        'verbose': 0,
    }
    independent.update(TD3_PARAMS.copy())
    independent.update(MULTIAGENT_PARAMS.copy())
    independent['shared'] = False
    independent["model_params"]["model_type"] = "fcnet"
    self.policy_params_independent = independent
def tearDown(self):
    """Close the session and drop all per-test state."""
    self.sess.close()
    del self.policy_params_shared, self.policy_params_independent

    # Clear the graph so variables from one test do not leak into the next.
    tf.compat.v1.reset_default_graph()
def test_deprecated(self):
    """Make sure that the original path still works (temporarily)."""
    raised = False
    try:
        # The old import location should still resolve and construct.
        from hbaselines.multi_fcnet.td3 import MultiFeedForwardPolicy
        _ = MultiFeedForwardPolicy(**self.policy_params_independent.copy())
    except ModuleNotFoundError:  # pragma: no cover
        raised = True  # pragma: no cover

    self.assertFalse(raised, 'Exception raised')
def test_init_1(self):
    """Check the functionality of the __init__() method.

    This method is tested for the following features:

    1. The proper structure graph was generated.
    2. All input placeholders are correct.

    This is done for the following cases:

    1. maddpg = False, shared = False, model_type = "fcnet"
    2. maddpg = False, shared = True,  model_type = "fcnet"
    3. maddpg = True,  shared = False, model_type = "fcnet"
    4. maddpg = True,  shared = True,  model_type = "fcnet"
    5. maddpg = True,  shared = False, model_type = "conv"
    6. maddpg = True,  shared = True,  model_type = "conv"
    """
    params = self.policy_params_independent.copy()
    params["maddpg"] = False
    policy = TD3MultiFeedForwardPolicy(**params)

    # Expected trainable variables: one actor (pi) and two critics
    # (qf_0, qf_1) per agent, each with a model and a target copy.
    networks = [
        ("pi", ["fc0", "fc1", "output"]),
        ("qf_0", ["fc0", "fc1", "qf_output"]),
        ("qf_1", ["fc0", "fc1", "qf_output"]),
    ]
    expected = sorted(
        "{}/{}/{}/{}/{}:0".format(agent, group, net, layer, weight)
        for agent in ("a", "b")
        for group in ("model", "target")
        for net, layers in networks
        for layer in layers
        for weight in ("bias", "kernel")
    )
    self.assertListEqual(
        sorted(var.name for var in get_trainable_vars()),
        expected,
    )

    # Check observation/action/context spaces of the agents
    for agent in ("a", "b"):
        for space in ("ac_space", "ob_space", "co_space"):
            self.assertEqual(
                getattr(policy.agents[agent], space),
                self.policy_params_independent[space][agent])

    # Check the instantiation of the class attributes.
    self.assertFalse(policy.shared)
    self.assertFalse(policy.maddpg)
def test_init_2(self):
    """Test __init__ with maddpg=False and a single shared policy."""
    params = self.policy_params_shared.copy()
    params["maddpg"] = False
    policy = TD3MultiFeedForwardPolicy(**params)

    # A shared policy builds exactly one actor and two critics (no
    # per-agent name prefix), each with a model and a target copy.
    networks = [
        ("pi", ["fc0", "fc1", "output"]),
        ("qf_0", ["fc0", "fc1", "qf_output"]),
        ("qf_1", ["fc0", "fc1", "qf_output"]),
    ]
    expected = sorted(
        "{}/{}/{}/{}:0".format(group, net, layer, weight)
        for group in ("model", "target")
        for net, layers in networks
        for layer in layers
        for weight in ("bias", "kernel")
    )
    self.assertListEqual(
        sorted(var.name for var in get_trainable_vars()),
        expected,
    )

    # Check observation/action/context spaces of the agents
    for space in ("ac_space", "ob_space", "co_space"):
        self.assertEqual(
            getattr(policy.agents["policy"], space),
            self.policy_params_shared[space])

    # Check the instantiation of the class attributes.
    self.assertTrue(policy.shared)
    self.assertFalse(policy.maddpg)
def test_init_3(self):
    """Test __init__ with maddpg=True and independent policies."""
    params = self.policy_params_independent.copy()
    params["maddpg"] = True
    policy = TD3MultiFeedForwardPolicy(**params)

    # MADDPG swaps the local critics for centralized ones; actor and
    # critics each get a model and a target copy per agent.
    networks = [
        ("centralized_qf_0", ["fc0", "fc1", "qf_output"]),
        ("centralized_qf_1", ["fc0", "fc1", "qf_output"]),
        ("pi", ["fc0", "fc1", "output"]),
    ]
    expected = sorted(
        "{}/{}/{}/{}/{}:0".format(agent, group, net, layer, weight)
        for agent in ("a", "b")
        for group in ("model", "target")
        for net, layers in networks
        for layer in layers
        for weight in ("bias", "kernel")
    )
    self.assertListEqual(
        sorted(var.name for var in get_trainable_vars()),
        expected,
    )

    # Check observation/action/context spaces of the agents
    total_ac_dim = sum(
        space.shape[0] for space in policy.ac_space.values())
    for key, ac_space in policy.ac_space.items():
        obs_dim = (policy.ob_space[key].shape[0]
                   + policy.co_space[key].shape[0])
        self.assertEqual(int(policy.all_obs_ph[key].shape[-1]),
                         policy.all_ob_space.shape[0])
        self.assertEqual(int(policy.all_obs1_ph[key].shape[-1]),
                         policy.all_ob_space.shape[0])
        self.assertEqual(int(policy.all_action_ph[key].shape[-1]),
                         total_ac_dim)
        self.assertEqual(int(policy.action_ph[key].shape[-1]),
                         ac_space.shape[0])
        self.assertEqual(int(policy.obs_ph[key].shape[-1]), obs_dim)
        self.assertEqual(int(policy.obs1_ph[key].shape[-1]), obs_dim)

    # Check the instantiation of the class attributes.
    self.assertFalse(policy.shared)
    self.assertTrue(policy.maddpg)
def test_init_4(self):
    """Test __init__ with maddpg=True and a single shared policy."""
    params = self.policy_params_shared.copy()
    params["maddpg"] = True
    policy = TD3MultiFeedForwardPolicy(**params)

    # One shared actor plus two centralized critics, each with a model
    # and a target copy (no per-agent name prefix).
    networks = [
        ("centralized_qf_0", ["fc0", "fc1", "qf_output"]),
        ("centralized_qf_1", ["fc0", "fc1", "qf_output"]),
        ("pi", ["fc0", "fc1", "output"]),
    ]
    expected = sorted(
        "{}/{}/{}/{}:0".format(group, net, layer, weight)
        for group in ("model", "target")
        for net, layers in networks
        for layer in layers
        for weight in ("bias", "kernel")
    )
    self.assertListEqual(
        sorted(var.name for var in get_trainable_vars()),
        expected,
    )

    # Check observation/action/context spaces of the agents
    obs_dim = policy.ob_space.shape[0] + policy.co_space.shape[0]
    self.assertEqual(int(policy.all_obs_ph.shape[-1]),
                     policy.all_ob_space.shape[0])
    self.assertEqual(int(policy.all_obs1_ph.shape[-1]),
                     policy.all_ob_space.shape[0])
    self.assertEqual(int(policy.all_action_ph.shape[-1]),
                     policy.n_agents * policy.ac_space.shape[0])
    self.assertEqual(int(policy.action_ph[0].shape[-1]),
                     policy.ac_space.shape[0])
    self.assertEqual(int(policy.obs_ph[0].shape[-1]), obs_dim)
    self.assertEqual(int(policy.obs1_ph[0].shape[-1]), obs_dim)

    # Check the instantiation of the class attributes.
    self.assertTrue(policy.shared)
    self.assertTrue(policy.maddpg)
def test_init_5(self):
    """Test __init__ with maddpg=True, shared=False, model_type="conv"."""
    params = self.policy_params_independent.copy()
    params["maddpg"] = True
    params["model_params"]["model_type"] = "conv"
    _ = TD3MultiFeedForwardPolicy(**params)

    # The conv model prepends three convolutional layers to the MLP
    # stack of every actor and centralized critic.
    hidden = ["conv0", "conv1", "conv2", "fc0", "fc1"]
    networks = [
        ("centralized_qf_0", hidden + ["qf_output"]),
        ("centralized_qf_1", hidden + ["qf_output"]),
        ("pi", hidden + ["output"]),
    ]
    expected = sorted(
        "{}/{}/{}/{}/{}:0".format(agent, group, net, layer, weight)
        for agent in ("a", "b")
        for group in ("model", "target")
        for net, layers in networks
        for layer in layers
        for weight in ("bias", "kernel")
    )
    self.assertListEqual(
        sorted(var.name for var in get_trainable_vars()),
        expected,
    )
def test_init_6(self):
    """Test __init__ with maddpg=True, shared=True, model_type="conv"."""
    params = self.policy_params_shared.copy()
    params["maddpg"] = True
    params["model_params"]["model_type"] = "conv"
    _ = TD3MultiFeedForwardPolicy(**params)

    # One shared conv actor plus two centralized conv critics; each
    # network has three conv layers ahead of the MLP stack, and a model
    # plus a target copy.
    hidden = ["conv0", "conv1", "conv2", "fc0", "fc1"]
    networks = [
        ("centralized_qf_0", hidden + ["qf_output"]),
        ("centralized_qf_1", hidden + ["qf_output"]),
        ("pi", hidden + ["output"]),
    ]
    expected = sorted(
        "{}/{}/{}/{}:0".format(group, net, layer, weight)
        for group in ("model", "target")
        for net, layers in networks
        for layer in layers
        for weight in ("bias", "kernel")
    )
    self.assertListEqual(
        sorted(var.name for var in get_trainable_vars()),
        expected,
    )
def test_initialize_1(self):
    """Check the functionality of the initialize() method.

    This test validates that the target variables are properly initialized
    when initialize is called.

    This is done for the following cases:

    1. maddpg = False, shared = False
    2. maddpg = False, shared = True
    3. maddpg = True, shared = False
    4. maddpg = True, shared = True
    """
    params = self.policy_params_independent.copy()
    params["maddpg"] = False
    policy = TD3MultiFeedForwardPolicy(**params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Variable suffixes shared by the model and target scopes of every
    # agent: one actor and two critics, fully connected layers only.
    networks = [
        ("pi", ["fc0", "fc1", "output"]),
        ("qf_0", ["fc0", "fc1", "qf_output"]),
        ("qf_1", ["fc0", "fc1", "qf_output"]),
    ]
    suffixes = [
        "{}/{}/{}:0".format(net, layer, weight)
        for net, layers in networks
        for layer in layers
        for weight in ("bias", "kernel")
    ]

    # After initialize(), each target variable must match its model twin.
    for agent in ("a", "b"):
        for suffix in suffixes:
            with tf.compat.v1.variable_scope(
                    tf.compat.v1.get_variable_scope(), reuse=True):
                model_val = policy.sess.run(
                    "{}/model/{}".format(agent, suffix))
                target_val = policy.sess.run(
                    "{}/target/{}".format(agent, suffix))
            np.testing.assert_almost_equal(model_val, target_val)
def test_initialize_2(self):
    """Test initialize() with maddpg=False and a shared policy."""
    params = self.policy_params_shared.copy()
    params["maddpg"] = False
    policy = TD3MultiFeedForwardPolicy(**params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Variable suffixes shared by the model and target scopes: one
    # actor and two critics (no per-agent name prefix).
    networks = [
        ("pi", ["fc0", "fc1", "output"]),
        ("qf_0", ["fc0", "fc1", "qf_output"]),
        ("qf_1", ["fc0", "fc1", "qf_output"]),
    ]
    suffixes = [
        "{}/{}/{}:0".format(net, layer, weight)
        for net, layers in networks
        for layer in layers
        for weight in ("bias", "kernel")
    ]

    # After initialize(), each target variable must match its model twin.
    for suffix in suffixes:
        with tf.compat.v1.variable_scope(
                tf.compat.v1.get_variable_scope(), reuse=True):
            model_val = policy.sess.run("model/" + suffix)
            target_val = policy.sess.run("target/" + suffix)
        np.testing.assert_almost_equal(model_val, target_val)
def test_initialize_3(self):
    """Test initialize() with maddpg=True and independent policies."""
    params = self.policy_params_independent.copy()
    params["maddpg"] = True
    policy = TD3MultiFeedForwardPolicy(**params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Variable suffixes shared by the model and target scopes of every
    # agent: one actor plus two centralized critics.
    networks = [
        ("centralized_qf_0", ["fc0", "fc1", "qf_output"]),
        ("centralized_qf_1", ["fc0", "fc1", "qf_output"]),
        ("pi", ["fc0", "fc1", "output"]),
    ]
    suffixes = [
        "{}/{}/{}:0".format(net, layer, weight)
        for net, layers in networks
        for layer in layers
        for weight in ("bias", "kernel")
    ]

    # After initialize(), each target variable must match its model twin.
    for agent in ("a", "b"):
        for suffix in suffixes:
            with tf.compat.v1.variable_scope(
                    tf.compat.v1.get_variable_scope(), reuse=True):
                model_val = policy.sess.run(
                    "{}/model/{}".format(agent, suffix))
                target_val = policy.sess.run(
                    "{}/target/{}".format(agent, suffix))
            np.testing.assert_almost_equal(model_val, target_val)
def test_initialize_4(self):
    """Check initialize() for the maddpg = True, shared = True case.

    After initialize() runs, every model variable must have been copied
    verbatim into its corresponding target-network variable.
    """
    params = self.policy_params_shared.copy()
    params["maddpg"] = True
    policy = TD3MultiFeedForwardPolicy(**params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Build the variable-name suffixes shared by the "model" and "target"
    # scopes. Generating both sides from the same list guarantees each
    # model variable is compared against its matching target variable.
    suffixes = [
        '{}/{}/{}:0'.format(scope, layer, param)
        for scope in ('centralized_qf_0', 'centralized_qf_1')
        for layer in ('fc0', 'fc1', 'qf_output')
        for param in ('bias', 'kernel')
    ] + [
        'pi/{}/{}:0'.format(layer, param)
        for layer in ('fc0', 'fc1', 'output')
        for param in ('bias', 'kernel')
    ]

    for suffix in suffixes:
        with tf.compat.v1.variable_scope(
                tf.compat.v1.get_variable_scope(), reuse=True):
            model_val = policy.sess.run('model/' + suffix)
            target_val = policy.sess.run('target/' + suffix)
        np.testing.assert_almost_equal(model_val, target_val)
def test_store_transition_1(self):
    """Check the functionality of the store_transition() method.

    This test checks for the following cases:

    1. maddpg = True, shared = False
    2. maddpg = True, shared = True
    """
    params = self.policy_params_independent.copy()
    params["maddpg"] = True
    policy = TD3MultiFeedForwardPolicy(**params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Store four sample transitions, one per step index. Observation /
    # context / action widths follow the per-agent spaces declared in
    # setUp (agent "a": 5/3/1, agent "b": 6/4/2; joint observation: 18).
    for step in range(4):
        policy.store_transition(
            obs0={"a": np.full(5, step), "b": np.full(6, step)},
            context0={"a": np.full(3, step), "b": np.full(4, step)},
            action={"a": np.full(1, step), "b": np.full(2, step)},
            reward={"a": step, "b": step},
            obs1={"a": np.full(5, step + 1), "b": np.full(6, step + 1)},
            context1={"a": np.full(3, step), "b": np.full(4, step)},
            done=False,
            is_final_step=False,
            evaluate=False,
            env_num=0,
            all_obs0=np.full(18, step),
            all_obs1=np.full(18, step + 1),
        )

    # Each stored row should be a constant vector equal to its step index.
    steps = np.arange(4.)

    # =================================================================== #
    #                          test for agent a                           #
    # =================================================================== #
    buf_a = policy.replay_buffer["a"]

    # check the various attributes
    np.testing.assert_almost_equal(
        buf_a.obs_t[:4, :],
        np.tile(steps[:, None], (1, 8)))  # obs (5) + context (3)
    np.testing.assert_almost_equal(
        buf_a.action_t[:4, :],
        steps[:, None])
    np.testing.assert_almost_equal(buf_a.reward[:4], steps)
    np.testing.assert_almost_equal(buf_a.done[:4], np.zeros(4))
    np.testing.assert_almost_equal(
        buf_a.all_obs_t[:4, :],
        np.tile(steps[:, None], (1, 18)))

    # =================================================================== #
    #                          test for agent b                           #
    # =================================================================== #
    buf_b = policy.replay_buffer["b"]

    # check the various attributes
    np.testing.assert_almost_equal(
        buf_b.obs_t[:4, :],
        np.tile(steps[:, None], (1, 10)))  # obs (6) + context (4)
    np.testing.assert_almost_equal(
        buf_b.action_t[:4, :],
        np.tile(steps[:, None], (1, 2)))
    np.testing.assert_almost_equal(buf_b.reward[:4], steps)
    np.testing.assert_almost_equal(buf_b.done[:4], np.zeros(4))
    np.testing.assert_almost_equal(
        buf_b.all_obs_t[:4, :],
        np.tile(steps[:, None], (1, 18)))
def test_store_transition_2(self):
    """Check store_transition() for the maddpg = True, shared = True case."""
    params = self.policy_params_shared.copy()
    params["maddpg"] = True
    params["n_agents"] = 2
    policy = TD3MultiFeedForwardPolicy(**params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Store four sample transitions. Agent "b" receives each value
    # shifted by one relative to agent "a".
    for step in range(4):
        obs0 = np.full(2, step)
        context0 = np.full(3, step)
        action = np.full(1, step)
        obs1 = np.full(2, step + 1)
        context1 = np.full(3, step)
        policy.store_transition(
            obs0={"a": obs0, "b": obs0 + 1},
            context0={"a": context0, "b": context0 + 1},
            action={"a": action, "b": action + 1},
            reward={"a": step, "b": step + 1},
            obs1={"a": obs1, "b": obs1 + 1},
            context1={"a": context1, "b": context1 + 1},
            done=0.,
            is_final_step=False,
            evaluate=False,
            env_num=0,
            all_obs0=np.full(10, step),
            all_obs1=np.full(10, step + 1),
        )

    # check the various attributes of the shared replay buffer; agent
    # "a" occupies index 0 of the per-agent entries
    steps = np.arange(4.)
    np.testing.assert_almost_equal(
        policy.replay_buffer.obs_t[0][:4, :],
        np.tile(steps[:, None], (1, 5)))  # obs (2) + context (3)
    np.testing.assert_almost_equal(
        policy.replay_buffer.action[0][:4, :],
        steps[:, None])
    np.testing.assert_almost_equal(
        policy.replay_buffer.reward[:4], steps)
    np.testing.assert_almost_equal(
        policy.replay_buffer.done[:4], np.zeros(4))
    np.testing.assert_almost_equal(
        policy.replay_buffer.all_obs_t[:4, :],
        np.tile(steps[:, None], (1, 10)))
class TestSACMultiFeedForwardPolicy(unittest.TestCase):
"""Test MultiFeedForwardPolicy in hbaselines/multiagent/sac.py."""
def setUp(self):
    """Create the TF session and the shared/independent policy parameters."""
    self.sess = tf.compat.v1.Session()

    # Parameters for a single policy shared by every agent.
    shared = {
        'sess': self.sess,
        'ac_space': Box(low=-1, high=1, shape=(1,)),
        'co_space': Box(low=-2, high=2, shape=(2,)),
        'ob_space': Box(low=-3, high=3, shape=(3,)),
        'all_ob_space': Box(low=-3, high=3, shape=(10,)),
        'verbose': 0,
    }
    shared.update(SAC_PARAMS.copy())
    shared.update(MULTIAGENT_PARAMS.copy())
    shared['shared'] = True
    shared['model_params']['model_type'] = 'fcnet'
    self.policy_params_shared = shared

    # Parameters for independent, per-agent policies ("a" and "b").
    independent = {
        'sess': self.sess,
        'ac_space': {
            'a': Box(low=-1, high=1, shape=(1,)),
            'b': Box(low=-2, high=2, shape=(2,)),
        },
        'co_space': {
            'a': Box(low=-3, high=3, shape=(3,)),
            'b': Box(low=-4, high=4, shape=(4,)),
        },
        'ob_space': {
            'a': Box(low=-5, high=5, shape=(5,)),
            'b': Box(low=-6, high=6, shape=(6,)),
        },
        'all_ob_space': Box(low=-6, high=6, shape=(18,)),
        'verbose': 0,
    }
    independent.update(SAC_PARAMS.copy())
    independent.update(MULTIAGENT_PARAMS.copy())
    independent['shared'] = False
    independent['model_params']['model_type'] = 'fcnet'
    self.policy_params_independent = independent
def tearDown(self):
    """Close the session, clear the TF graph, and drop the fixtures."""
    self.sess.close()

    # Clear the graph.
    tf.compat.v1.reset_default_graph()
    del self.policy_params_shared, self.policy_params_independent
def test_deprecated(self):
    """Make sure that the original path still works (temporarily)."""
    import_failed = False
    try:
        from hbaselines.multi_fcnet.sac import MultiFeedForwardPolicy
        _ = MultiFeedForwardPolicy(**self.policy_params_independent.copy())
    except ModuleNotFoundError:  # pragma: no cover
        import_failed = True  # pragma: no cover
    self.assertFalse(import_failed, 'Exception raised')
def test_init_1(self):
    """Check the functionality of the __init__() method.

    This method is tested for the following features:

    1. The proper structure graph was generated.
    2. All input placeholders are correct.

    This is done for the following cases:

    1. maddpg = False, shared = False, model_type = "fcnet"
    2. maddpg = False, shared = True, model_type = "fcnet"
    3. maddpg = True, shared = False, model_type = "fcnet"
    4. maddpg = True, shared = True, model_type = "fcnet"
    5. maddpg = True, shared = False, model_type = "conv"
    6. maddpg = True, shared = True, model_type = "conv"
    """
    policy_params = self.policy_params_independent.copy()
    policy_params["maddpg"] = False
    policy = SACMultiFeedForwardPolicy(**policy_params)

    # Each agent ("a" and "b") should get its own actor (pi), twin
    # critics (qf1/qf2), value function (vf), entropy coefficient
    # (log_alpha), and a target copy of the value function.
    self.assertListEqual(
        sorted([var.name for var in get_trainable_vars()]),
        ['a/model/log_alpha:0',
         'a/model/pi/fc0/bias:0',
         'a/model/pi/fc0/kernel:0',
         'a/model/pi/fc1/bias:0',
         'a/model/pi/fc1/kernel:0',
         'a/model/pi/log_std/bias:0',
         'a/model/pi/log_std/kernel:0',
         'a/model/pi/mean/bias:0',
         'a/model/pi/mean/kernel:0',
         'a/model/value_fns/qf1/fc0/bias:0',
         'a/model/value_fns/qf1/fc0/kernel:0',
         'a/model/value_fns/qf1/fc1/bias:0',
         'a/model/value_fns/qf1/fc1/kernel:0',
         'a/model/value_fns/qf1/qf_output/bias:0',
         'a/model/value_fns/qf1/qf_output/kernel:0',
         'a/model/value_fns/qf2/fc0/bias:0',
         'a/model/value_fns/qf2/fc0/kernel:0',
         'a/model/value_fns/qf2/fc1/bias:0',
         'a/model/value_fns/qf2/fc1/kernel:0',
         'a/model/value_fns/qf2/qf_output/bias:0',
         'a/model/value_fns/qf2/qf_output/kernel:0',
         'a/model/value_fns/vf/fc0/bias:0',
         'a/model/value_fns/vf/fc0/kernel:0',
         'a/model/value_fns/vf/fc1/bias:0',
         'a/model/value_fns/vf/fc1/kernel:0',
         'a/model/value_fns/vf/vf_output/bias:0',
         'a/model/value_fns/vf/vf_output/kernel:0',
         'a/target/value_fns/vf/fc0/bias:0',
         'a/target/value_fns/vf/fc0/kernel:0',
         'a/target/value_fns/vf/fc1/bias:0',
         'a/target/value_fns/vf/fc1/kernel:0',
         'a/target/value_fns/vf/vf_output/bias:0',
         'a/target/value_fns/vf/vf_output/kernel:0',
         'b/model/log_alpha:0',
         'b/model/pi/fc0/bias:0',
         'b/model/pi/fc0/kernel:0',
         'b/model/pi/fc1/bias:0',
         'b/model/pi/fc1/kernel:0',
         'b/model/pi/log_std/bias:0',
         'b/model/pi/log_std/kernel:0',
         'b/model/pi/mean/bias:0',
         'b/model/pi/mean/kernel:0',
         'b/model/value_fns/qf1/fc0/bias:0',
         'b/model/value_fns/qf1/fc0/kernel:0',
         'b/model/value_fns/qf1/fc1/bias:0',
         'b/model/value_fns/qf1/fc1/kernel:0',
         'b/model/value_fns/qf1/qf_output/bias:0',
         'b/model/value_fns/qf1/qf_output/kernel:0',
         'b/model/value_fns/qf2/fc0/bias:0',
         'b/model/value_fns/qf2/fc0/kernel:0',
         'b/model/value_fns/qf2/fc1/bias:0',
         'b/model/value_fns/qf2/fc1/kernel:0',
         'b/model/value_fns/qf2/qf_output/bias:0',
         'b/model/value_fns/qf2/qf_output/kernel:0',
         'b/model/value_fns/vf/fc0/bias:0',
         'b/model/value_fns/vf/fc0/kernel:0',
         'b/model/value_fns/vf/fc1/bias:0',
         'b/model/value_fns/vf/fc1/kernel:0',
         'b/model/value_fns/vf/vf_output/bias:0',
         'b/model/value_fns/vf/vf_output/kernel:0',
         'b/target/value_fns/vf/fc0/bias:0',
         'b/target/value_fns/vf/fc0/kernel:0',
         'b/target/value_fns/vf/fc1/bias:0',
         'b/target/value_fns/vf/fc1/kernel:0',
         'b/target/value_fns/vf/vf_output/bias:0',
         'b/target/value_fns/vf/vf_output/kernel:0']
    )

    # Check observation/action/context spaces of the agents
    self.assertEqual(policy.agents['a'].ac_space,
                     self.policy_params_independent['ac_space']['a'])
    self.assertEqual(policy.agents['a'].ob_space,
                     self.policy_params_independent['ob_space']['a'])
    self.assertEqual(policy.agents['a'].co_space,
                     self.policy_params_independent['co_space']['a'])
    self.assertEqual(policy.agents['b'].ac_space,
                     self.policy_params_independent['ac_space']['b'])
    self.assertEqual(policy.agents['b'].ob_space,
                     self.policy_params_independent['ob_space']['b'])
    self.assertEqual(policy.agents['b'].co_space,
                     self.policy_params_independent['co_space']['b'])

    # Check the instantiation of the class attributes.
    self.assertTrue(not policy.shared)
    self.assertTrue(not policy.maddpg)
def test_init_2(self):
    """Check __init__() for the maddpg = False, shared = True case."""
    params = self.policy_params_shared.copy()
    params["maddpg"] = False
    policy = SACMultiFeedForwardPolicy(**params)

    # Expected trainable variables of the single shared SAC agent: the
    # entropy coefficient, the actor, the twin critics and value
    # function, and the target value function.
    def _layer_vars(scope, output_layer):
        return [
            '{}/{}/{}:0'.format(scope, layer, param)
            for layer in ('fc0', 'fc1', output_layer)
            for param in ('bias', 'kernel')
        ]

    expected_vars = (
        ['model/log_alpha:0']
        + [
            'model/pi/{}/{}:0'.format(layer, param)
            for layer in ('fc0', 'fc1', 'log_std', 'mean')
            for param in ('bias', 'kernel')
        ]
        + _layer_vars('model/value_fns/qf1', 'qf_output')
        + _layer_vars('model/value_fns/qf2', 'qf_output')
        + _layer_vars('model/value_fns/vf', 'vf_output')
        + _layer_vars('target/value_fns/vf', 'vf_output')
    )
    self.assertListEqual(
        sorted(var.name for var in get_trainable_vars()),
        sorted(expected_vars))

    # Check observation/action/context spaces of the shared agent.
    agent = policy.agents['policy']
    self.assertEqual(agent.ac_space, self.policy_params_shared['ac_space'])
    self.assertEqual(agent.ob_space, self.policy_params_shared['ob_space'])
    self.assertEqual(agent.co_space, self.policy_params_shared['co_space'])

    # Check the instantiation of the class attributes.
    self.assertTrue(policy.shared)
    self.assertTrue(not policy.maddpg)
def test_init_3(self):
    """Check __init__() for the maddpg = True, shared = False case.

    With MADDPG enabled, each agent's critics and value function are
    centralized (they observe the joint state/action), while the actor
    remains decentralized.
    """
    policy_params = self.policy_params_independent.copy()
    policy_params["maddpg"] = True
    policy = SACMultiFeedForwardPolicy(**policy_params)

    # Each agent should get centralized twin critics (qf1/qf2) and a
    # centralized value function (vf), a decentralized actor (pi), an
    # entropy coefficient (log_alpha), and a target value function.
    self.assertListEqual(
        sorted([var.name for var in get_trainable_vars()]),
        ['a/model/centralized_value_fns/qf1/fc0/bias:0',
         'a/model/centralized_value_fns/qf1/fc0/kernel:0',
         'a/model/centralized_value_fns/qf1/fc1/bias:0',
         'a/model/centralized_value_fns/qf1/fc1/kernel:0',
         'a/model/centralized_value_fns/qf1/qf_output/bias:0',
         'a/model/centralized_value_fns/qf1/qf_output/kernel:0',
         'a/model/centralized_value_fns/qf2/fc0/bias:0',
         'a/model/centralized_value_fns/qf2/fc0/kernel:0',
         'a/model/centralized_value_fns/qf2/fc1/bias:0',
         'a/model/centralized_value_fns/qf2/fc1/kernel:0',
         'a/model/centralized_value_fns/qf2/qf_output/bias:0',
         'a/model/centralized_value_fns/qf2/qf_output/kernel:0',
         'a/model/centralized_value_fns/vf/fc0/bias:0',
         'a/model/centralized_value_fns/vf/fc0/kernel:0',
         'a/model/centralized_value_fns/vf/fc1/bias:0',
         'a/model/centralized_value_fns/vf/fc1/kernel:0',
         'a/model/centralized_value_fns/vf/vf_output/bias:0',
         'a/model/centralized_value_fns/vf/vf_output/kernel:0',
         'a/model/log_alpha:0',
         'a/model/pi/fc0/bias:0',
         'a/model/pi/fc0/kernel:0',
         'a/model/pi/fc1/bias:0',
         'a/model/pi/fc1/kernel:0',
         'a/model/pi/log_std/bias:0',
         'a/model/pi/log_std/kernel:0',
         'a/model/pi/mean/bias:0',
         'a/model/pi/mean/kernel:0',
         'a/target/centralized_value_fns/vf/fc0/bias:0',
         'a/target/centralized_value_fns/vf/fc0/kernel:0',
         'a/target/centralized_value_fns/vf/fc1/bias:0',
         'a/target/centralized_value_fns/vf/fc1/kernel:0',
         'a/target/centralized_value_fns/vf/vf_output/bias:0',
         'a/target/centralized_value_fns/vf/vf_output/kernel:0',
         'b/model/centralized_value_fns/qf1/fc0/bias:0',
         'b/model/centralized_value_fns/qf1/fc0/kernel:0',
         'b/model/centralized_value_fns/qf1/fc1/bias:0',
         'b/model/centralized_value_fns/qf1/fc1/kernel:0',
         'b/model/centralized_value_fns/qf1/qf_output/bias:0',
         'b/model/centralized_value_fns/qf1/qf_output/kernel:0',
         'b/model/centralized_value_fns/qf2/fc0/bias:0',
         'b/model/centralized_value_fns/qf2/fc0/kernel:0',
         'b/model/centralized_value_fns/qf2/fc1/bias:0',
         'b/model/centralized_value_fns/qf2/fc1/kernel:0',
         'b/model/centralized_value_fns/qf2/qf_output/bias:0',
         'b/model/centralized_value_fns/qf2/qf_output/kernel:0',
         'b/model/centralized_value_fns/vf/fc0/bias:0',
         'b/model/centralized_value_fns/vf/fc0/kernel:0',
         'b/model/centralized_value_fns/vf/fc1/bias:0',
         'b/model/centralized_value_fns/vf/fc1/kernel:0',
         'b/model/centralized_value_fns/vf/vf_output/bias:0',
         'b/model/centralized_value_fns/vf/vf_output/kernel:0',
         'b/model/log_alpha:0',
         'b/model/pi/fc0/bias:0',
         'b/model/pi/fc0/kernel:0',
         'b/model/pi/fc1/bias:0',
         'b/model/pi/fc1/kernel:0',
         'b/model/pi/log_std/bias:0',
         'b/model/pi/log_std/kernel:0',
         'b/model/pi/mean/bias:0',
         'b/model/pi/mean/kernel:0',
         'b/target/centralized_value_fns/vf/fc0/bias:0',
         'b/target/centralized_value_fns/vf/fc0/kernel:0',
         'b/target/centralized_value_fns/vf/fc1/bias:0',
         'b/target/centralized_value_fns/vf/fc1/kernel:0',
         'b/target/centralized_value_fns/vf/vf_output/bias:0',
         'b/target/centralized_value_fns/vf/vf_output/kernel:0']
    )

    # Check observation/action/context spaces of the agents
    for key in policy.ac_space.keys():
        self.assertEqual(int(policy.all_obs_ph[key].shape[-1]),
                         int(policy.all_ob_space.shape[0]))
        self.assertEqual(int(policy.all_obs1_ph[key].shape[-1]),
                         int(policy.all_ob_space.shape[0]))
        # NOTE: the generator variable below shadows the loop's `key`;
        # harmless in Python 3 (generator scope), but easy to misread.
        self.assertEqual(int(policy.all_action_ph[key].shape[-1]),
                         sum(policy.ac_space[key].shape[0]
                             for key in policy.ac_space.keys()))
        self.assertEqual(int(policy.action_ph[key].shape[-1]),
                         int(policy.ac_space[key].shape[0]))
        self.assertEqual(int(policy.obs_ph[key].shape[-1]),
                         int(policy.ob_space[key].shape[0]
                             + policy.co_space[key].shape[0]))
        self.assertEqual(int(policy.obs1_ph[key].shape[-1]),
                         int(policy.ob_space[key].shape[0]
                             + policy.co_space[key].shape[0]))

    # Check the instantiation of the class attributes.
    self.assertTrue(not policy.shared)
    self.assertTrue(policy.maddpg)
def test_init_4(self):
    """Check __init__() for the maddpg = True, shared = True case."""
    params = self.policy_params_shared.copy()
    params["maddpg"] = True
    policy = SACMultiFeedForwardPolicy(**params)

    # Expected trainable variables: one shared actor/critic set with
    # centralized value functions plus the target value function.
    def _layer_vars(scope, output_layer):
        return [
            '{}/{}/{}:0'.format(scope, layer, param)
            for layer in ('fc0', 'fc1', output_layer)
            for param in ('bias', 'kernel')
        ]

    expected_vars = (
        _layer_vars('model/centralized_value_fns/qf1', 'qf_output')
        + _layer_vars('model/centralized_value_fns/qf2', 'qf_output')
        + _layer_vars('model/centralized_value_fns/vf', 'vf_output')
        + ['model/log_alpha:0']
        + [
            'model/pi/{}/{}:0'.format(layer, param)
            for layer in ('fc0', 'fc1', 'log_std', 'mean')
            for param in ('bias', 'kernel')
        ]
        + _layer_vars('target/centralized_value_fns/vf', 'vf_output')
    )
    self.assertListEqual(
        sorted(var.name for var in get_trainable_vars()),
        sorted(expected_vars))

    # Check observation/action/context spaces of the agents
    n_all_obs = policy.all_ob_space.shape[0]
    obs_dim = int(policy.ob_space.shape[0] + policy.co_space.shape[0])
    self.assertEqual(int(policy.all_obs_ph.shape[-1]), n_all_obs)
    self.assertEqual(int(policy.all_obs1_ph.shape[-1]), n_all_obs)
    self.assertEqual(int(policy.all_action_ph.shape[-1]),
                     policy.n_agents * policy.ac_space.shape[0])
    self.assertEqual(int(policy.action_ph[0].shape[-1]),
                     policy.ac_space.shape[0])
    self.assertEqual(int(policy.obs_ph[0].shape[-1]), obs_dim)
    self.assertEqual(int(policy.obs1_ph[0].shape[-1]), obs_dim)

    # Check the instantiation of the class attributes.
    self.assertTrue(policy.shared)
    self.assertTrue(policy.maddpg)
def test_init_5(self):
    """Check __init__() for maddpg = True, shared = False, model_type = "conv".

    Same structure as test_init_3, but every network is prefixed by
    three convolutional layers (conv0-conv2) before its fully-connected
    stack.
    """
    policy_params = self.policy_params_independent.copy()
    policy_params["maddpg"] = True
    policy_params["model_params"]["model_type"] = "conv"
    _ = SACMultiFeedForwardPolicy(**policy_params)

    # Only the variable names are validated here; the policy object
    # itself is not needed.
    self.assertListEqual(
        sorted([var.name for var in get_trainable_vars()]),
        ['a/model/centralized_value_fns/qf1/conv0/bias:0',
         'a/model/centralized_value_fns/qf1/conv0/kernel:0',
         'a/model/centralized_value_fns/qf1/conv1/bias:0',
         'a/model/centralized_value_fns/qf1/conv1/kernel:0',
         'a/model/centralized_value_fns/qf1/conv2/bias:0',
         'a/model/centralized_value_fns/qf1/conv2/kernel:0',
         'a/model/centralized_value_fns/qf1/fc0/bias:0',
         'a/model/centralized_value_fns/qf1/fc0/kernel:0',
         'a/model/centralized_value_fns/qf1/fc1/bias:0',
         'a/model/centralized_value_fns/qf1/fc1/kernel:0',
         'a/model/centralized_value_fns/qf1/qf_output/bias:0',
         'a/model/centralized_value_fns/qf1/qf_output/kernel:0',
         'a/model/centralized_value_fns/qf2/conv0/bias:0',
         'a/model/centralized_value_fns/qf2/conv0/kernel:0',
         'a/model/centralized_value_fns/qf2/conv1/bias:0',
         'a/model/centralized_value_fns/qf2/conv1/kernel:0',
         'a/model/centralized_value_fns/qf2/conv2/bias:0',
         'a/model/centralized_value_fns/qf2/conv2/kernel:0',
         'a/model/centralized_value_fns/qf2/fc0/bias:0',
         'a/model/centralized_value_fns/qf2/fc0/kernel:0',
         'a/model/centralized_value_fns/qf2/fc1/bias:0',
         'a/model/centralized_value_fns/qf2/fc1/kernel:0',
         'a/model/centralized_value_fns/qf2/qf_output/bias:0',
         'a/model/centralized_value_fns/qf2/qf_output/kernel:0',
         'a/model/centralized_value_fns/vf/conv0/bias:0',
         'a/model/centralized_value_fns/vf/conv0/kernel:0',
         'a/model/centralized_value_fns/vf/conv1/bias:0',
         'a/model/centralized_value_fns/vf/conv1/kernel:0',
         'a/model/centralized_value_fns/vf/conv2/bias:0',
         'a/model/centralized_value_fns/vf/conv2/kernel:0',
         'a/model/centralized_value_fns/vf/fc0/bias:0',
         'a/model/centralized_value_fns/vf/fc0/kernel:0',
         'a/model/centralized_value_fns/vf/fc1/bias:0',
         'a/model/centralized_value_fns/vf/fc1/kernel:0',
         'a/model/centralized_value_fns/vf/vf_output/bias:0',
         'a/model/centralized_value_fns/vf/vf_output/kernel:0',
         'a/model/log_alpha:0',
         'a/model/pi/conv0/bias:0',
         'a/model/pi/conv0/kernel:0',
         'a/model/pi/conv1/bias:0',
         'a/model/pi/conv1/kernel:0',
         'a/model/pi/conv2/bias:0',
         'a/model/pi/conv2/kernel:0',
         'a/model/pi/fc0/bias:0',
         'a/model/pi/fc0/kernel:0',
         'a/model/pi/fc1/bias:0',
         'a/model/pi/fc1/kernel:0',
         'a/model/pi/log_std/bias:0',
         'a/model/pi/log_std/kernel:0',
         'a/model/pi/mean/bias:0',
         'a/model/pi/mean/kernel:0',
         'a/target/centralized_value_fns/vf/conv0/bias:0',
         'a/target/centralized_value_fns/vf/conv0/kernel:0',
         'a/target/centralized_value_fns/vf/conv1/bias:0',
         'a/target/centralized_value_fns/vf/conv1/kernel:0',
         'a/target/centralized_value_fns/vf/conv2/bias:0',
         'a/target/centralized_value_fns/vf/conv2/kernel:0',
         'a/target/centralized_value_fns/vf/fc0/bias:0',
         'a/target/centralized_value_fns/vf/fc0/kernel:0',
         'a/target/centralized_value_fns/vf/fc1/bias:0',
         'a/target/centralized_value_fns/vf/fc1/kernel:0',
         'a/target/centralized_value_fns/vf/vf_output/bias:0',
         'a/target/centralized_value_fns/vf/vf_output/kernel:0',
         'b/model/centralized_value_fns/qf1/conv0/bias:0',
         'b/model/centralized_value_fns/qf1/conv0/kernel:0',
         'b/model/centralized_value_fns/qf1/conv1/bias:0',
         'b/model/centralized_value_fns/qf1/conv1/kernel:0',
         'b/model/centralized_value_fns/qf1/conv2/bias:0',
         'b/model/centralized_value_fns/qf1/conv2/kernel:0',
         'b/model/centralized_value_fns/qf1/fc0/bias:0',
         'b/model/centralized_value_fns/qf1/fc0/kernel:0',
         'b/model/centralized_value_fns/qf1/fc1/bias:0',
         'b/model/centralized_value_fns/qf1/fc1/kernel:0',
         'b/model/centralized_value_fns/qf1/qf_output/bias:0',
         'b/model/centralized_value_fns/qf1/qf_output/kernel:0',
         'b/model/centralized_value_fns/qf2/conv0/bias:0',
         'b/model/centralized_value_fns/qf2/conv0/kernel:0',
         'b/model/centralized_value_fns/qf2/conv1/bias:0',
         'b/model/centralized_value_fns/qf2/conv1/kernel:0',
         'b/model/centralized_value_fns/qf2/conv2/bias:0',
         'b/model/centralized_value_fns/qf2/conv2/kernel:0',
         'b/model/centralized_value_fns/qf2/fc0/bias:0',
         'b/model/centralized_value_fns/qf2/fc0/kernel:0',
         'b/model/centralized_value_fns/qf2/fc1/bias:0',
         'b/model/centralized_value_fns/qf2/fc1/kernel:0',
         'b/model/centralized_value_fns/qf2/qf_output/bias:0',
         'b/model/centralized_value_fns/qf2/qf_output/kernel:0',
         'b/model/centralized_value_fns/vf/conv0/bias:0',
         'b/model/centralized_value_fns/vf/conv0/kernel:0',
         'b/model/centralized_value_fns/vf/conv1/bias:0',
         'b/model/centralized_value_fns/vf/conv1/kernel:0',
         'b/model/centralized_value_fns/vf/conv2/bias:0',
         'b/model/centralized_value_fns/vf/conv2/kernel:0',
         'b/model/centralized_value_fns/vf/fc0/bias:0',
         'b/model/centralized_value_fns/vf/fc0/kernel:0',
         'b/model/centralized_value_fns/vf/fc1/bias:0',
         'b/model/centralized_value_fns/vf/fc1/kernel:0',
         'b/model/centralized_value_fns/vf/vf_output/bias:0',
         'b/model/centralized_value_fns/vf/vf_output/kernel:0',
         'b/model/log_alpha:0',
         'b/model/pi/conv0/bias:0',
         'b/model/pi/conv0/kernel:0',
         'b/model/pi/conv1/bias:0',
         'b/model/pi/conv1/kernel:0',
         'b/model/pi/conv2/bias:0',
         'b/model/pi/conv2/kernel:0',
         'b/model/pi/fc0/bias:0',
         'b/model/pi/fc0/kernel:0',
         'b/model/pi/fc1/bias:0',
         'b/model/pi/fc1/kernel:0',
         'b/model/pi/log_std/bias:0',
         'b/model/pi/log_std/kernel:0',
         'b/model/pi/mean/bias:0',
         'b/model/pi/mean/kernel:0',
         'b/target/centralized_value_fns/vf/conv0/bias:0',
         'b/target/centralized_value_fns/vf/conv0/kernel:0',
         'b/target/centralized_value_fns/vf/conv1/bias:0',
         'b/target/centralized_value_fns/vf/conv1/kernel:0',
         'b/target/centralized_value_fns/vf/conv2/bias:0',
         'b/target/centralized_value_fns/vf/conv2/kernel:0',
         'b/target/centralized_value_fns/vf/fc0/bias:0',
         'b/target/centralized_value_fns/vf/fc0/kernel:0',
         'b/target/centralized_value_fns/vf/fc1/bias:0',
         'b/target/centralized_value_fns/vf/fc1/kernel:0',
         'b/target/centralized_value_fns/vf/vf_output/bias:0',
         'b/target/centralized_value_fns/vf/vf_output/kernel:0']
    )
def test_init_6(self):
    """Check __init__() for maddpg = True, shared = True, model_type = "conv".

    Same structure as test_init_4, but every network is prefixed by
    three convolutional layers (conv0-conv2) before its fully-connected
    stack.
    """
    policy_params = self.policy_params_shared.copy()
    policy_params["maddpg"] = True
    policy_params["model_params"]["model_type"] = "conv"
    _ = SACMultiFeedForwardPolicy(**policy_params)

    # Only the variable names are validated here; the policy object
    # itself is not needed.
    self.assertListEqual(
        sorted([var.name for var in get_trainable_vars()]),
        ['model/centralized_value_fns/qf1/conv0/bias:0',
         'model/centralized_value_fns/qf1/conv0/kernel:0',
         'model/centralized_value_fns/qf1/conv1/bias:0',
         'model/centralized_value_fns/qf1/conv1/kernel:0',
         'model/centralized_value_fns/qf1/conv2/bias:0',
         'model/centralized_value_fns/qf1/conv2/kernel:0',
         'model/centralized_value_fns/qf1/fc0/bias:0',
         'model/centralized_value_fns/qf1/fc0/kernel:0',
         'model/centralized_value_fns/qf1/fc1/bias:0',
         'model/centralized_value_fns/qf1/fc1/kernel:0',
         'model/centralized_value_fns/qf1/qf_output/bias:0',
         'model/centralized_value_fns/qf1/qf_output/kernel:0',
         'model/centralized_value_fns/qf2/conv0/bias:0',
         'model/centralized_value_fns/qf2/conv0/kernel:0',
         'model/centralized_value_fns/qf2/conv1/bias:0',
         'model/centralized_value_fns/qf2/conv1/kernel:0',
         'model/centralized_value_fns/qf2/conv2/bias:0',
         'model/centralized_value_fns/qf2/conv2/kernel:0',
         'model/centralized_value_fns/qf2/fc0/bias:0',
         'model/centralized_value_fns/qf2/fc0/kernel:0',
         'model/centralized_value_fns/qf2/fc1/bias:0',
         'model/centralized_value_fns/qf2/fc1/kernel:0',
         'model/centralized_value_fns/qf2/qf_output/bias:0',
         'model/centralized_value_fns/qf2/qf_output/kernel:0',
         'model/centralized_value_fns/vf/conv0/bias:0',
         'model/centralized_value_fns/vf/conv0/kernel:0',
         'model/centralized_value_fns/vf/conv1/bias:0',
         'model/centralized_value_fns/vf/conv1/kernel:0',
         'model/centralized_value_fns/vf/conv2/bias:0',
         'model/centralized_value_fns/vf/conv2/kernel:0',
         'model/centralized_value_fns/vf/fc0/bias:0',
         'model/centralized_value_fns/vf/fc0/kernel:0',
         'model/centralized_value_fns/vf/fc1/bias:0',
         'model/centralized_value_fns/vf/fc1/kernel:0',
         'model/centralized_value_fns/vf/vf_output/bias:0',
         'model/centralized_value_fns/vf/vf_output/kernel:0',
         'model/log_alpha:0',
         'model/pi/conv0/bias:0',
         'model/pi/conv0/kernel:0',
         'model/pi/conv1/bias:0',
         'model/pi/conv1/kernel:0',
         'model/pi/conv2/bias:0',
         'model/pi/conv2/kernel:0',
         'model/pi/fc0/bias:0',
         'model/pi/fc0/kernel:0',
         'model/pi/fc1/bias:0',
         'model/pi/fc1/kernel:0',
         'model/pi/log_std/bias:0',
         'model/pi/log_std/kernel:0',
         'model/pi/mean/bias:0',
         'model/pi/mean/kernel:0',
         'target/centralized_value_fns/vf/conv0/bias:0',
         'target/centralized_value_fns/vf/conv0/kernel:0',
         'target/centralized_value_fns/vf/conv1/bias:0',
         'target/centralized_value_fns/vf/conv1/kernel:0',
         'target/centralized_value_fns/vf/conv2/bias:0',
         'target/centralized_value_fns/vf/conv2/kernel:0',
         'target/centralized_value_fns/vf/fc0/bias:0',
         'target/centralized_value_fns/vf/fc0/kernel:0',
         'target/centralized_value_fns/vf/fc1/bias:0',
         'target/centralized_value_fns/vf/fc1/kernel:0',
         'target/centralized_value_fns/vf/vf_output/bias:0',
         'target/centralized_value_fns/vf/vf_output/kernel:0']
    )
def test_initialize_1(self):
    """Check the functionality of the initialize() method.

    This test validates that the target variables are properly initialized
    when initialize is called.

    This is done for the following cases:

    1. maddpg = False, shared = False
    2. maddpg = False, shared = True
    3. maddpg = True, shared = False
    4. maddpg = True, shared = True
    """
    params = self.policy_params_independent.copy()
    params["maddpg"] = False
    policy = SACMultiFeedForwardPolicy(**params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Value-function variables that initialize() copies into the target
    # network, generated for both agents.
    model_vars = [
        '{}/model/value_fns/vf/{}/{}:0'.format(agent, layer, param)
        for agent in ('a', 'b')
        for layer in ('fc0', 'fc1', 'vf_output')
        for param in ('kernel', 'bias')
    ]

    for model_name in model_vars:
        # The target variable lives under the same path with "model"
        # swapped for "target".
        target_name = model_name.replace('/model/', '/target/', 1)
        with tf.compat.v1.variable_scope(
                tf.compat.v1.get_variable_scope(), reuse=True):
            model_val = policy.sess.run(model_name)
            target_val = policy.sess.run(target_name)
        np.testing.assert_almost_equal(model_val, target_val)
def test_initialize_2(self):
    """Check initialize() for the maddpg = False, shared = True case.

    With a shared policy there is a single, unprefixed value function whose
    target variables must match the model variables after initialize().
    """
    policy_params = self.policy_params_shared.copy()
    policy_params["maddpg"] = False
    policy = SACMultiFeedForwardPolicy(**policy_params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Layer parameters of the value function, in the original pairing order.
    layer_params = [
        'fc0/kernel:0', 'fc0/bias:0',
        'fc1/kernel:0', 'fc1/bias:0',
        'vf_output/kernel:0', 'vf_output/bias:0',
    ]
    model_var_list = ['model/value_fns/vf/' + p for p in layer_params]
    target_var_list = ['target/value_fns/vf/' + p for p in layer_params]

    for model, target in zip(model_var_list, target_var_list):
        with tf.compat.v1.variable_scope(
                tf.compat.v1.get_variable_scope(), reuse=True):
            model_val = policy.sess.run(model)
            target_val = policy.sess.run(target)
        np.testing.assert_almost_equal(model_val, target_val)
def test_initialize_3(self):
    """Check initialize() for the maddpg = True, shared = False case.

    The MADDPG variant builds centralized value functions, one per agent;
    their target variables must match the model variables after
    initialize().
    """
    policy_params = self.policy_params_independent.copy()
    policy_params["maddpg"] = True
    policy = SACMultiFeedForwardPolicy(**policy_params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Layer parameters of the centralized value function, kernel before
    # bias, matching the original pairing order.
    layer_params = [
        'fc0/kernel:0', 'fc0/bias:0',
        'fc1/kernel:0', 'fc1/bias:0',
        'vf_output/kernel:0', 'vf_output/bias:0',
    ]
    model_var_list = [
        '{}/model/centralized_value_fns/vf/{}'.format(agent, param)
        for agent in ('a', 'b') for param in layer_params
    ]
    target_var_list = [
        '{}/target/centralized_value_fns/vf/{}'.format(agent, param)
        for agent in ('a', 'b') for param in layer_params
    ]

    for model, target in zip(model_var_list, target_var_list):
        with tf.compat.v1.variable_scope(
                tf.compat.v1.get_variable_scope(), reuse=True):
            model_val = policy.sess.run(model)
            target_val = policy.sess.run(target)
        np.testing.assert_almost_equal(model_val, target_val)
def test_initialize_4(self):
    """Check initialize() for the maddpg = True, shared = True case.

    A single centralized value function is shared by all agents; its target
    variables must match the model variables after initialize().
    """
    policy_params = self.policy_params_shared.copy()
    policy_params["maddpg"] = True
    policy = SACMultiFeedForwardPolicy(**policy_params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Layer parameters of the centralized value function. Note: bias before
    # kernel here, matching the original pairing order.
    layer_params = [
        'fc0/bias:0', 'fc0/kernel:0',
        'fc1/bias:0', 'fc1/kernel:0',
        'vf_output/bias:0', 'vf_output/kernel:0',
    ]
    model_var_list = [
        'model/centralized_value_fns/vf/' + p for p in layer_params]
    target_var_list = [
        'target/centralized_value_fns/vf/' + p for p in layer_params]

    for model, target in zip(model_var_list, target_var_list):
        with tf.compat.v1.variable_scope(
                tf.compat.v1.get_variable_scope(), reuse=True):
            model_val = policy.sess.run(model)
            target_val = policy.sess.run(target)
        np.testing.assert_almost_equal(model_val, target_val)
def test_store_transition_1(self):
    """Check the functionality of the store_transition() method.

    This test checks for the following cases:

    1. maddpg = True, shared = False
    2. maddpg = True, shared = True
    """
    policy_params = self.policy_params_independent.copy()
    policy_params["maddpg"] = True
    policy = SACMultiFeedForwardPolicy(**policy_params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Store four transitions whose entries are filled with the step index,
    # so each row written to the replay buffers is recognizable.
    for i in range(4):
        policy.store_transition(
            obs0={"a": np.full(5, i), "b": np.full(6, i)},
            context0={"a": np.full(3, i), "b": np.full(4, i)},
            action={"a": np.full(1, i), "b": np.full(2, i)},
            reward={"a": i, "b": i},
            obs1={"a": np.full(5, i + 1), "b": np.full(6, i + 1)},
            context1={"a": np.full(3, i), "b": np.full(4, i)},
            done=False,
            is_final_step=False,
            evaluate=False,
            env_num=0,
            all_obs0=np.full(18, i),
            all_obs1=np.full(18, i + 1),
        )

    # Check the per-agent replay buffers. The stored observation is the
    # concatenation of the observation and the contextual term, so agent
    # "a" rows have 5 + 3 = 8 columns and agent "b" rows 6 + 4 = 10.
    for key, ob_dim, ac_dim in (("a", 8, 1), ("b", 10, 2)):
        buff = policy.replay_buffer[key]
        np.testing.assert_almost_equal(
            buff.obs_t[:4, :],
            np.array([[i] * ob_dim for i in range(4)], dtype=float)
        )
        np.testing.assert_almost_equal(
            buff.action_t[:4, :],
            np.array([[i] * ac_dim for i in range(4)], dtype=float)
        )
        np.testing.assert_almost_equal(
            buff.reward[:4],
            np.arange(4, dtype=float)
        )
        np.testing.assert_almost_equal(
            buff.done[:4],
            np.zeros(4)
        )
        # The full (joint) observation is shared by both agents' buffers.
        np.testing.assert_almost_equal(
            buff.all_obs_t[:4, :],
            np.array([[i] * 18 for i in range(4)], dtype=float)
        )
def test_store_transition_4(self):
    """Check store_transition() for the maddpg = True, shared = True case.

    A single shared replay buffer is used, with per-agent columns for the
    observations and actions.
    """
    policy_params = self.policy_params_shared.copy()
    policy_params["maddpg"] = True
    policy_params["n_agents"] = 2
    policy = SACMultiFeedForwardPolicy(**policy_params)

    # Initialize the variables of the policy.
    policy.sess.run(tf.compat.v1.global_variables_initializer())

    # Run the initialize method.
    policy.initialize()

    # Store four transitions filled with the step index. Agent "b" values
    # are offset by one so the two agents' rows are distinguishable.
    # NOTE(review): obs0 is 2-dim and context0 3-dim, the reverse of the
    # declared ob_space (3,) / co_space (2,); only the 5-dim concatenation
    # is checked below -- confirm this is intentional.
    for i in range(4):
        obs0 = np.full(2, i)
        context0 = np.full(3, i)
        action = np.full(1, i)
        obs1 = np.full(2, i + 1)
        context1 = np.full(3, i)
        policy.store_transition(
            obs0={"a": obs0, "b": obs0 + 1},
            context0={"a": context0, "b": context0 + 1},
            action={"a": action, "b": action + 1},
            reward={"a": i, "b": i + 1},
            obs1={"a": obs1, "b": obs1 + 1},
            context1={"a": context1, "b": context1 + 1},
            done=0.,
            is_final_step=False,
            evaluate=False,
            env_num=0,
            all_obs0=np.full(10, i),
            all_obs1=np.full(10, i + 1),
        )

    # Extract the attributes of the shared replay buffer.
    # NOTE(review): the shared buffer exposes `action` (not `action_t` as
    # the independent buffers do in test_store_transition_1) -- presumably
    # intentional; verify against the shared buffer class.
    obs_t = policy.replay_buffer.obs_t
    action_t = policy.replay_buffer.action
    reward = policy.replay_buffer.reward
    done = policy.replay_buffer.done
    all_obs_t = policy.replay_buffer.all_obs_t

    # Check agent "a"'s columns (index 0): obs + context = 5 entries/row.
    np.testing.assert_almost_equal(
        obs_t[0][:4, :],
        np.array([[i] * 5 for i in range(4)], dtype=float)
    )
    np.testing.assert_almost_equal(
        action_t[0][:4, :],
        np.array([[i] * 1 for i in range(4)], dtype=float)
    )
    np.testing.assert_almost_equal(
        reward[:4],
        np.arange(4, dtype=float)
    )
    np.testing.assert_almost_equal(
        done[:4],
        np.zeros(4)
    )
    np.testing.assert_almost_equal(
        all_obs_t[:4, :],
        np.array([[i] * 10 for i in range(4)], dtype=float)
    )
class TestTD3MultiGoalConditionedPolicy(unittest.TestCase):
    """Test TD3MultiGoalConditionedPolicy in hbaselines/multiagent/h_td3.py.

    The policy builds a two-level (meta/worker) hierarchy, optionally one
    per agent (shared=False) or a single shared hierarchy (shared=True).
    """

    @staticmethod
    def _net_vars(prefix, scope):
        """Return the TD3 network variable names under one level's scope.

        The names are generated in lexicographic order (pi < qf_0 < qf_1,
        fc0 < fc1 < output layer, bias < kernel), so concatenating the
        "model" and "target" lists for a prefix reproduces the sorted
        trainable-variable list for that level.

        Parameters
        ----------
        prefix : str
            scope prefix preceding the model/target scope, e.g. "a/level_0"
        scope : str
            one of "model" or "target"

        Returns
        -------
        list of str
            the actor (pi) and twin-critic (qf_0, qf_1) variable names
        """
        names = []
        for net, out_layer in (('pi', 'output'),
                               ('qf_0', 'qf_output'),
                               ('qf_1', 'qf_output')):
            for layer in ('fc0', 'fc1', out_layer):
                for param in ('bias', 'kernel'):
                    names.append('{}/{}/{}/{}/{}:0'.format(
                        prefix, scope, net, layer, param))
        return names

    def setUp(self):
        """Create the TF session and the shared/independent parameters."""
        self.sess = tf.compat.v1.Session()

        # Shared policy parameters
        shared = {
            'sess': self.sess,
            'ac_space': Box(low=-1, high=1, shape=(1,)),
            'co_space': Box(low=-2, high=2, shape=(2,)),
            'ob_space': Box(low=-3, high=3, shape=(3,)),
            'all_ob_space': Box(low=-3, high=3, shape=(10,)),
            'verbose': 0,
        }
        shared.update(TD3_PARAMS.copy())
        shared.update(GOAL_CONDITIONED_PARAMS.copy())
        shared.update(MULTIAGENT_PARAMS.copy())
        shared['shared'] = True
        self.policy_params_shared = shared

        # Independent policy parameters
        independent = {
            'sess': self.sess,
            'ac_space': {
                'a': Box(low=-1, high=1, shape=(1,)),
                'b': Box(low=-2, high=2, shape=(2,)),
            },
            'co_space': {
                'a': Box(low=-3, high=3, shape=(3,)),
                'b': Box(low=-4, high=4, shape=(4,)),
            },
            'ob_space': {
                'a': Box(low=-5, high=5, shape=(5,)),
                'b': Box(low=-6, high=6, shape=(6,)),
            },
            'all_ob_space': Box(low=-6, high=6, shape=(18,)),
            'verbose': 0,
        }
        independent.update(TD3_PARAMS.copy())
        independent.update(GOAL_CONDITIONED_PARAMS.copy())
        independent.update(MULTIAGENT_PARAMS.copy())
        independent['shared'] = False
        self.policy_params_independent = independent

    def tearDown(self):
        """Close the session and clear the graph between tests."""
        self.sess.close()
        del self.policy_params_shared, self.policy_params_independent

        # Clear the graph.
        tf.compat.v1.reset_default_graph()

    def test_init_1(self):
        """Check the functionality of the __init__() method.

        This method is tested for the following features:

        1. The proper structure graph was generated.
        2. All input placeholders are correct.

        This is done for the following cases:

        1. maddpg = False, shared = False
        2. maddpg = False, shared = True
        """
        policy_params = self.policy_params_independent.copy()
        policy_params["maddpg"] = False
        policy = TD3MultiGoalConditionedPolicy(**policy_params)

        # Expected trainable variables: one two-level hierarchy per agent,
        # each level holding model and target actor/critic networks.
        expected_vars = []
        for agent in ('a', 'b'):
            for level in ('level_0', 'level_1'):
                prefix = '{}/{}'.format(agent, level)
                expected_vars += self._net_vars(prefix, 'model')
                expected_vars += self._net_vars(prefix, 'target')

        self.assertListEqual(
            sorted(var.name for var in get_trainable_vars()),
            expected_vars
        )

        # Check observation/action/context spaces of the agents. The
        # higher level (policy[0]) outputs goals in the observation space,
        # and the lower level (policy[1]) receives those goals as context.
        for agent in ('a', 'b'):
            meta, worker = policy.agents[agent].policy
            ob_dim = \
                self.policy_params_independent['ob_space'][agent].shape[0]
            self.assertEqual(meta.ac_space.shape[0], ob_dim)
            self.assertEqual(meta.ob_space.shape[0], ob_dim)
            self.assertEqual(
                meta.co_space.shape[0],
                self.policy_params_independent['co_space'][agent].shape[0])
            self.assertEqual(
                worker.ac_space.shape[0],
                self.policy_params_independent['ac_space'][agent].shape[0])
            self.assertEqual(worker.ob_space.shape[0], ob_dim)
            self.assertEqual(worker.co_space.shape[0], ob_dim)

        # Check the instantiation of the class attributes.
        self.assertFalse(policy.shared)
        self.assertFalse(policy.maddpg)

    def test_init_2(self):
        """Check __init__() for the maddpg = False, shared = True case."""
        policy_params = self.policy_params_shared.copy()
        policy_params["maddpg"] = False
        policy = TD3MultiGoalConditionedPolicy(**policy_params)

        # With shared policies there is a single, unprefixed two-level
        # hierarchy.
        expected_vars = []
        for level in ('level_0', 'level_1'):
            expected_vars += self._net_vars(level, 'model')
            expected_vars += self._net_vars(level, 'target')

        self.assertListEqual(
            sorted(var.name for var in get_trainable_vars()),
            expected_vars
        )

        # Check observation/action/context spaces of the agents.
        meta, worker = policy.agents['policy'].policy
        ob_dim = self.policy_params_shared['ob_space'].shape[0]
        self.assertEqual(meta.ac_space.shape[0], ob_dim)
        self.assertEqual(meta.ob_space.shape[0], ob_dim)
        self.assertEqual(
            meta.co_space.shape[0],
            self.policy_params_shared['co_space'].shape[0])
        self.assertEqual(
            worker.ac_space.shape[0],
            self.policy_params_shared['ac_space'].shape[0])
        self.assertEqual(worker.ob_space.shape[0], ob_dim)
        self.assertEqual(worker.co_space.shape[0], ob_dim)

        # Check the instantiation of the class attributes.
        self.assertTrue(policy.shared)
        self.assertFalse(policy.maddpg)

    def test_initialize_1(self):
        """Check the functionality of the initialize() method.

        This test validates that the target variables are properly
        initialized when initialize is called.

        This is done for the following cases:

        1. maddpg = False, shared = False
        2. maddpg = False, shared = True
        """
        policy_params = self.policy_params_independent.copy()
        policy_params["maddpg"] = False
        policy = TD3MultiGoalConditionedPolicy(**policy_params)

        # Initialize the variables of the policy.
        policy.sess.run(tf.compat.v1.global_variables_initializer())

        # Run the initialize method.
        policy.initialize()

        # Pair every model variable with its target-network counterpart,
        # per agent and per level.
        prefixes = [
            '{}/{}'.format(agent, level)
            for agent in ('a', 'b')
            for level in ('level_0', 'level_1')
        ]
        model_var_list = [
            name for prefix in prefixes
            for name in self._net_vars(prefix, 'model')
        ]
        target_var_list = [
            name for prefix in prefixes
            for name in self._net_vars(prefix, 'target')
        ]

        # After initialize(), each target variable must equal its model
        # counterpart.
        for model, target in zip(model_var_list, target_var_list):
            with tf.compat.v1.variable_scope(
                    tf.compat.v1.get_variable_scope(), reuse=True):
                model_val = policy.sess.run(model)
                target_val = policy.sess.run(target)
            np.testing.assert_almost_equal(model_val, target_val)

    def test_initialize_2(self):
        """Check initialize() for the maddpg = False, shared = True case."""
        policy_params = self.policy_params_shared.copy()
        policy_params["maddpg"] = False
        policy = TD3MultiGoalConditionedPolicy(**policy_params)

        # Initialize the variables of the policy.
        policy.sess.run(tf.compat.v1.global_variables_initializer())

        # Run the initialize method.
        policy.initialize()

        # A single shared hierarchy: pair model/target variables per level.
        model_var_list = [
            name for level in ('level_0', 'level_1')
            for name in self._net_vars(level, 'model')
        ]
        target_var_list = [
            name for level in ('level_0', 'level_1')
            for name in self._net_vars(level, 'target')
        ]

        for model, target in zip(model_var_list, target_var_list):
            with tf.compat.v1.variable_scope(
                    tf.compat.v1.get_variable_scope(), reuse=True):
                model_val = policy.sess.run(model)
                target_val = policy.sess.run(target)
            np.testing.assert_almost_equal(model_val, target_val)
class TestSACMultiGoalConditionedPolicy(unittest.TestCase):
    """Test MultiFeedForwardPolicy in hbaselines/multiagent/h_sac.py."""
    def setUp(self):
        """Create a TF session plus shared and independent policy params."""
        self.sess = tf.compat.v1.Session()
        # Shared policy parameters
        self.policy_params_shared = {
            'sess': self.sess,
            'ac_space': Box(low=-1, high=1, shape=(1,)),
            'co_space': Box(low=-2, high=2, shape=(2,)),
            'ob_space': Box(low=-3, high=3, shape=(3,)),
            'all_ob_space': Box(low=-3, high=3, shape=(10,)),
            'verbose': 0,
        }
        self.policy_params_shared.update(SAC_PARAMS.copy())
        self.policy_params_shared.update(GOAL_CONDITIONED_PARAMS.copy())
        self.policy_params_shared.update(MULTIAGENT_PARAMS.copy())
        self.policy_params_shared['shared'] = True
        # Independent policy parameters
        self.policy_params_independent = {
            'sess': self.sess,
            'ac_space': {
                'a': Box(low=-1, high=1, shape=(1,)),
                'b': Box(low=-2, high=2, shape=(2,)),
            },
            'co_space': {
                'a': Box(low=-3, high=3, shape=(3,)),
                'b': Box(low=-4, high=4, shape=(4,)),
            },
            'ob_space': {
                'a': Box(low=-5, high=5, shape=(5,)),
                'b': Box(low=-6, high=6, shape=(6,)),
            },
            'all_ob_space': Box(low=-6, high=6, shape=(18,)),
            'verbose': 0,
        }
        self.policy_params_independent.update(SAC_PARAMS.copy())
        self.policy_params_independent.update(GOAL_CONDITIONED_PARAMS.copy())
        self.policy_params_independent.update(MULTIAGENT_PARAMS.copy())
        self.policy_params_independent['shared'] = False
    def tearDown(self):
        """Close the session and clear the default TF graph."""
        self.sess.close()
        del self.policy_params_shared
        del self.policy_params_independent
        # Clear the graph.
        tf.compat.v1.reset_default_graph()
    def test_init_1(self):
        """Check the functionality of the __init__() method.
        This method is tested for the following features:
        1. The proper structure graph was generated.
        2. All input placeholders are correct.
        This is done for the following cases:
        1. maddpg = False, shared = False
        2. maddpg = False, shared = True
        """
        policy_params = self.policy_params_independent.copy()
        policy_params["maddpg"] = False
        policy = SACMultiGoalConditionedPolicy(**policy_params)
        # Each agent ('a', 'b') builds a two-level goal-conditioned SAC
        # graph (model + target value function per level).
        self.assertListEqual(
            sorted([var.name for var in get_trainable_vars()]),
            ['a/level_0/model/log_alpha:0',
             'a/level_0/model/pi/fc0/bias:0',
             'a/level_0/model/pi/fc0/kernel:0',
             'a/level_0/model/pi/fc1/bias:0',
             'a/level_0/model/pi/fc1/kernel:0',
             'a/level_0/model/pi/log_std/bias:0',
             'a/level_0/model/pi/log_std/kernel:0',
             'a/level_0/model/pi/mean/bias:0',
             'a/level_0/model/pi/mean/kernel:0',
             'a/level_0/model/value_fns/qf1/fc0/bias:0',
             'a/level_0/model/value_fns/qf1/fc0/kernel:0',
             'a/level_0/model/value_fns/qf1/fc1/bias:0',
             'a/level_0/model/value_fns/qf1/fc1/kernel:0',
             'a/level_0/model/value_fns/qf1/qf_output/bias:0',
             'a/level_0/model/value_fns/qf1/qf_output/kernel:0',
             'a/level_0/model/value_fns/qf2/fc0/bias:0',
             'a/level_0/model/value_fns/qf2/fc0/kernel:0',
             'a/level_0/model/value_fns/qf2/fc1/bias:0',
             'a/level_0/model/value_fns/qf2/fc1/kernel:0',
             'a/level_0/model/value_fns/qf2/qf_output/bias:0',
             'a/level_0/model/value_fns/qf2/qf_output/kernel:0',
             'a/level_0/model/value_fns/vf/fc0/bias:0',
             'a/level_0/model/value_fns/vf/fc0/kernel:0',
             'a/level_0/model/value_fns/vf/fc1/bias:0',
             'a/level_0/model/value_fns/vf/fc1/kernel:0',
             'a/level_0/model/value_fns/vf/vf_output/bias:0',
             'a/level_0/model/value_fns/vf/vf_output/kernel:0',
             'a/level_0/target/value_fns/vf/fc0/bias:0',
             'a/level_0/target/value_fns/vf/fc0/kernel:0',
             'a/level_0/target/value_fns/vf/fc1/bias:0',
             'a/level_0/target/value_fns/vf/fc1/kernel:0',
             'a/level_0/target/value_fns/vf/vf_output/bias:0',
             'a/level_0/target/value_fns/vf/vf_output/kernel:0',
             'a/level_1/model/log_alpha:0',
             'a/level_1/model/pi/fc0/bias:0',
             'a/level_1/model/pi/fc0/kernel:0',
             'a/level_1/model/pi/fc1/bias:0',
             'a/level_1/model/pi/fc1/kernel:0',
             'a/level_1/model/pi/log_std/bias:0',
             'a/level_1/model/pi/log_std/kernel:0',
             'a/level_1/model/pi/mean/bias:0',
             'a/level_1/model/pi/mean/kernel:0',
             'a/level_1/model/value_fns/qf1/fc0/bias:0',
             'a/level_1/model/value_fns/qf1/fc0/kernel:0',
             'a/level_1/model/value_fns/qf1/fc1/bias:0',
             'a/level_1/model/value_fns/qf1/fc1/kernel:0',
             'a/level_1/model/value_fns/qf1/qf_output/bias:0',
             'a/level_1/model/value_fns/qf1/qf_output/kernel:0',
             'a/level_1/model/value_fns/qf2/fc0/bias:0',
             'a/level_1/model/value_fns/qf2/fc0/kernel:0',
             'a/level_1/model/value_fns/qf2/fc1/bias:0',
             'a/level_1/model/value_fns/qf2/fc1/kernel:0',
             'a/level_1/model/value_fns/qf2/qf_output/bias:0',
             'a/level_1/model/value_fns/qf2/qf_output/kernel:0',
             'a/level_1/model/value_fns/vf/fc0/bias:0',
             'a/level_1/model/value_fns/vf/fc0/kernel:0',
             'a/level_1/model/value_fns/vf/fc1/bias:0',
             'a/level_1/model/value_fns/vf/fc1/kernel:0',
             'a/level_1/model/value_fns/vf/vf_output/bias:0',
             'a/level_1/model/value_fns/vf/vf_output/kernel:0',
             'a/level_1/target/value_fns/vf/fc0/bias:0',
             'a/level_1/target/value_fns/vf/fc0/kernel:0',
             'a/level_1/target/value_fns/vf/fc1/bias:0',
             'a/level_1/target/value_fns/vf/fc1/kernel:0',
             'a/level_1/target/value_fns/vf/vf_output/bias:0',
             'a/level_1/target/value_fns/vf/vf_output/kernel:0',
             'b/level_0/model/log_alpha:0',
             'b/level_0/model/pi/fc0/bias:0',
             'b/level_0/model/pi/fc0/kernel:0',
             'b/level_0/model/pi/fc1/bias:0',
             'b/level_0/model/pi/fc1/kernel:0',
             'b/level_0/model/pi/log_std/bias:0',
             'b/level_0/model/pi/log_std/kernel:0',
             'b/level_0/model/pi/mean/bias:0',
             'b/level_0/model/pi/mean/kernel:0',
             'b/level_0/model/value_fns/qf1/fc0/bias:0',
             'b/level_0/model/value_fns/qf1/fc0/kernel:0',
             'b/level_0/model/value_fns/qf1/fc1/bias:0',
             'b/level_0/model/value_fns/qf1/fc1/kernel:0',
             'b/level_0/model/value_fns/qf1/qf_output/bias:0',
             'b/level_0/model/value_fns/qf1/qf_output/kernel:0',
             'b/level_0/model/value_fns/qf2/fc0/bias:0',
             'b/level_0/model/value_fns/qf2/fc0/kernel:0',
             'b/level_0/model/value_fns/qf2/fc1/bias:0',
             'b/level_0/model/value_fns/qf2/fc1/kernel:0',
             'b/level_0/model/value_fns/qf2/qf_output/bias:0',
             'b/level_0/model/value_fns/qf2/qf_output/kernel:0',
             'b/level_0/model/value_fns/vf/fc0/bias:0',
             'b/level_0/model/value_fns/vf/fc0/kernel:0',
             'b/level_0/model/value_fns/vf/fc1/bias:0',
             'b/level_0/model/value_fns/vf/fc1/kernel:0',
             'b/level_0/model/value_fns/vf/vf_output/bias:0',
             'b/level_0/model/value_fns/vf/vf_output/kernel:0',
             'b/level_0/target/value_fns/vf/fc0/bias:0',
             'b/level_0/target/value_fns/vf/fc0/kernel:0',
             'b/level_0/target/value_fns/vf/fc1/bias:0',
             'b/level_0/target/value_fns/vf/fc1/kernel:0',
             'b/level_0/target/value_fns/vf/vf_output/bias:0',
             'b/level_0/target/value_fns/vf/vf_output/kernel:0',
             'b/level_1/model/log_alpha:0',
             'b/level_1/model/pi/fc0/bias:0',
             'b/level_1/model/pi/fc0/kernel:0',
             'b/level_1/model/pi/fc1/bias:0',
             'b/level_1/model/pi/fc1/kernel:0',
             'b/level_1/model/pi/log_std/bias:0',
             'b/level_1/model/pi/log_std/kernel:0',
             'b/level_1/model/pi/mean/bias:0',
             'b/level_1/model/pi/mean/kernel:0',
             'b/level_1/model/value_fns/qf1/fc0/bias:0',
             'b/level_1/model/value_fns/qf1/fc0/kernel:0',
             'b/level_1/model/value_fns/qf1/fc1/bias:0',
             'b/level_1/model/value_fns/qf1/fc1/kernel:0',
             'b/level_1/model/value_fns/qf1/qf_output/bias:0',
             'b/level_1/model/value_fns/qf1/qf_output/kernel:0',
             'b/level_1/model/value_fns/qf2/fc0/bias:0',
             'b/level_1/model/value_fns/qf2/fc0/kernel:0',
             'b/level_1/model/value_fns/qf2/fc1/bias:0',
             'b/level_1/model/value_fns/qf2/fc1/kernel:0',
             'b/level_1/model/value_fns/qf2/qf_output/bias:0',
             'b/level_1/model/value_fns/qf2/qf_output/kernel:0',
             'b/level_1/model/value_fns/vf/fc0/bias:0',
             'b/level_1/model/value_fns/vf/fc0/kernel:0',
             'b/level_1/model/value_fns/vf/fc1/bias:0',
             'b/level_1/model/value_fns/vf/fc1/kernel:0',
             'b/level_1/model/value_fns/vf/vf_output/bias:0',
             'b/level_1/model/value_fns/vf/vf_output/kernel:0',
             'b/level_1/target/value_fns/vf/fc0/bias:0',
             'b/level_1/target/value_fns/vf/fc0/kernel:0',
             'b/level_1/target/value_fns/vf/fc1/bias:0',
             'b/level_1/target/value_fns/vf/fc1/kernel:0',
             'b/level_1/target/value_fns/vf/vf_output/bias:0',
             'b/level_1/target/value_fns/vf/vf_output/kernel:0']
        )
        # Check observation/action/context spaces of the agents
        # Level 0 (manager) emits goals: its action space and the level-1
        # context space both match the worker's observation space.
        self.assertEqual(
            policy.agents['a'].policy[0].ac_space.shape[0],
            self.policy_params_independent['ob_space']['a'].shape[0]
        )
        self.assertEqual(
            policy.agents['a'].policy[0].ob_space.shape[0],
            self.policy_params_independent['ob_space']['a'].shape[0]
        )
        self.assertEqual(
            policy.agents['a'].policy[0].co_space.shape[0],
            self.policy_params_independent['co_space']['a'].shape[0]
        )
        self.assertEqual(
            policy.agents['a'].policy[1].ac_space.shape[0],
            self.policy_params_independent['ac_space']['a'].shape[0]
        )
        self.assertEqual(
            policy.agents['a'].policy[1].ob_space.shape[0],
            self.policy_params_independent['ob_space']['a'].shape[0]
        )
        self.assertEqual(
            policy.agents['a'].policy[1].co_space.shape[0],
            self.policy_params_independent['ob_space']['a'].shape[0]
        )
        self.assertEqual(
            policy.agents['b'].policy[0].ac_space.shape[0],
            self.policy_params_independent['ob_space']['b'].shape[0]
        )
        self.assertEqual(
            policy.agents['b'].policy[0].ob_space.shape[0],
            self.policy_params_independent['ob_space']['b'].shape[0]
        )
        self.assertEqual(
            policy.agents['b'].policy[0].co_space.shape[0],
            self.policy_params_independent['co_space']['b'].shape[0]
        )
        self.assertEqual(
            policy.agents['b'].policy[1].ac_space.shape[0],
            self.policy_params_independent['ac_space']['b'].shape[0]
        )
        self.assertEqual(
            policy.agents['b'].policy[1].ob_space.shape[0],
            self.policy_params_independent['ob_space']['b'].shape[0]
        )
        self.assertEqual(
            policy.agents['b'].policy[1].co_space.shape[0],
            self.policy_params_independent['ob_space']['b'].shape[0]
        )
        # Check the instantiation of the class attributes.
        self.assertTrue(not policy.shared)
        self.assertTrue(not policy.maddpg)
    def test_init_2(self):
        """Check __init__() for the maddpg=False, shared=True case.

        With a shared policy, only one (unscoped) two-level graph is built.
        """
        policy_params = self.policy_params_shared.copy()
        policy_params["maddpg"] = False
        policy = SACMultiGoalConditionedPolicy(**policy_params)
        self.assertListEqual(
            sorted([var.name for var in get_trainable_vars()]),
            ['level_0/model/log_alpha:0',
             'level_0/model/pi/fc0/bias:0',
             'level_0/model/pi/fc0/kernel:0',
             'level_0/model/pi/fc1/bias:0',
             'level_0/model/pi/fc1/kernel:0',
             'level_0/model/pi/log_std/bias:0',
             'level_0/model/pi/log_std/kernel:0',
             'level_0/model/pi/mean/bias:0',
             'level_0/model/pi/mean/kernel:0',
             'level_0/model/value_fns/qf1/fc0/bias:0',
             'level_0/model/value_fns/qf1/fc0/kernel:0',
             'level_0/model/value_fns/qf1/fc1/bias:0',
             'level_0/model/value_fns/qf1/fc1/kernel:0',
             'level_0/model/value_fns/qf1/qf_output/bias:0',
             'level_0/model/value_fns/qf1/qf_output/kernel:0',
             'level_0/model/value_fns/qf2/fc0/bias:0',
             'level_0/model/value_fns/qf2/fc0/kernel:0',
             'level_0/model/value_fns/qf2/fc1/bias:0',
             'level_0/model/value_fns/qf2/fc1/kernel:0',
             'level_0/model/value_fns/qf2/qf_output/bias:0',
             'level_0/model/value_fns/qf2/qf_output/kernel:0',
             'level_0/model/value_fns/vf/fc0/bias:0',
             'level_0/model/value_fns/vf/fc0/kernel:0',
             'level_0/model/value_fns/vf/fc1/bias:0',
             'level_0/model/value_fns/vf/fc1/kernel:0',
             'level_0/model/value_fns/vf/vf_output/bias:0',
             'level_0/model/value_fns/vf/vf_output/kernel:0',
             'level_0/target/value_fns/vf/fc0/bias:0',
             'level_0/target/value_fns/vf/fc0/kernel:0',
             'level_0/target/value_fns/vf/fc1/bias:0',
             'level_0/target/value_fns/vf/fc1/kernel:0',
             'level_0/target/value_fns/vf/vf_output/bias:0',
             'level_0/target/value_fns/vf/vf_output/kernel:0',
             'level_1/model/log_alpha:0',
             'level_1/model/pi/fc0/bias:0',
             'level_1/model/pi/fc0/kernel:0',
             'level_1/model/pi/fc1/bias:0',
             'level_1/model/pi/fc1/kernel:0',
             'level_1/model/pi/log_std/bias:0',
             'level_1/model/pi/log_std/kernel:0',
             'level_1/model/pi/mean/bias:0',
             'level_1/model/pi/mean/kernel:0',
             'level_1/model/value_fns/qf1/fc0/bias:0',
             'level_1/model/value_fns/qf1/fc0/kernel:0',
             'level_1/model/value_fns/qf1/fc1/bias:0',
             'level_1/model/value_fns/qf1/fc1/kernel:0',
             'level_1/model/value_fns/qf1/qf_output/bias:0',
             'level_1/model/value_fns/qf1/qf_output/kernel:0',
             'level_1/model/value_fns/qf2/fc0/bias:0',
             'level_1/model/value_fns/qf2/fc0/kernel:0',
             'level_1/model/value_fns/qf2/fc1/bias:0',
             'level_1/model/value_fns/qf2/fc1/kernel:0',
             'level_1/model/value_fns/qf2/qf_output/bias:0',
             'level_1/model/value_fns/qf2/qf_output/kernel:0',
             'level_1/model/value_fns/vf/fc0/bias:0',
             'level_1/model/value_fns/vf/fc0/kernel:0',
             'level_1/model/value_fns/vf/fc1/bias:0',
             'level_1/model/value_fns/vf/fc1/kernel:0',
             'level_1/model/value_fns/vf/vf_output/bias:0',
             'level_1/model/value_fns/vf/vf_output/kernel:0',
             'level_1/target/value_fns/vf/fc0/bias:0',
             'level_1/target/value_fns/vf/fc0/kernel:0',
             'level_1/target/value_fns/vf/fc1/bias:0',
             'level_1/target/value_fns/vf/fc1/kernel:0',
             'level_1/target/value_fns/vf/vf_output/bias:0',
             'level_1/target/value_fns/vf/vf_output/kernel:0']
        )
        # Check observation/action/context spaces of the agents
        self.assertEqual(
            policy.agents['policy'].policy[0].ac_space.shape[0],
            self.policy_params_shared['ob_space'].shape[0]
        )
        self.assertEqual(
            policy.agents['policy'].policy[0].ob_space.shape[0],
            self.policy_params_shared['ob_space'].shape[0]
        )
        self.assertEqual(
            policy.agents['policy'].policy[0].co_space.shape[0],
            self.policy_params_shared['co_space'].shape[0]
        )
        self.assertEqual(
            policy.agents['policy'].policy[1].ac_space.shape[0],
            self.policy_params_shared['ac_space'].shape[0]
        )
        self.assertEqual(
            policy.agents['policy'].policy[1].ob_space.shape[0],
            self.policy_params_shared['ob_space'].shape[0]
        )
        self.assertEqual(
            policy.agents['policy'].policy[1].co_space.shape[0],
            self.policy_params_shared['ob_space'].shape[0]
        )
        # Check the instantiation of the class attributes.
        self.assertTrue(policy.shared)
        self.assertTrue(not policy.maddpg)
    def test_initialize_1(self):
        """Check the functionality of the initialize() method.
        This test validates that the target variables are properly initialized
        when initialize is called.
        This is done for the following cases:
        1. maddpg = False, shared = False
        2. maddpg = False, shared = True
        """
        policy_params = self.policy_params_independent.copy()
        policy_params["maddpg"] = False
        policy = SACMultiGoalConditionedPolicy(**policy_params)
        # Initialize the variables of the policy.
        policy.sess.run(tf.compat.v1.global_variables_initializer())
        # Run the initialize method.
        policy.initialize()
        # The two lists below are index-aligned: entry i of the model list
        # is compared against entry i of the target list.
        model_var_list = [
            'a/level_0/model/value_fns/vf/fc0/kernel:0',
            'a/level_0/model/value_fns/vf/fc0/bias:0',
            'a/level_0/model/value_fns/vf/fc1/kernel:0',
            'a/level_0/model/value_fns/vf/fc1/bias:0',
            'a/level_0/model/value_fns/vf/vf_output/kernel:0',
            'a/level_0/model/value_fns/vf/vf_output/bias:0',
            'a/level_1/model/value_fns/vf/fc0/kernel:0',
            'a/level_1/model/value_fns/vf/fc0/bias:0',
            'a/level_1/model/value_fns/vf/fc1/kernel:0',
            'a/level_1/model/value_fns/vf/fc1/bias:0',
            'a/level_1/model/value_fns/vf/vf_output/kernel:0',
            'a/level_1/model/value_fns/vf/vf_output/bias:0',
            'b/level_0/model/value_fns/vf/fc0/kernel:0',
            'b/level_0/model/value_fns/vf/fc0/bias:0',
            'b/level_0/model/value_fns/vf/fc1/kernel:0',
            'b/level_0/model/value_fns/vf/fc1/bias:0',
            'b/level_0/model/value_fns/vf/vf_output/kernel:0',
            'b/level_0/model/value_fns/vf/vf_output/bias:0',
            'b/level_1/model/value_fns/vf/fc0/kernel:0',
            'b/level_1/model/value_fns/vf/fc0/bias:0',
            'b/level_1/model/value_fns/vf/fc1/kernel:0',
            'b/level_1/model/value_fns/vf/fc1/bias:0',
            'b/level_1/model/value_fns/vf/vf_output/kernel:0',
            'b/level_1/model/value_fns/vf/vf_output/bias:0',
        ]
        target_var_list = [
            'a/level_0/target/value_fns/vf/fc0/kernel:0',
            'a/level_0/target/value_fns/vf/fc0/bias:0',
            'a/level_0/target/value_fns/vf/fc1/kernel:0',
            'a/level_0/target/value_fns/vf/fc1/bias:0',
            'a/level_0/target/value_fns/vf/vf_output/kernel:0',
            'a/level_0/target/value_fns/vf/vf_output/bias:0',
            'a/level_1/target/value_fns/vf/fc0/kernel:0',
            'a/level_1/target/value_fns/vf/fc0/bias:0',
            'a/level_1/target/value_fns/vf/fc1/kernel:0',
            'a/level_1/target/value_fns/vf/fc1/bias:0',
            'a/level_1/target/value_fns/vf/vf_output/kernel:0',
            'a/level_1/target/value_fns/vf/vf_output/bias:0',
            'b/level_0/target/value_fns/vf/fc0/kernel:0',
            'b/level_0/target/value_fns/vf/fc0/bias:0',
            'b/level_0/target/value_fns/vf/fc1/kernel:0',
            'b/level_0/target/value_fns/vf/fc1/bias:0',
            'b/level_0/target/value_fns/vf/vf_output/kernel:0',
            'b/level_0/target/value_fns/vf/vf_output/bias:0',
            'b/level_1/target/value_fns/vf/fc0/kernel:0',
            'b/level_1/target/value_fns/vf/fc0/bias:0',
            'b/level_1/target/value_fns/vf/fc1/kernel:0',
            'b/level_1/target/value_fns/vf/fc1/bias:0',
            'b/level_1/target/value_fns/vf/vf_output/kernel:0',
            'b/level_1/target/value_fns/vf/vf_output/bias:0',
        ]
        for model, target in zip(model_var_list, target_var_list):
            with tf.compat.v1.variable_scope(
                    tf.compat.v1.get_variable_scope(), reuse=True):
                model_val = policy.sess.run(model)
                target_val = policy.sess.run(target)
            np.testing.assert_almost_equal(model_val, target_val)
    def test_initialize_2(self):
        """Check initialize() for the maddpg=False, shared=True case.

        Validates that the shared target value-function variables match the
        model variables after initialization.
        """
        policy_params = self.policy_params_shared.copy()
        policy_params["maddpg"] = False
        policy = SACMultiGoalConditionedPolicy(**policy_params)
        # Initialize the variables of the policy.
        policy.sess.run(tf.compat.v1.global_variables_initializer())
        # Run the initialize method.
        policy.initialize()
        model_var_list = [
            'level_0/model/value_fns/vf/fc0/kernel:0',
            'level_0/model/value_fns/vf/fc0/bias:0',
            'level_0/model/value_fns/vf/fc1/kernel:0',
            'level_0/model/value_fns/vf/fc1/bias:0',
            'level_0/model/value_fns/vf/vf_output/kernel:0',
            'level_0/model/value_fns/vf/vf_output/bias:0',
            'level_1/model/value_fns/vf/fc0/kernel:0',
            'level_1/model/value_fns/vf/fc0/bias:0',
            'level_1/model/value_fns/vf/fc1/kernel:0',
            'level_1/model/value_fns/vf/fc1/bias:0',
            'level_1/model/value_fns/vf/vf_output/kernel:0',
            'level_1/model/value_fns/vf/vf_output/bias:0',
        ]
        target_var_list = [
            'level_0/target/value_fns/vf/fc0/kernel:0',
            'level_0/target/value_fns/vf/fc0/bias:0',
            'level_0/target/value_fns/vf/fc1/kernel:0',
            'level_0/target/value_fns/vf/fc1/bias:0',
            'level_0/target/value_fns/vf/vf_output/kernel:0',
            'level_0/target/value_fns/vf/vf_output/bias:0',
            'level_1/target/value_fns/vf/fc0/kernel:0',
            'level_1/target/value_fns/vf/fc0/bias:0',
            'level_1/target/value_fns/vf/fc1/kernel:0',
            'level_1/target/value_fns/vf/fc1/bias:0',
            'level_1/target/value_fns/vf/vf_output/kernel:0',
            'level_1/target/value_fns/vf/vf_output/bias:0',
        ]
        for model, target in zip(model_var_list, target_var_list):
            with tf.compat.v1.variable_scope(
                    tf.compat.v1.get_variable_scope(), reuse=True):
                model_val = policy.sess.run(model)
                target_val = policy.sess.run(target)
            np.testing.assert_almost_equal(model_val, target_val)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
```
#### File: tests/fast_tests/test_replay_buffer.py
```python
import unittest
import random
import numpy as np
from hbaselines.fcnet.replay_buffer import ReplayBuffer
from hbaselines.goal_conditioned.replay_buffer import HierReplayBuffer
from hbaselines.multiagent.replay_buffer import MultiReplayBuffer
from hbaselines.multiagent.replay_buffer import SharedReplayBuffer
class TestReplayBuffer(unittest.TestCase):
    """Tests for the ReplayBuffer object."""
    def setUp(self):
        """Create a tiny buffer: capacity 2, batch size 1, 1-d obs/actions."""
        self.replay_buffer = ReplayBuffer(
            buffer_size=2, batch_size=1, obs_dim=1, ac_dim=1)
    def tearDown(self):
        """Drop the buffer between tests."""
        del self.replay_buffer
    def test_buffer_size(self):
        """Validate the buffer_size output from the replay buffer."""
        self.assertEqual(self.replay_buffer.buffer_size, 2)
    def test_add_sample(self):
        """Test the `add` and `sample` methods the replay buffer."""
        # Add an element.
        self.replay_buffer.add(
            obs_t=np.array([0]),
            action=np.array([1]),
            reward=2,
            obs_tp1=np.array([3]),
            done=False
        )
        # Check is_full in the False case.
        self.assertEqual(self.replay_buffer.is_full(), False)
        # Add an element.
        self.replay_buffer.add(
            obs_t=np.array([0]),
            action=np.array([1]),
            reward=2,
            obs_tp1=np.array([3]),
            done=False
        )
        # Check is_full in the True case.
        self.assertEqual(self.replay_buffer.is_full(), True)
        # Check can_sample in the True case.
        self.assertEqual(self.replay_buffer.can_sample(), True)
        # Test the `sample` method.
        # Both stored transitions are identical, so the sampled batch is
        # deterministic regardless of which index is drawn.
        obs_t, actions_t, rewards, obs_tp1, done = self.replay_buffer.sample()
        np.testing.assert_array_almost_equal(obs_t, [[0]])
        np.testing.assert_array_almost_equal(actions_t, [[1]])
        np.testing.assert_array_almost_equal(rewards, [2])
        np.testing.assert_array_almost_equal(obs_tp1, [[3]])
        np.testing.assert_array_almost_equal(done, [False])
class TestHierReplayBuffer(unittest.TestCase):
    """Tests for the HierReplayBuffer object."""
    def setUp(self):
        """Create a 3-level hierarchical buffer with meta-period 3."""
        self.replay_buffer = HierReplayBuffer(
            buffer_size=2,
            batch_size=1,
            meta_period=3,
            obs_dim=1,
            ac_dim=1,
            co_dim=1,
            goal_dim=1,
            num_levels=3,
        )
    def tearDown(self):
        """Drop the buffer between tests."""
        del self.replay_buffer
    def test_buffer_size(self):
        """Validate the buffer_size output from the replay buffer."""
        self.assertEqual(self.replay_buffer.buffer_size, 2)
    def test_add_sample(self):
        """Test the `add` and `sample` methods the replay buffer."""
        # Set the random seed.
        # NOTE: every expected value asserted below depends on this seed
        # and on the exact sequence of `random` calls inside `sample`.
        random.seed(0)
        obs_t = [np.array([0]), np.array([1]), np.array([2]),
                 np.array([3]), np.array([4]), np.array([5]),
                 np.array([6]), np.array([7]), np.array([8]),
                 np.array([9])]
        # Actions per level: the top level acts once per meta-period**2,
        # lower levels act every step.
        action_t = [[np.array([0]), np.array([1]), np.array([2]),
                     np.array([3])],
                    [np.array([0]), np.array([1]), np.array([2]),
                     np.array([3]), np.array([4]), np.array([5]),
                     np.array([6]), np.array([7]), np.array([8]),
                     np.array([9])],
                    [np.array([0]), np.array([1]), np.array([2]),
                     np.array([3]), np.array([4]), np.array([5]),
                     np.array([6]), np.array([7]), np.array([8]),
                     np.array([9])]]
        context_t = [np.array([0]), np.array([1])]
        reward_t = [[0], [0, 1, 2], [0, 1, 2, 3, 4, 5, 6, 7, 8]]
        done_t = [
            False, False, False, False, False, False, False, False, False]
        # Add an element.
        self.replay_buffer.add(
            obs_t=obs_t,
            action_t=action_t,
            context_t=context_t,
            reward_t=reward_t,
            done_t=done_t,
        )
        # Check is_full in the False case.
        self.assertEqual(self.replay_buffer.is_full(), False)
        # Add an element.
        self.replay_buffer.add(
            obs_t=obs_t,
            action_t=action_t,
            context_t=context_t,
            reward_t=reward_t,
            done_t=done_t,
        )
        # Check is_full in the True case.
        self.assertEqual(self.replay_buffer.is_full(), True)
        # Check can_sample in the True case.
        self.assertEqual(self.replay_buffer.can_sample(), True)
        # Test the `sample` method.
        obs0, obs1, act, rew, done, _ = self.replay_buffer.sample(False)
        np.testing.assert_array_almost_equal(obs0[0], [[0, 0]])
        np.testing.assert_array_almost_equal(obs0[1], [[6, 2]])
        np.testing.assert_array_almost_equal(obs0[2], [[6, 6]])
        np.testing.assert_array_almost_equal(obs1[0], [[9, 1]])
        np.testing.assert_array_almost_equal(obs1[1], [[9, 3]])
        np.testing.assert_array_almost_equal(obs1[2], [[7, 7]])
        np.testing.assert_array_almost_equal(act[0], [[0]])
        np.testing.assert_array_almost_equal(act[1], [[6]])
        np.testing.assert_array_almost_equal(act[2], [[6]])
        np.testing.assert_array_almost_equal(rew[0], [0])
        np.testing.assert_array_almost_equal(rew[1], [2])
        np.testing.assert_array_almost_equal(rew[2], [6])
        np.testing.assert_array_almost_equal(done[0], [0])
        np.testing.assert_array_almost_equal(done[1], [0])
        np.testing.assert_array_almost_equal(done[2], [0])
class TestMultiReplayBuffer(unittest.TestCase):
    """Tests for the MultiReplayBuffer object."""
    def setUp(self):
        """Create a buffer that also stores joint (all-agent) obs/actions."""
        self.replay_buffer = MultiReplayBuffer(
            buffer_size=2,
            batch_size=1,
            obs_dim=1,
            ac_dim=2,
            all_obs_dim=3,
            all_ac_dim=4
        )
    def tearDown(self):
        """Drop the buffer between tests."""
        del self.replay_buffer
    def test_init(self):
        """Validate that all the attributes were initialize properly."""
        self.assertTupleEqual(self.replay_buffer.obs_t.shape, (2, 1))
        self.assertTupleEqual(self.replay_buffer.action_t.shape, (2, 2))
        self.assertTupleEqual(self.replay_buffer.reward.shape, (2,))
        self.assertTupleEqual(self.replay_buffer.obs_tp1.shape, (2, 1))
        self.assertTupleEqual(self.replay_buffer.done.shape, (2,))
        self.assertTupleEqual(self.replay_buffer.all_obs_t.shape, (2, 3))
        self.assertTupleEqual(self.replay_buffer.all_action_t.shape, (2, 4))
        self.assertTupleEqual(self.replay_buffer.all_obs_tp1.shape, (2, 3))
    def test_buffer_size(self):
        """Validate the buffer_size output from the replay buffer."""
        self.assertEqual(self.replay_buffer.buffer_size, 2)
    def test_add_sample(self):
        """Test the `add` and `sample` methods the replay buffer."""
        # Add an element.
        self.replay_buffer.add(
            obs_t=np.array([0]),
            action=np.array([1, 1]),
            reward=2,
            obs_tp1=np.array([3]),
            done=False,
            all_obs_t=np.array([4, 4, 4]),
            all_action_t=np.array([5, 5, 5, 5]),
            all_obs_tp1=np.array([6, 6, 6])
        )
        # Check is_full in the False case.
        self.assertEqual(self.replay_buffer.is_full(), False)
        # Add an element.
        self.replay_buffer.add(
            obs_t=np.array([0]),
            action=np.array([1, 1]),
            reward=2,
            obs_tp1=np.array([3]),
            done=False,
            all_obs_t=np.array([4, 4, 4]),
            all_action_t=np.array([5, 5, 5, 5]),
            all_obs_tp1=np.array([6, 6, 6])
        )
        # Check is_full in the True case.
        self.assertEqual(self.replay_buffer.is_full(), True)
        # Check can_sample in the True case.
        self.assertEqual(self.replay_buffer.can_sample(), True)
        # Test the `sample` method.
        # Both stored transitions are identical, so sampling is deterministic.
        obs_t, actions_t, rewards, obs_tp1, done, all_obs_t, all_actions_t, \
            all_obs_tp1 = self.replay_buffer.sample()
        np.testing.assert_array_almost_equal(obs_t, [[0]])
        np.testing.assert_array_almost_equal(actions_t, [[1, 1]])
        np.testing.assert_array_almost_equal(rewards, [2])
        np.testing.assert_array_almost_equal(obs_tp1, [[3]])
        np.testing.assert_array_almost_equal(done, [False])
        np.testing.assert_array_almost_equal(all_obs_t, [[4, 4, 4]])
        np.testing.assert_array_almost_equal(all_actions_t, [[5, 5, 5, 5]])
        np.testing.assert_array_almost_equal(all_obs_tp1, [[6, 6, 6]])
class TestSharedReplayBuffer(unittest.TestCase):
    """Tests for the SharedReplayBuffer object."""
    def setUp(self):
        """Create a buffer with per-agent storage for 3 agents."""
        self.replay_buffer = SharedReplayBuffer(
            buffer_size=2,
            batch_size=1,
            obs_dim=1,
            ac_dim=2,
            n_agents=3,
            all_obs_dim=4
        )
    def tearDown(self):
        """Drop the buffer between tests."""
        del self.replay_buffer
    def test_init(self):
        """Validate that all the attributes were initialize properly."""
        # These variables are stored for all agents, so should be a list for
        # each agent.
        self.assertEqual(len(self.replay_buffer.obs_t), 3)
        self.assertEqual(len(self.replay_buffer.action), 3)
        self.assertEqual(len(self.replay_buffer.obs_tp1), 3)
        # Check the sizes of the individual variables.
        self.assertTupleEqual(self.replay_buffer.reward.shape, (2,))
        self.assertTupleEqual(self.replay_buffer.done.shape, (2,))
        self.assertTupleEqual(self.replay_buffer.all_obs_t.shape, (2, 4))
        self.assertTupleEqual(self.replay_buffer.all_obs_tp1.shape, (2, 4))
        for i in range(3):  # loop through num_agents
            self.assertTupleEqual(self.replay_buffer.obs_t[i].shape, (2, 1))
            self.assertTupleEqual(self.replay_buffer.action[i].shape, (2, 2))
            self.assertTupleEqual(self.replay_buffer.obs_tp1[i].shape, (2, 1))
    def test_buffer_size(self):
        """Validate the buffer_size output from the replay buffer."""
        self.assertEqual(self.replay_buffer.buffer_size, 2)
    def test_add_sample(self):
        """Test the `add` and `sample` methods the replay buffer."""
        # Add an element.
        self.replay_buffer.add(
            obs_t=[np.array([0]), np.array([1]), np.array([2])],
            action=[np.array([3, 3]), np.array([4, 4]), np.array([5, 5])],
            reward=6,
            obs_tp1=[np.array([7]), np.array([8]), np.array([9])],
            done=False,
            all_obs_t=np.array([10, 10, 10, 10]),
            all_obs_tp1=np.array([11, 11, 11, 11]),
        )
        # Check is_full in the False case.
        self.assertEqual(self.replay_buffer.is_full(), False)
        # Add an element.
        self.replay_buffer.add(
            obs_t=[np.array([0]), np.array([1]), np.array([2])],
            action=[np.array([3, 3]), np.array([4, 4]), np.array([5, 5])],
            reward=6,
            obs_tp1=[np.array([7]), np.array([8]), np.array([9])],
            done=False,
            all_obs_t=np.array([10, 10, 10, 10]),
            all_obs_tp1=np.array([11, 11, 11, 11]),
        )
        # Check is_full in the True case.
        self.assertEqual(self.replay_buffer.is_full(), True)
        # Check can_sample in the True case.
        self.assertEqual(self.replay_buffer.can_sample(), True)
        # Test the `sample` method.
        # Both stored transitions are identical, so sampling is deterministic.
        obs_t, actions_t, rewards, obs_tp1, done, all_obs_t, all_obs_tp1 = \
            self.replay_buffer.sample()
        np.testing.assert_array_almost_equal(rewards, [6])
        np.testing.assert_array_almost_equal(done, [False])
        np.testing.assert_array_almost_equal(all_obs_t, [[10, 10, 10, 10]])
        np.testing.assert_array_almost_equal(all_obs_tp1, [[11, 11, 11, 11]])
        np.testing.assert_array_almost_equal(obs_t[0], [[0]])
        np.testing.assert_array_almost_equal(obs_t[1], [[1]])
        np.testing.assert_array_almost_equal(obs_t[2], [[2]])
        np.testing.assert_array_almost_equal(actions_t[0], [[3, 3]])
        np.testing.assert_array_almost_equal(actions_t[1], [[4, 4]])
        np.testing.assert_array_almost_equal(actions_t[2], [[5, 5]])
        np.testing.assert_array_almost_equal(obs_tp1[0], [[7]])
        np.testing.assert_array_almost_equal(obs_tp1[1], [[8]])
        np.testing.assert_array_almost_equal(obs_tp1[2], [[9]])
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
``` |
{
"source": "jiangsy/mj_envs",
"score": 2
} |
#### File: mj_envs/hand_manipulation_suite/relocate_v0.py
```python
import os
import warnings
from gym import utils, spaces
from mjrl.envs import mujoco_env
import mujoco_py
import numpy as np
# When True, `step` adds sparse success bonuses (+10 / +20) on top of the
# dense shaping reward once the object is near the target.
ADD_BONUS_REWARDS = True
# Presumably the default render width/height in pixels -- not referenced in
# this part of the file; verify against the rendering helpers.
DEFAULT_SIZE = 128
class RelocateEnvV0(mujoco_env.MujocoEnv, utils.EzPickle):
    def __init__(self, use_full_state=False):
        """Build the ADROIT-hand relocate environment.

        Parameters
        ----------
        use_full_state : bool
            If True, ``_get_obs`` returns the full environment state;
            otherwise it returns the regular env state. Note that the
            reduced ``get_obs`` observation is currently disabled (see the
            commented-out line below), hence the warning issued here.
        """
        warnings.warn("State space is different from the original state space")
        # Placeholder ids; resolved via name2id after the model loads below.
        self.target_obj_sid = 0
        self.S_grasp_sid = 0
        self.obj_bid = 0
        if use_full_state:
            self._get_obs = self.get_env_full_state
        else:
            # self._get_obs = self.get_obs
            self._get_obs = self.get_env_state
        curr_dir = os.path.dirname(os.path.abspath(__file__))
        mujoco_env.MujocoEnv.__init__(self, curr_dir + '/assets/DAPG_relocate.xml', 5)
        # change actuator sensitivity
        # Wrist actuators (A_WRJ1..A_WRJ0) get gain 10; finger/thumb
        # actuators (A_FFJ3..A_THJ0) get gain 1, with matching bias terms.
        self.sim.model.actuator_gainprm[
            self.sim.model.actuator_name2id('A_WRJ1'):self.sim.model.actuator_name2id('A_WRJ0') + 1, :3] = np.array(
            [10, 0, 0])
        self.sim.model.actuator_gainprm[
            self.sim.model.actuator_name2id('A_FFJ3'):self.sim.model.actuator_name2id('A_THJ0') + 1, :3] = np.array(
            [1, 0, 0])
        self.sim.model.actuator_biasprm[
            self.sim.model.actuator_name2id('A_WRJ1'):self.sim.model.actuator_name2id('A_WRJ0') + 1, :3] = np.array(
            [0, -10, 0])
        self.sim.model.actuator_biasprm[
            self.sim.model.actuator_name2id('A_FFJ3'):self.sim.model.actuator_name2id('A_THJ0') + 1, :3] = np.array(
            [0, -1, 0])
        self.target_obj_sid = self.sim.model.site_name2id("target")
        self.S_grasp_sid = self.sim.model.site_name2id('S_grasp')
        self.obj_bid = self.sim.model.body_name2id('Object')
        utils.EzPickle.__init__(self)
        # Mid-point and half-range of the actuator control ranges, used in
        # `step` to rescale normalized [-1, 1] actions to actuator units.
        self.act_mid = np.mean(self.model.actuator_ctrlrange, axis=1)
        self.act_rng = 0.5 * (self.model.actuator_ctrlrange[:, 1] - self.model.actuator_ctrlrange[:, 0])
        # Expose a normalized [-1, 1] action space to the agent.
        self.action_space.high = np.ones_like(self.model.actuator_ctrlrange[:, 1])
        self.action_space.low = -1.0 * np.ones_like(self.model.actuator_ctrlrange[:, 0])
        # Probe each representation once to size the corresponding spaces.
        obs = self.get_obs()
        state = self.get_env_state()
        full_state = self.get_env_full_state()
        self.obs_dim = obs.size
        self.state_dim = state.size
        self.full_state_dim = full_state.size
        obs_high = np.inf * np.ones(self.obs_dim)
        obs_low = -obs_high
        self.observation_space = spaces.Box(obs_low, obs_high, dtype=np.float32)
        state_high = np.inf * np.ones(self.state_dim)
        state_low = -state_high
        self.state_space = spaces.Box(state_low, state_high, dtype=np.float32)
        full_state_high = np.inf * np.ones(self.full_state_dim)
        full_state_low = -full_state_high
        self.full_state_space = spaces.Box(full_state_low, full_state_high, dtype=np.float32)
        # Viewers are created lazily elsewhere.
        self.viewer = None
        self._viewers = {}
def step(self, a):
a = np.clip(a, -1.0, 1.0)
try:
a = self.act_mid + a * self.act_rng # mean center and scale
except:
a = a # only for the initialization phase
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
reward = -0.1 * np.linalg.norm(palm_pos - obj_pos) # take hand to object
if obj_pos[2] > 0.04: # if object off the table
reward += 1.0 # bonus for lifting the object
reward += -0.5 * np.linalg.norm(palm_pos - target_pos) # make hand go to target
reward += -0.5 * np.linalg.norm(obj_pos - target_pos) # make object go to target
if ADD_BONUS_REWARDS:
if np.linalg.norm(obj_pos - target_pos) < 0.1:
reward += 10.0 # bonus for object close to target
if np.linalg.norm(obj_pos - target_pos) < 0.05:
reward += 20.0 # bonus for object "very" close to target
goal_achieved = True if np.linalg.norm(obj_pos - target_pos) < 0.1 else False
return ob, reward, False, dict(goal_achieved=goal_achieved)
def get_obs(self):
# qpos for hand
# xpos for obj
# xpos for target
qp = self.data.qpos.ravel()
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
return np.concatenate([qp[:-6], palm_pos - obj_pos, palm_pos - target_pos, obj_pos - target_pos])
def reset_model(self):
qp = self.init_qpos.copy()
qv = self.init_qvel.copy()
self.set_state(qp, qv)
self.model.body_pos[self.obj_bid, 0] = self.np_random.uniform(low=-0.15, high=0.15)
self.model.body_pos[self.obj_bid, 1] = self.np_random.uniform(low=-0.15, high=0.3)
self.model.site_pos[self.target_obj_sid, 0] = self.np_random.uniform(low=-0.2, high=0.2)
self.model.site_pos[self.target_obj_sid, 1] = self.np_random.uniform(low=-0.2, high=0.2)
self.model.site_pos[self.target_obj_sid, 2] = self.np_random.uniform(low=0.15, high=0.35)
self.sim.forward()
return self._get_obs()
def get_env_full_state(self):
return self.get_env_state()
def full_state_to_state(self, full_states):
assert full_states.ndim == 2
return full_states
def full_state_to_obs(self, full_states):
assert full_states.ndim == 2
qp = full_states[:, :36]
obj_pos = full_states[:, 102:105]
palm_pos = full_states[:, 105:108]
target_pos = full_states[:, 108:111]
return np.concatenate([qp[:, :-6], palm_pos - obj_pos, palm_pos - target_pos, obj_pos - target_pos], axis=-1)
    def get_env_state(self):
        """
        Get state of hand as well as objects and targets in the scene.

        Returns a flat vector laid out as (sizes inferred from the slices
        in set_env_state):
        qpos (36) | qvel (36) | hand qpos (30) | object xpos (3) |
        palm xpos (3) | target xpos (3) | object body pos (3) |
        target site pos (3) -- 117 entries total.
        """
        qp = self.data.qpos.ravel().copy()
        qv = self.data.qvel.ravel().copy()
        hand_qpos = qp[:30]  # first 30 qpos entries (hand joints, per the name)
        obj_xpos = self.data.body_xpos[self.obj_bid].ravel()
        palm_xpos = self.data.site_xpos[self.S_grasp_sid].ravel()
        target_xpos = self.data.site_xpos[self.target_obj_sid].ravel()
        # Model-level positions; these are the entries set_env_state restores.
        obj_pos = self.model.body_pos[self.obj_bid].ravel()
        target_pos = self.model.site_pos[self.target_obj_sid].ravel()
        return np.concatenate([qp, qv, hand_qpos, obj_xpos, palm_xpos, target_xpos, obj_pos, target_pos])
    def set_env_state(self, state):
        """Restore a state vector produced by get_env_state().

        Only qpos/qvel and the model-level object/target positions are
        consumed; entries 72:111 (redundant xpos copies) are ignored and
        derived quantities are refreshed by sim.forward().
        """
        qp = state[:36]
        qv = state[36:72]
        obj_pos = state[111:114]
        target_pos = state[114:117]
        self.set_state(qp, qv)
        self.model.body_pos[self.obj_bid] = obj_pos.copy()
        self.model.site_pos[self.target_obj_sid] = target_pos.copy()
        self.sim.forward()
    def set_env_full_state(self, full_state):
        """Restore from a full state (identical layout to get_env_state)."""
        self.set_env_state(full_state)
    def mj_viewer_setup(self):
        """Configure the viewer camera (azimuth 90, distance 1.5)."""
        self.viewer.cam.azimuth = 90
        self.sim.forward()
        self.viewer.cam.distance = 1.5
def evaluate_success(self, paths):
num_success = 0
num_paths = len(paths)
# success if object close to target for 25 steps
for path in paths:
if np.sum(path['env_infos']['goal_achieved']) > 25:
num_success += 1
success_percentage = num_success * 100.0 / num_paths
return success_percentage
    def render(self,
               mode='human',
               width=DEFAULT_SIZE,
               height=DEFAULT_SIZE,
               camera_id=None,
               camera_name=None):
        """Render the scene.

        Args:
            mode: 'human' (interactive window), 'rgb_array' or 'depth_array'.
            width: Output width for the offscreen modes.
            height: Output height for the offscreen modes.
            camera_id: Numeric camera selector; mutually exclusive with
                camera_name.
            camera_name: Named camera selector; defaults to 'fixed' when
                neither selector is given.

        Returns:
            An RGB array for 'rgb_array', a depth array for 'depth_array',
            None for 'human'.
        """
        if mode == 'rgb_array' or mode == 'depth_array':
            if camera_id is not None and camera_name is not None:
                raise ValueError("Both `camera_id` and `camera_name` cannot be"
                                 " specified at the same time.")
            no_camera_specified = camera_name is None and camera_id is None
            if no_camera_specified:
                camera_name = 'fixed'
            if camera_id is None and camera_name in self.model._camera_name2id:
                camera_id = self.model.camera_name2id(camera_name)
            self._get_viewer(mode).render(width, height, camera_id=camera_id)
        if mode == 'rgb_array':
            # window size used for old mujoco-py:
            data = self._get_viewer(mode).read_pixels(width, height, depth=False)
            # original image is upside-down, so flip it
            return data[::-1, :, :]
        elif mode == 'depth_array':
            # NOTE(review): this renders a *second* time (the shared render
            # above already ran for this mode) -- presumably harmless; verify.
            self._get_viewer(mode).render(width, height)
            # window size used for old mujoco-py:
            # Extract depth part of the read_pixels() tuple
            data = self._get_viewer(mode).read_pixels(width, height, depth=True)[1]
            # original image is upside-down, so flip it
            return data[::-1, :]
        elif mode == 'human':
            self._get_viewer(mode).render()
def _get_viewer(self, mode):
self.viewer = self._viewers.get(mode)
if self.viewer is None:
if mode == 'human':
self.viewer = mujoco_py.MjViewer(self.sim)
elif mode == 'rgb_array' or mode == 'depth_array':
self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)
self.mj_viewer_setup()
self._viewers[mode] = self.viewer
return self.viewer
def close(self):
if self.viewer is not None:
# self.viewer.finish()
self.viewer = None
self._viewers = {}
if __name__ == '__main__':
    # Smoke test: construct the environment, reset it and fetch the full
    # state once.
    env = RelocateEnvV0()
    env.reset()
    env.get_env_full_state()
    print(0)
``` |
{
"source": "JiangtaoFeng/MaskGIT-pytorch",
"score": 2
} |
#### File: JiangtaoFeng/MaskGIT-pytorch/training_transformer.py
```python
import os
import numpy as np
from tqdm import tqdm
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import utils as vutils
from transformer import VQGANTransformer
from utils import load_data, plot_images
class TrainTransformer:
    """Train the VQGAN transformer prior.

    NOTE(review): training is kicked off from the constructor; merely
    instantiating this class runs the full training loop.
    """

    def __init__(self, args):
        self.model = VQGANTransformer(args).to(device=args.device)
        self.optim = self.configure_optimizers()
        self.train(args)

    def train(self, args):
        """Run the training loop; logs samples and checkpoints every epoch."""
        train_dataset = load_data(args)
        for epoch in range(args.epochs):
            with tqdm(range(len(train_dataset))) as pbar:
                for i, imgs in zip(pbar, train_dataset):
                    self.optim.zero_grad()
                    imgs = imgs.to(device=args.device)
                    logits, target = self.model(imgs)
                    # Cross-entropy over the flattened token predictions.
                    loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
                    loss.backward()
                    self.optim.step()
                    pbar.set_postfix(Transformer_Loss=np.round(loss.cpu().detach().numpy().item(), 4))
                    pbar.update(0)  # refresh the bar without advancing it
                # Log reconstructions/samples for the last batch of the epoch.
                log, sampled_imgs = self.model.log_images(imgs[0][None])
                vutils.save_image(sampled_imgs, os.path.join("results", f"{epoch}.jpg"), nrow=4)
                plot_images(log)
                torch.save(self.model.state_dict(), os.path.join("checkpoints", f"transformer_epoch_{epoch}.pt"))

    def configure_optimizers(self):
        """Build AdamW with weight decay applied only to Linear weights.

        Biases, LayerNorm/Embedding weights and the positional embedding
        ('pos_emb') are excluded from weight decay.
        """
        decay, no_decay = set(), set()
        whitelist_weight_modules = (nn.Linear,)
        blacklist_weight_modules = (nn.LayerNorm, nn.Embedding)
        for mn, m in self.model.transformer.named_modules():
            for pn, p in m.named_parameters():
                fpn = '%s.%s' % (mn, pn) if mn else pn  # full param name
                if pn.endswith('bias'):
                    no_decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                    decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                    no_decay.add(fpn)
        no_decay.add('pos_emb')
        param_dict = {pn: p for pn, p in self.model.transformer.named_parameters()}
        optim_groups = [
            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        # NOTE(review): lr and betas are hard-coded here and ignore
        # args.learning_rate / args.beta1 / args.beta2 -- confirm intentional.
        optimizer = torch.optim.AdamW(optim_groups, lr=1e-4, betas=(0.9, 0.95))
        return optimizer
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="VQGAN")
    # Model / codebook options.
    parser.add_argument('--latent-dim', type=int, default=256, help='Latent dimension n_z.')
    parser.add_argument('--image-size', type=int, default=256, help='Image height and width.)')
    parser.add_argument('--num-codebook-vectors', type=int, default=1024, help='Number of codebook vectors.')
    parser.add_argument('--beta', type=float, default=0.25, help='Commitment loss scalar.')
    parser.add_argument('--image-channels', type=int, default=3, help='Number of channels of images.')
    # Data / runtime options.
    parser.add_argument('--dataset-path', type=str, default='./data', help='Path to data.')
    parser.add_argument('--checkpoint-path', type=str, default='./checkpoints/last_ckpt.pt', help='Path to checkpoint.')
    parser.add_argument('--device', type=str, default="cuda", help='Which device the training is on')
    parser.add_argument('--batch-size', type=int, default=10, help='Input batch size for training.')
    parser.add_argument('--epochs', type=int, default=50, help='Number of epochs to train.')
    # NOTE(review): several flags below (learning-rate, beta1/2, disc-*,
    # loss factors) appear unused by this script -- presumably shared with
    # the VQGAN training script; verify before removing.
    parser.add_argument('--learning-rate', type=float, default=2.25e-05, help='Learning rate.')
    parser.add_argument('--beta1', type=float, default=0.5, help='Adam beta param.')
    parser.add_argument('--beta2', type=float, default=0.9, help='Adam beta param.')
    parser.add_argument('--disc-start', type=int, default=10000, help='When to start the discriminator.')
    parser.add_argument('--disc-factor', type=float, default=1., help='Weighting factor for the Discriminator.')
    parser.add_argument('--l2-loss-factor', type=float, default=1., help='Weighting factor for reconstruction loss.')
    parser.add_argument('--perceptual-loss-factor', type=float, default=1., help='Weighting factor for perceptual loss.')
    # Transformer-specific options.
    parser.add_argument('--pkeep', type=float, default=0.5, help='Percentage for how much latent codes to keep.')
    parser.add_argument('--sos-token', type=int, default=0, help='Start of Sentence token.')
    args = parser.parse_args()
    train_transformer = TrainTransformer(args)
```
#### File: JiangtaoFeng/MaskGIT-pytorch/utils.py
```python
import os
import albumentations
import numpy as np
import torch.nn as nn
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
# --------------------------------------------- #
# Data Utils
# --------------------------------------------- #
class ImagePaths(Dataset):
    """Dataset yielding preprocessed images from every file in a directory.

    Each image is resized so its smallest side equals *size*, center-cropped
    to size x size, scaled to [-1, 1] and returned as a CHW float32 array.
    """

    def __init__(self, path, size=None):
        self.size = size
        self.images = [os.path.join(path, fname) for fname in os.listdir(path)]
        self._length = len(self.images)
        self.rescaler = albumentations.SmallestMaxSize(max_size=self.size)
        self.cropper = albumentations.CenterCrop(height=self.size, width=self.size)
        self.preprocessor = albumentations.Compose([self.rescaler, self.cropper])

    def __len__(self):
        return self._length

    def preprocess_image(self, image_path):
        """Load *image_path* and return a normalized CHW float32 array."""
        pil_image = Image.open(image_path)
        if pil_image.mode != "RGB":
            pil_image = pil_image.convert("RGB")
        array = np.array(pil_image).astype(np.uint8)
        array = self.preprocessor(image=array)["image"]
        array = (array / 127.5 - 1.0).astype(np.float32)
        return array.transpose(2, 0, 1)

    def __getitem__(self, i):
        return self.preprocess_image(self.images[i])
def load_data(args):
    """Create the training DataLoader.

    Generalized: uses ``args.image_size`` when present, falling back to the
    previously hard-coded 256 so existing callers behave identically.

    Args:
        args: Namespace with ``dataset_path``, ``batch_size`` and optionally
            ``image_size``.

    Returns:
        DataLoader over ImagePaths. NOTE(review): shuffle=False for a
        training loader -- confirm this is intentional.
    """
    image_size = getattr(args, 'image_size', 256)
    train_data = ImagePaths(args.dataset_path, size=image_size)
    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=False)
    return train_loader
# --------------------------------------------- #
# Module Utils
# for Encoder, Decoder etc.
# --------------------------------------------- #
def weights_init(m):
    """DCGAN-style init: N(0, 0.02) for convs, N(1, 0.02)/zero-bias for BN."""
    classname = m.__class__.__name__
    if 'Conv' in classname:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in classname:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
def plot_images(images: dict):
    """Show input, reconstruction, half sample and new sample side by side."""
    panels = [images[key] for key in ("input", "rec", "half_sample", "new_sample")]
    fig, axarr = plt.subplots(1, 4)
    for axis, tensor in zip(axarr, panels):
        axis.imshow(tensor.cpu().detach().numpy()[0].transpose(1, 2, 0))
    plt.show()
``` |
{
"source": "jiangtaojiang/AutowareAuto",
"score": 2
} |
#### File: controller_testing/launch/controller_testing_node_pure_pursuit.launch.py
```python
from launch import LaunchContext
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.actions import Shutdown
from launch.conditions import IfCondition
from launch.conditions import UnlessCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackage
from pathlib import Path
import os
context = LaunchContext()
def get_package_share_directory(package_name):
    """Return the absolute path to the share directory of the given package."""
    package_prefix = Path(FindPackage(package_name).perform(context))
    return os.path.join(package_prefix, "share", package_name)
def generate_launch_description():
    """Launch controller_testing_node and pure pursuit controller.

    Declares the parameter-file and flag launch arguments, then starts:
    the controller testing node (delayed by 2 s when RViz is requested, so
    the visualizer is ready), the pure pursuit controller, RViz (optional)
    and robot_state_publisher for the vehicle URDF.
    """
    # Resolve default parameter/config file locations from package shares.
    controller_testing_pkg_prefix = get_package_share_directory("controller_testing")
    controller_testing_param_file = os.path.join(
        controller_testing_pkg_prefix, "param/defaults.param.yaml"
    )
    rviz_cfg_path = os.path.join(controller_testing_pkg_prefix, 'config/pure_pursuit_cotrols.rviz')
    pure_pursuit_pkg_prefix = get_package_share_directory("pure_pursuit_nodes")
    pure_pursuit_param_file = os.path.join(
        pure_pursuit_pkg_prefix, "param/pure_pursuit.param.yaml"
    )
    urdf_pkg_prefix = get_package_share_directory('lexus_rx_450h_description')
    urdf_path = os.path.join(urdf_pkg_prefix, 'urdf/lexus_rx_450h.urdf')
    # Arguments
    controller_testing_param = DeclareLaunchArgument(
        "controller_testing_param_file",
        default_value=controller_testing_param_file,
        description="Path to config file for Controller Testing",
    )
    pure_pursuit_controller_param = DeclareLaunchArgument(
        "pure_pursuit_param_file",
        default_value=pure_pursuit_param_file,
        description="Path to config file to Pure Pursuit Controller",
    )
    with_rviz_param = DeclareLaunchArgument(
        'with_rviz',
        default_value='True',
        description='Launch RVIZ2 in addition to other nodes'
    )
    real_time_sim_param = DeclareLaunchArgument(
        'real_time_sim',
        default_value='False',
        # NOTE(review): description appears copied from with_rviz -- it
        # should presumably describe real-time simulation instead.
        description='Launch RVIZ2 in addition to other nodes'
    )
    # Nodes
    # Variant used when RViz is disabled: starts immediately.
    controller_testing = Node(
        package="controller_testing",
        node_executable="controller_testing_main.py",
        node_namespace="control",
        node_name="controller_testing_node",
        output="screen",
        parameters=[LaunchConfiguration("controller_testing_param_file"), {
            'real_time_sim': LaunchConfiguration('real_time_sim')
        }],
        remappings=[
            ("vehicle_state", "/vehicle/vehicle_kinematic_state"),
            ("planned_trajectory", "/planning/trajectory"),
            ("control_command", "/vehicle/control_command"),
            ("control_diagnostic", "/control/control_diagnostic"),
        ],
        on_exit=Shutdown(),
        condition=UnlessCondition(LaunchConfiguration('with_rviz'))
    )
    # Same node, but delayed so RViz has time to come up first.
    controller_testing_delayed = Node(
        package="controller_testing",
        node_executable="controller_testing_main.py",
        node_namespace="control",
        node_name="controller_testing_node",
        output="screen",
        parameters=[LaunchConfiguration("controller_testing_param_file"), {
            'real_time_sim': LaunchConfiguration('real_time_sim')
        }],
        remappings=[
            ("vehicle_state", "/vehicle/vehicle_kinematic_state"),
            ("planned_trajectory", "/planning/trajectory"),
            ("control_command", "/vehicle/control_command"),
            ("control_diagnostic", "/control/control_diagnostic"),
        ],
        on_exit=Shutdown(),
        # delay added to allow rviz to be ready, better to start rviz separately, beforehand
        prefix="bash -c 'sleep 2.0; $0 $@'",
        condition=IfCondition(LaunchConfiguration('with_rviz'))
    )
    # pure pursuit controller
    pure_pursuit_controller = Node(
        package="pure_pursuit_nodes",
        node_executable="pure_pursuit_node_exe",
        # node_namespace="control",
        node_name="pure_pursuit_node",
        output="screen",
        parameters=[LaunchConfiguration("pure_pursuit_param_file"), {}],
        remappings=[
            ("current_pose", "/vehicle/vehicle_kinematic_state"),
            ("trajectory", "/planning/trajectory"),
            ("ctrl_cmd", "/vehicle/control_command"),
            ("ctrl_diag", "/control/control_diagnostic"),
        ],
    )
    rviz2 = Node(
        package='rviz2',
        node_executable='rviz2',
        node_name='rviz2',
        arguments=['-d', str(rviz_cfg_path)],
        condition=IfCondition(LaunchConfiguration('with_rviz'))
    )
    urdf_publisher = Node(
        package='robot_state_publisher',
        node_executable='robot_state_publisher',
        node_name='robot_state_publisher',
        arguments=[str(urdf_path)]
    )
    return LaunchDescription(
        [
            real_time_sim_param,
            controller_testing_param,
            pure_pursuit_controller_param,
            with_rviz_param,
            rviz2,
            urdf_publisher,
            pure_pursuit_controller,
            controller_testing,  # if not with_rviz
            controller_testing_delayed  # with_rviz
        ]
    )
```
#### File: lgsvl_interface/launch/lgsvl_joystick.launch.py
```python
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.substitutions import LaunchConfiguration
from launch.launch_description_sources import PythonLaunchDescriptionSource
from ament_index_python import get_package_share_directory
import os
def get_share_file(package_name, file_name):
    """Return the absolute path of *file_name* inside *package_name*'s share dir."""
    share_dir = get_package_share_directory(package_name)
    return os.path.join(share_dir, file_name)
def generate_launch_description():
    """
    Launch a minimal joystick + LGSVL demo.

    Under the default configuration, the joystick translator outputs and
    the LGSVL interface expects RawControlCommand. Controlling the vehicle
    can happen via the gamepad triggers and left joystick.
    """
    # --------------------------------- Params -------------------------------
    # Default joystick translator params
    joy_translator_param = DeclareLaunchArgument(
        'joy_translator_param',
        default_value=[
            get_share_file('joystick_vehicle_interface', 'param/logitech_f310.default.param.yaml')
        ],
        description='Path to config file for joystick translator')
    # Default lgsvl_interface params
    lgsvl_interface_param = DeclareLaunchArgument(
        'lgsvl_interface_param',
        default_value=[
            get_share_file('lgsvl_interface', 'param/lgsvl.param.yaml')
        ],
        description='Path to config file for lgsvl interface')
    # -------------------------------- Nodes-----------------------------------
    # Include Joystick launch, forwarding the translator parameter file.
    joystick_launch_file_path = get_share_file('joystick_vehicle_interface',
                                               'launch/joystick_vehicle_interface.launch.py')
    joystick = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(joystick_launch_file_path),
        launch_arguments=[
            (
                "joy_translator_param",
                LaunchConfiguration("joy_translator_param")
            )
        ]
    )
    # Include LGSVL interface launch, forwarding its parameter file.
    lgsvl_launch_file_path = get_share_file('lgsvl_interface',
                                            'launch/lgsvl.launch.py')
    lgsvl = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(lgsvl_launch_file_path),
        launch_arguments=[
            (
                "lgsvl_interface_param",
                LaunchConfiguration("lgsvl_interface_param")
            )
        ]
    )
    return LaunchDescription([
        joy_translator_param,
        lgsvl_interface_param,
        joystick,
        lgsvl
    ])
```
#### File: autoware_demos/launch/autoware_academy_ndt_demo_no_odometry.launch.py
```python
from launch import LaunchContext
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackage
from pathlib import Path
import os
context = LaunchContext()
def get_package_share_directory(package_name):
    """Return the absolute path to the share directory of the given package."""
    prefix = FindPackage(package_name).perform(context)
    return os.path.join(Path(prefix), 'share', package_name)
def generate_launch_description():
    """Launch the NDT localization demo without an odometry source.

    Starts lidar point-cloud filtering/downsampling, the P2D NDT
    localizer, the NDT map publisher, RViz, and a static odom->base_link
    transform standing in for the missing odometry.
    """
    # Boilerplate to fetch the necessary parameter files:
    scan_downsampler_param_file = os.path.join(
        get_package_share_directory('autoware_demos'),
        'param/autoware_academy_demo/scan_downsampler.param.yaml')
    ndt_localizer_param_file = os.path.join(
        get_package_share_directory('autoware_demos'),
        'param/autoware_academy_demo/ndt_localizer.param.yaml')
    pc_filter_transform_param_file = os.path.join(
        get_package_share_directory('point_cloud_filter_transform_nodes'),
        'param/vlp16_sim_lexus_filter_transform.param.yaml')
    map_publisher_param_file = os.path.join(
        get_package_share_directory('autoware_demos'), 'param/autoware_academy_demo/map_publisher.param.yaml')
    rviz_cfg_path = os.path.join(get_package_share_directory('autoware_demos'),
                                 'rviz2/autoware_academy_demo.rviz')
    # Launch arguments exposing each parameter file.
    pc_filter_transform_param = DeclareLaunchArgument(
        'pc_filter_transform_param_file',
        default_value=pc_filter_transform_param_file,
        description='Path to config file for Point Cloud Filter/Transform Nodes'
    )
    scan_downsampler_param = DeclareLaunchArgument(
        'scan_downsampler_param_file',
        default_value=scan_downsampler_param_file,
        description='Path to config file for lidar scan downsampler'
    )
    ndt_localizer_param = DeclareLaunchArgument(
        'ndt_localizer_param_file',
        default_value=ndt_localizer_param_file,
        description='Path to config file for ndt localizer'
    )
    map_publisher_param = DeclareLaunchArgument(
        'map_publisher_param_file',
        default_value=map_publisher_param_file,
        description='Path to config file for Map Publisher'
    )
    # Node definitions.
    filter_transform_vlp16_front = Node(
        package='point_cloud_filter_transform_nodes',
        node_executable='point_cloud_filter_transform_node_exe',
        node_name='filter_transform_vlp16_front',
        node_namespace='lidar_front',
        parameters=[LaunchConfiguration('pc_filter_transform_param_file')],
        remappings=[("points_in", "points_raw")]
    )
    scan_downsampler = Node(
        package='voxel_grid_nodes',
        node_executable='voxel_grid_node_exe',
        node_namespace='lidar_front',
        node_name='voxel_grid_cloud_node',
        parameters=[LaunchConfiguration('scan_downsampler_param_file')],
        remappings=[
            ("points_in", "points_filtered"),
            ("points_downsampled", "points_filtered_downsampled")
        ]
    )
    ndt_localizer = Node(
        package='ndt_nodes',
        node_executable='p2d_ndt_localizer_exe',
        node_namespace='localization',
        node_name='p2d_ndt_localizer_node',
        parameters=[LaunchConfiguration('ndt_localizer_param_file')],
        remappings=[
            ("points_in", "/lidar_front/points_filtered_downsampled")
        ]
    )
    map_publisher = Node(
        package='ndt_nodes',
        node_executable='ndt_map_publisher_exe',
        node_namespace='localization',
        parameters=[LaunchConfiguration('map_publisher_param_file')]
    )
    rviz2 = Node(
        package='rviz2',
        node_executable='rviz2',
        node_name='rviz2',
        arguments=['-d', str(rviz_cfg_path)],
    )
    # Since we don't use an odometry source, odometry frame is statically defined to be
    # overlapping with the base_link.
    # TODO(yunus.caliskan): To be removed after #476
    odom_bl_publisher = Node(
        package='tf2_ros',
        node_executable='static_transform_publisher',
        arguments=["0", "0", "0", "0", "0", "0", "odom", "base_link"]
    )
    return LaunchDescription([
        map_publisher_param,
        pc_filter_transform_param,
        scan_downsampler_param,
        ndt_localizer_param,
        filter_transform_vlp16_front,
        map_publisher,
        scan_downsampler,
        odom_bl_publisher,
        ndt_localizer,
        rviz2
    ])
``` |
{
"source": "jiangtaoli2016/grpc-node",
"score": 2
} |
#### File: tools/buildgen/gen_build_yaml.py
```python
from __future__ import print_function
import re
import os
import sys
import yaml
# Node.js major versions and Electron release lines that build
# configurations are generated for.
node_versions = ["4", "5", "6", "7", "8", "9", "10", "11", "12", "13"]
electron_versions = ["1.0", "1.1", "1.2", "1.3", "1.4", "1.5", "1.6", "1.7", "1.8", "2.0", "3.0", "3.1", "4.1", "4.2", "5.0", "6.0", "6.1", "7.0", "7.1", "8.0"]
def gen_linux_configs():
    """Build the list of Linux build configurations.

    Emits one entry per (runtime, version, arch, libc) combination: glibc
    builds for every Node arch, musl (Alpine) builds for x64 Node, and
    glibc builds for Electron.
    """
    configs = []
    node_arches = ["ia32", "x64", "arm", "arm64", "s390x"]
    electron_arches = ["ia32", "x64"]
    alpine_arches = ["x64"]
    for version in node_versions:
        for arch in node_arches:
            configs.append({
                "name": "node_{version}_{arch}_glibc".format(version=version, arch=arch),
                "runtime": "node",
                "version": version,
                "arch": arch,
                "libc": "glibc"
            })
        for arch in alpine_arches:
            configs.append({
                "name": "node_{version}_{arch}_musl".format(version=version, arch=arch),
                "runtime": "node",
                "version": version,
                "arch": arch,
                # Bug fix: Alpine builds are musl-based; this previously said
                # "glibc", mislabeling every *_musl config.
                "libc": "musl"
            })
    for version in electron_versions:
        for arch in electron_arches:
            configs.append({
                "name": "electron_{version}_{arch}_glibc".format(version=version, arch=arch),
                "runtime": "electron",
                "version": version,
                "arch": arch,
                "libc": "glibc"
            })
    return configs
def gen_mac_configs():
    """Build the list of macOS build configurations (node, then electron)."""
    configs = []
    arches = ["ia32", "x64"]
    for runtime, versions in (("node", node_versions), ("electron", electron_versions)):
        for version in versions:
            for arch in arches:
                configs.append({
                    "name": "{runtime}_{version}_{arch}".format(
                        runtime=runtime, version=version, arch=arch),
                    "runtime": runtime,
                    "version": version,
                    "arch": arch
                })
    return configs
def gen_windows_configs():
    """Build the list of Windows build configurations (node, then electron)."""
    arches = ["ia32", "x64"]
    node_configs = [
        {
            "name": "node_{0}_{1}".format(version, arch),
            "runtime": "node",
            "version": version,
            "arch": arch
        }
        for version in node_versions
        for arch in arches
    ]
    electron_configs = [
        {
            "name": "electron_{0}_{1}".format(version, arch),
            "runtime": "electron",
            "version": version,
            "arch": arch
        }
        for version in electron_versions
        for arch in arches
    ]
    return node_configs + electron_configs
# Script body: emit the full build matrix as YAML on stdout so the
# buildgen templates can consume it.
out = {
    "linux_configs": gen_linux_configs(),
    "mac_configs": gen_mac_configs(),
    "windows_configs": gen_windows_configs()
}
print(yaml.dump(out))
``` |
{
"source": "jiangtaoo2333/mmpose",
"score": 2
} |
#### File: mmpose/demo/inference_by_jiangtao_face.py
```python
import argparse
import cv2
import json
import os
import os.path as osp
import sys
import time
import warnings
from argparse import ArgumentParser
from tqdm import tqdm
from xtcocotools.coco import COCO
from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
vis_pose_result)
from mmpose.datasets import DatasetInfo
dirpath = osp.dirname(osp.abspath(__file__)).replace('\\','/')
dirpath = osp.dirname(dirpath)
def parse_args():
    """Parse CLI arguments: a pose config file path and a checkpoint path."""
    parser = argparse.ArgumentParser(
        # Bug fix: the previous description ("Convert MMDetection models to
        # ONNX") was copied from another tool and is wrong for this script.
        description='Run top-down face keypoint inference with mmpose')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    args = parser.parse_args()
    return args
class handAlignment():
    """Top-down keypoint inference wrapper around mmpose.

    NOTE(review): despite the name, the default config targets *face*
    landmarks, not hands -- consider renaming.
    """

    def __init__(self,
                 pose_config = '{}/configs/face/2d_kpt_sview_rgb_img/topdown_heatmap/dms/res50_dms_256x256.py'.format(dirpath),
                 pose_checkpoint = '{}/work_dirs/res50_dms_256x256/best_NME_epoch_42.pth'.format(dirpath),
                 device = 'cuda:0'):
        # Build the pose model once; it is reused for every alignment() call.
        self.pose_config = pose_config
        self.pose_checkpoint = pose_checkpoint
        self.device = device
        print('self.pose_checkpoint:',self.pose_checkpoint)
        self.pose_model = init_pose_model(
            self.pose_config, self.pose_checkpoint, device=self.device.lower())
        self.dataset = self.pose_model.cfg.data['test']['type']
        self.dataset_info = self.pose_model.cfg.data['test'].get('dataset_info', None)
        # dataset is a str; dataset_info is a dict (or None for old configs)
        if self.dataset_info is None:
            warnings.warn(
                'Please set `dataset_info` in the config.'
                'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
                DeprecationWarning)
        else:
            self.dataset_info = DatasetInfo(self.dataset_info)
            # dataset_info is an instance of DatasetInfo from here on

    def alignment(self,img,box):
        '''
        Run keypoint inference on one region of an image.

        Args:
            img: image array (as accepted by mmpose inference).
            box: [x, y, w, h] bounding box of the region.

        Returns:
            (pose_results, returned_outputs): per-box keypoint results and
            heatmaps from inference_top_down_pose_model.
        '''
        person = {}
        person['bbox'] = box
        person_results = []
        person_results.append(person)
        # test a single image, with a list of bboxes
        pose_results, returned_outputs = inference_top_down_pose_model(
            self.pose_model,
            img,
            person_results,
            bbox_thr=None,
            format='xywh',
            dataset=self.dataset,
            dataset_info=self.dataset_info,
            return_heatmap=True,
            outputs=None)
        return pose_results,returned_outputs

    def save(self, img, outfile, pose_results,returned_outputs):
        """Draw pose results on *img*, write to *outfile*, return the image."""
        img = vis_pose_result(
            self.pose_model,
            img,
            pose_results,
            dataset=self.dataset,
            dataset_info=self.dataset_info,
            kpt_score_thr=0.3,
            radius=4,
            thickness=1,
            show=False,
            out_file=outfile)
        return img
# NOTE(review): module-level instantiation -- importing this file loads the
# model (onto the GPU) as a side effect.
handAlign = handAlignment()

if __name__ == '__main__':
    args = parse_args()
    # Rebuild with the CLI-provided config/checkpoint, replacing the
    # module-level instance created above.
    handAlign = handAlignment(pose_config=args.config,
                            pose_checkpoint=args.checkpoint)
    filename = './demo/images/face.jpg'
    img = cv2.imread(filename,1)
    # input box is x y w h format
    # pose_results is a list of dict for every box, keys are bbox and keypoints
    # returned_outputs is a list of dict for every box, keys are heatmap
    pose_results, returned_outputs = handAlign.alignment(img,[495,221,400,400])
    print(returned_outputs[0]['heatmap'].shape)
    print(returned_outputs[0]['heatmap'][0][0][0][0])
    print(returned_outputs[0]['heatmap'][-1][-1][-1][-1])
    img = handAlign.save(img,
                        filename.replace('.jpg','_res.jpg'),
                        pose_results,
                        returned_outputs)
    print(img.shape)
    cv2.imwrite(filename.replace('.jpg','_res1.jpg'),img)
```
#### File: models/backbones/resnet_RM.py
```python
import copy
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer,
constant_init, kaiming_init)
from mmcv.utils.parrots_wrapper import _BatchNorm
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
from torch.nn.modules.batchnorm import _BatchNorm
from .resnet import ResNet
@BACKBONES.register_module()
class RMResNet(ResNet):
    """ResNet backbone whose parameters can be fully frozen.

    When ``frozen_parameters`` is True, every switch into train mode puts
    all BatchNorm layers in eval mode and disables gradients on every
    parameter, so the backbone acts as a fixed feature extractor.
    """

    def __init__(self,
                 frozen_parameters,
                 depth,
                 **kwargs):
        # frozen_parameters (bool): freeze the whole backbone when True.
        # depth and **kwargs are forwarded unchanged to the parent ResNet.
        super().__init__(depth,**kwargs)
        self.frozen_parameters = frozen_parameters

    def _frozen_parameters(self):
        # BN in eval mode keeps running statistics fixed during training.
        for m in self.modules():
            if isinstance(m, _BatchNorm):
                m.eval()
        for param in self.parameters():
            param.requires_grad = False

    def train(self,mode=True):
        # Re-apply freezing on every .train() call, since nn.Module.train()
        # would otherwise switch the BN layers back to training mode.
        super().train(mode)
        if self.frozen_parameters:
            self._frozen_parameters()
```
#### File: models/necks/yolox_pafpn.py
```python
import math
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
from ..utils import CSPLayer
@NECKS.register_module()
class YOLOXPAFPN(BaseModule):
    """Path Aggregation Network used in YOLOX.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3
        use_depthwise (bool): Whether to depthwise separable convolution in
            blocks. Default: False
        upsample_cfg (dict): Config dict for interpolate layer.
            Default: `dict(scale_factor=2, mode='nearest')`
            NOTE(review): currently unused -- upsampling is done with
            learned ConvTranspose2d layers instead (see __init__).
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN')
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish')
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_csp_blocks=3,
                 use_depthwise=False,
                 upsample_cfg=dict(scale_factor=2, mode='nearest'),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=dict(
                     type='Kaiming',
                     layer='Conv2d',
                     a=math.sqrt(5),
                     distribution='uniform',
                     mode='fan_in',
                     nonlinearity='leaky_relu')):
        super(YOLOXPAFPN, self).__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
        # build top-down blocks
        # self.upsample = nn.Upsample(**upsample_cfg)
        # Learned 2x upsampling (ConvTranspose2d) replaces the fixed
        # nearest-neighbour interpolation of the stock YOLOX PAFPN.
        self.upsample_layers = nn.ModuleList()
        self.reduce_layers = nn.ModuleList()
        self.top_down_blocks = nn.ModuleList()
        for idx in range(len(in_channels) - 1, 0, -1):
            self.reduce_layers.append(
                ConvModule(
                    in_channels[idx],
                    in_channels[idx - 1],
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            self.top_down_blocks.append(
                CSPLayer(
                    in_channels[idx - 1] * 2,
                    in_channels[idx - 1],
                    num_blocks=num_csp_blocks,
                    add_identity=False,
                    use_depthwise=use_depthwise,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            self.upsample_layers.append(
                nn.ConvTranspose2d(in_channels[idx-1], in_channels[idx-1], kernel_size=2, stride=2)
            )
        # build bottom-up blocks
        self.downsamples = nn.ModuleList()
        self.bottom_up_blocks = nn.ModuleList()
        for idx in range(len(in_channels) - 1):
            self.downsamples.append(
                conv(
                    in_channels[idx],
                    in_channels[idx],
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            self.bottom_up_blocks.append(
                CSPLayer(
                    in_channels[idx] * 2,
                    in_channels[idx + 1],
                    num_blocks=num_csp_blocks,
                    add_identity=False,
                    use_depthwise=use_depthwise,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        # 1x1 convs projecting every scale to the common out_channels.
        self.out_convs = nn.ModuleList()
        for i in range(len(in_channels)):
            self.out_convs.append(
                ConvModule(
                    in_channels[i],
                    out_channels,
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

    def forward(self, inputs):
        """
        Args:
            inputs (tuple[Tensor]): input features.

        Returns:
            tuple[Tensor]: YOLOXPAFPN features.
        """
        assert len(inputs) == len(self.in_channels)

        # top-down path (deepest feature first; note the reduced feature
        # also replaces the stored inner output so the bottom-up path sees
        # the channel-reduced version)
        inner_outs = [inputs[-1]]
        for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_heigh = inner_outs[0]
            feat_low = inputs[idx - 1]
            feat_heigh = self.reduce_layers[len(self.in_channels) - 1 - idx](
                feat_heigh)
            inner_outs[0] = feat_heigh
            # upsample_feat = self.upsample(feat_heigh)
            upsample_feat = self.upsample_layers[len(self.in_channels) - 1 - idx](feat_heigh)
            inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
                torch.cat([upsample_feat, feat_low], 1))
            inner_outs.insert(0, inner_out)

        # bottom-up path
        outs = [inner_outs[0]]
        for idx in range(len(self.in_channels) - 1):
            feat_low = outs[-1]
            feat_height = inner_outs[idx + 1]
            downsample_feat = self.downsamples[idx](feat_low)
            out = self.bottom_up_blocks[idx](
                torch.cat([downsample_feat, feat_height], 1))
            outs.append(out)

        # out convs
        for idx, conv in enumerate(self.out_convs):
            outs[idx] = conv(outs[idx])
        return tuple(outs)
``` |
{
"source": "jiangtaoo2333/StaticGestureRecognition",
"score": 2
} |
#### File: StaticGestureRecognition/demo/gesture_inference.py
```python
import argparse
import os
import os.path as osp
import sys
import time
import mmcv
import numpy as np
import torch
from mmcv import Config
import torch.nn as nn
import cv2
dirpath = osp.dirname(osp.dirname(osp.abspath(__file__))).replace('\\','/')
sys.path.append(dirpath)
import timm
def get_args():
    """Parse command-line arguments; currently only the config file path."""
    parser = argparse.ArgumentParser("MultiTaskOnFace build by Jiangtao")
    parser.add_argument('--config',
        default='{}/configs/gesture/dms_easyNet_crossentroy_cosineannealing_augmix.py'.format(dirpath), help='train config file path')
    args = parser.parse_args()
    return args

# Parsed once at import time; cfg drives model construction below.
args = get_args()
cfg = Config.fromfile(args.config)
class StaticGesture():
    """Static hand-gesture classifier (palm / single finger / double finger).

    Builds the timm model named by the config and loads a trained
    checkpoint from work_dirs/<config basename>/<checkpoint>, then
    classifies grayscale hand crops on the GPU.
    """

    def __init__(self,
                 cfg=cfg,
                 checkpoint='easyNet_DMS_gender_best_0.967529296875.pkl'):
        self.cfg = cfg
        self.model = timm.create_model(self.cfg.modelName, pretrained=False, num_classes=self.cfg.numClasses,
                                       in_chans=self.cfg.channels).cuda()
        # Checkpoints live under work_dirs/<basename of the config file>/.
        filename = self.cfg.filename
        basefilename = osp.basename(filename)
        basefilename = osp.splitext(basefilename)[0]
        self.modelPath = osp.join('{}/work_dirs/'.format(dirpath), basefilename)
        self.modelPath = osp.join(self.modelPath, checkpoint)
        print('self.modelPath:', self.modelPath)
        # strict=False tolerates key mismatches between checkpoint and model.
        self.model.load_state_dict(torch.load(self.modelPath), strict=False)
        self.model.cuda().eval()

    def classify(self, image, box):
        '''
        Classify the hand contained in *box*.

        image is numpy h w (single channel)
        box is [x,y,x,y] (corner format, pixel coordinates)

        Returns:
            (label, score): label in {'palm', 'singleFinger',
            'doubleFinger'} and its softmax probability.
        '''
        # Enlarge the box by 10% per side to keep context around the hand.
        scale = 0.10
        xmin, ymin, xmax, ymax = box
        roiw = xmax - xmin
        roih = ymax - ymin
        xmin -= roiw * scale
        xmax += roiw * scale
        ymin -= roih * scale
        ymax += roih * scale
        # Clamp the expanded box to the image bounds.
        xmin = np.clip(xmin, 0, image.shape[1] - 1)
        xmax = np.clip(xmax, 0, image.shape[1] - 1)
        ymin = np.clip(ymin, 0, image.shape[0] - 1)
        ymax = np.clip(ymax, 0, image.shape[0] - 1)
        x1 = int(xmin)
        x2 = int(xmax)
        y1 = int(ymin)
        y2 = int(ymax)
        img = image[y1:y2, x1:x2]
        # Preprocess the crop: resize to the network input size and scale
        # to [0, 1] (0.0039216 == 1/255).
        img = cv2.resize(img, (self.cfg.imgSize, self.cfg.imgSize), interpolation=cv2.INTER_CUBIC) * 0.0039216
        img = img[np.newaxis]        # 1 128 128
        img_ = torch.from_numpy(img) # 1 128 128
        img_ = img_.unsqueeze_(0)    # 1 1 128 128
        img_ = img_.cuda()
        pre_ = self.model(img_.float())
        m = nn.Softmax(dim=1)
        pre_ = m(pre_)
        pre_ = pre_.cpu().detach().numpy().reshape((1, -1))
        res = np.argmax(pre_, axis=-1)
        # Class indices: 0 -> palm, 1 -> singleFinger, 2 -> doubleFinger.
        if res[0] == 0:
            label = 'palm'
        if res[0] == 1:
            label = 'singleFinger'
        if res[0] == 2:
            label = 'doubleFinger'
        score = pre_[0][res[0]]
        return label, score
if __name__ == '__main__':
    # Quick demo: classify a fixed hand box in a sample image and show it.
    SataticGestureCls = StaticGesture()
    img = cv2.imread('./demo/images/1.jpg', 0)  # grayscale
    box = [1057, 504, 1207, 706]
    x1, y1, x2, y2 = box
    label, score = SataticGestureCls.classify(img, box)
    print(label)
    print(score)
    cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
    cv2.imshow('img', img)
    key = cv2.waitKey(0)
    if key == ord('q'):
        cv2.destroyAllWindows()
```
#### File: dataprocess/dataset/gesture.py
```python
import copy
import inspect
import os
import os.path as osp
import random
import re
import sys
import time
import xml.dom.minidom
from copy import deepcopy
import cv2
import numpy as np
import pandas as pd
import scipy.io as sio
import src.dataprocess.transform.augmentations as augmentations
import torch
from PIL import Image, ImageFilter
from src.dataprocess.transform.dataAug_box import randomAug_box
from torch.utils.data.dataset import Dataset as torchDataset
from torchvision import transforms
from torchvision.transforms import functional as F
from tqdm import tqdm
import json
def getage(xmlFile):
    """Return the age stored in *xmlFile*.

    The annotation must contain exactly one <age> element; otherwise the
    process exits, naming the offending file.

    Args:
        xmlFile: path to an XML annotation file.

    Returns:
        int: the age, truncated from the stored (possibly float) text.
    """
    dom = xml.dom.minidom.parse(xmlFile)
    root = dom.documentElement
    items = root.getElementsByTagName('age')
    if len(items) != 1:
        print(xmlFile)
        sys.exit('{} is not right'.format(xmlFile))
    # int(float(...)) tolerates values stored as e.g. "25.0".
    # (Original built an unused `boxes` array and re-queried the DOM in a
    # loop that always ran exactly once — dead code removed.)
    return int(float(items[0].firstChild.data))
def getgender(xmlFile):
    """Return the gender code stored in *xmlFile*.

    The annotation must contain exactly one <gender> element; otherwise
    the process exits, naming the offending file.

    Args:
        xmlFile: path to an XML annotation file.

    Returns:
        int: the gender code, truncated from the stored (possibly float) text.
    """
    dom = xml.dom.minidom.parse(xmlFile)
    root = dom.documentElement
    items = root.getElementsByTagName('gender')
    if len(items) != 1:
        print(xmlFile)
        sys.exit('{} is not right'.format(xmlFile))
    # int(float(...)) tolerates values stored as e.g. "1.0".
    # (Original built an unused `boxes` array and re-queried the DOM in a
    # loop that always ran exactly once — dead code removed.)
    return int(float(items[0].firstChild.data))
def getboxes(xmlFile):
    """Parse all bounding boxes from a VOC-style XML file.

    Args:
        xmlFile: path to the annotation file.

    Returns:
        numpy.ndarray: an (N, 4) array of [xmin, ymin, xmax, ymax] rows.
        If the file does not exist, a single all-zero box is returned so
        callers always receive a valid array.
    """
    if not os.path.exists(xmlFile):
        return np.zeros((1, 4))
    dom = xml.dom.minidom.parse(xmlFile)
    root = dom.documentElement
    # Hoist the four node lists out of the loop instead of re-querying the
    # whole DOM four times per box as the original did.
    xmins = root.getElementsByTagName('xmin')
    ymins = root.getElementsByTagName('ymin')
    xmaxs = root.getElementsByTagName('xmax')
    ymaxs = root.getElementsByTagName('ymax')
    boxes = np.zeros((len(xmins), 4))
    for iBox in range(len(xmins)):
        # int(float(...)) tolerates coordinates stored as "12.0".
        boxes[iBox][0] = int(float(xmins[iBox].firstChild.data))
        boxes[iBox][1] = int(float(ymins[iBox].firstChild.data))
        boxes[iBox][2] = int(float(xmaxs[iBox].firstChild.data))
        boxes[iBox][3] = int(float(ymaxs[iBox].firstChild.data))
    return boxes
def getbox(json_file):
    """Read one bounding box annotation and return it as [x, y, w, h].

    Accepts either a labelme-style .json file (flat [x1, y1, x2, y2]
    point list) or a VOC-style .xml file (first box is used); exits on
    any other extension.
    """
    if json_file.endswith('.json'):
        with open(json_file, 'rb') as f:
            pts = json.load(f)['shapes'][0]['points']
        x1, y1, x2, y2 = pts[0], pts[1], pts[2], pts[3]
    elif json_file.endswith('.xml'):
        # Take the first box from the XML annotation.
        x1, y1, x2, y2 = getboxes(json_file)[0]
    else:
        print(json_file)
        sys.exit()
    return [x1, y1, x2 - x1, y2 - y1]
def randomAug_boxV2(img, box, scale):
    """Randomly expand *box*, crop that region from *img*, and return the
    crop plus the box corners normalised to [0, 1] within it.

    Args:
        img: numpy image, H x W (x C).
        box: [[x1, y1, x2, y2]] — a single corner-format box in a list.
        scale: relative margin used to jitter each side of the box.

    Returns:
        (True, cropped_image, [x1, y1, x2, y2]) on success, where the
        coordinates are fractions of the crop size; (False, reason) when
        the box or the resulting crop would be too small.
    """
    img_h, img_w = img.shape[0:2]
    x1, y1, x2, y2 = box[0][0], box[0][1], box[0][2], box[0][3]
    box_w = x2 - x1
    box_h = y2 - y1
    if box_w < 20 or box_h < 20:
        return (False, 'w or h is very small')
    # Half the time expand by up to one margin, otherwise by one to two
    # margins, so both tight and loose crops are produced.
    if random.random() < 0.5:
        dx1 = np.random.randint(0, int(box_w * scale))
        dy1 = np.random.randint(0, int(box_h * scale))
        dx2 = np.random.randint(0, int(box_w * scale))
        dy2 = np.random.randint(0, int(box_h * scale))
    else:
        dx1 = np.random.randint(int(box_w * scale), int(box_w * scale * 2))
        dy1 = np.random.randint(int(box_h * scale), int(box_h * scale * 2))
        dx2 = np.random.randint(int(box_w * scale), int(box_w * scale * 2))
        dy2 = np.random.randint(int(box_h * scale), int(box_h * scale * 2))
    cx1 = max(x1 - dx1, 0)
    cy1 = max(y1 - dy1, 0)
    cx2 = min(x2 + dx2, img_w)
    cy2 = min(y2 + dy2, img_h)
    if (cy2 < cy1 + 20) or (cx2 < cx1 + 20):
        return (False, 'ny2 or nx2 is very small')
    # Express the original corners as fractions of the crop size.
    crop_w = cx2 - cx1
    crop_h = cy2 - cy1
    fx1 = (x1 - cx1) / crop_w
    fy1 = (y1 - cy1) / crop_h
    fx2 = (x2 - cx1) / crop_w
    fy2 = (y2 - cy1) / crop_h
    cropped_im = img[int(cy1): int(cy2), int(cx1): int(cx2)]
    return (True, cropped_im, [fx1, fy1, fx2, fy2])
def aug(image, preprocess, all_ops=True):
    """Apply AugMix to *image* and return the mixed tensor.

    Three independently augmented copies of the image are combined with
    Dirichlet weights and then blended with the clean image using a Beta
    coefficient, following the AugMix recipe.

    Args:
        image: PIL.Image input image.
        preprocess: callable turning a PIL image into a torch tensor.
        all_ops: when True, use the extended augmentation list.

    Returns:
        mixed: augmented and mixed image tensor.
    """
    ops = augmentations.augmentations_all if all_ops else augmentations.augmentations
    branch_weights = np.float32(np.random.dirichlet([1] * 3))
    blend = np.float32(np.random.beta(1, 1))
    mix = torch.zeros_like(preprocess(image))
    for branch in range(3):
        image_aug = image.copy()
        # Each branch applies a random chain of 1-3 operations.
        for _ in range(np.random.randint(1, 4)):
            image_aug = np.random.choice(ops)(image_aug, 3)
        # Preprocessing commutes since all coefficients are convex.
        mix += branch_weights[branch] * preprocess(image_aug)
    return (1 - blend) * preprocess(image) + blend * mix
class DatasetGesture(torchDataset):
    """Gesture-classification dataset.

    Each line of *imgTxt* is "<relative image path> <label>"; next to
    every image there must be a .xml or .json annotation with the hand
    box. __getitem__ returns a (1, size, size) float crop scaled to
    [0, 1] and the label as a FloatTensor.
    """

    def __init__(self, imgDir, imgTxt, size=128, imgChannel=1, isTrain='train'):
        self.size = size
        self.imgTxt = os.path.join(imgDir, imgTxt)
        with open(self.imgTxt, 'r') as f:
            lines = f.readlines()
        # get imgpath
        self.imgPathList = []
        self.xmlPathList = []
        self.labelList = []
        for line in tqdm(lines):
            imgFile = imgDir + line.strip().split(' ')[0]
            xmlFile = osp.splitext(imgFile)[0] + '.xml'
            jsonFile = osp.splitext(imgFile)[0] + '.json'
            label = int(line.strip().split(' ')[1])
            self.imgPathList.append(imgFile)
            if osp.exists(xmlFile):
                self.xmlPathList.append(xmlFile)
            if osp.exists(jsonFile):
                self.xmlPathList.append(jsonFile)
            self.labelList.append(label)
        # Every image must have exactly one annotation file.
        assert len(self.imgPathList) == len(self.xmlPathList)
        print('isTrain:', isTrain)
        print('len(self.imgPathList):', len(self.imgPathList))
        print('len(self.xmlPathList):', len(self.xmlPathList))

    def __len__(self):
        return len(self.imgPathList)

    def __getitem__(self, index):
        img = cv2.imread(self.imgPathList[index], 0)  # grayscale
        box = getbox(self.xmlPathList[index])  # [x y w h]
        gesture = self.labelList[index]
        # Convert to corner form and wrap: [[x1, y1, x2, y2]].
        box[2] = box[0] + box[2]
        box[3] = box[1] + box[3]
        box = [box]
        img_new, box_new = randomAug_box(img, box)
        ret = randomAug_boxV2(img_new, box_new, 0.15)
        if ret[0] == False:
            # Bug fix: the original printed the undefined name `box_ori`,
            # which raised NameError instead of reporting the bad sample.
            print('box:', box)
            print('box_new:', box_new)
            sys.exit('{} have problem:{}'.format(self.imgPathList[index], ret[1]))
        else:
            cropped_im = ret[1]
        # Resize to the network input and scale to [0, 1] (0.0039216 == 1/255).
        resized_im = cv2.resize(cropped_im, (self.size, self.size),
                                interpolation=cv2.INTER_LINEAR).astype('float') * 0.0039216
        resized_im = resized_im[np.newaxis, ]
        return resized_im, torch.FloatTensor([gesture]).type(torch.FloatTensor)
class DatasetGestureSim(DatasetGesture):
    '''
    Variant of DatasetGesture that returns the crop as an un-normalised
    PIL RGB image, which is what the AugMix wrapper expects.
    '''

    def __init__(self, imgDir, imgTxt, size=128, imgChannel=1, isTrain='train'):
        super(DatasetGestureSim, self).__init__(imgDir, imgTxt, size=size, isTrain=isTrain)

    def __len__(self):
        return len(self.imgPathList)

    def __getitem__(self, index):
        img = cv2.imread(self.imgPathList[index], 1)  # BGR colour
        x, y, w, h = getbox(self.xmlPathList[index])
        label = self.labelList[index]
        # Corner form, wrapped: [[x1, y1, x2, y2]].
        boxes = [[x, y, x + w, y + h]]
        aug_img, aug_boxes = randomAug_box(img, boxes)
        ret = randomAug_boxV2(aug_img, aug_boxes, 0.15)
        if not ret[0]:
            sys.exit('{} have problem:{}'.format(self.imgPathList[index], ret[1]))
        crop = cv2.resize(ret[1], (self.size, self.size), interpolation=cv2.INTER_LINEAR)
        pil_img = Image.fromarray(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB))
        return pil_img, torch.FloatTensor([label]).type(torch.FloatTensor)
def preprocess(imagePil):
    """Convert a PIL image to a single-channel float tensor of shape (1, H, W)."""
    gray = np.asarray(imagePil.convert("L"))[:, :, np.newaxis]  # H W 1
    return F.to_tensor(gray)
class AugMixDataset(torch.utils.data.Dataset):
    """Dataset wrapper to perform AugMix augmentation.

    With no_jsd=True a single augmented tensor is returned per sample;
    otherwise a (clean, aug1, aug2) triple for the Jensen-Shannon
    consistency loss.
    """

    def __init__(self, dataset, preprocess=preprocess, no_jsd=False):
        self.dataset = dataset
        self.preprocess = preprocess
        self.no_jsd = no_jsd

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        image, target = self.dataset[i]
        # image is a PIL Image (e.g. 128 x 128 x 3).
        if self.no_jsd:
            return aug(image, self.preprocess), target
        triple = (self.preprocess(image),
                  aug(image, self.preprocess),
                  aug(image, self.preprocess))
        return triple, target
```
#### File: src/loss/CircleLoss.py
```python
from typing import Tuple
import torch
from torch import nn, Tensor
class SparseCircleLoss(nn.Module):
    """Circle loss in its classification form, with learned class weights.

    Cosine similarities between the L2-normalised embedding and the
    normalised class prototypes are split into the target similarity sp
    and the non-target similarities sn, then combined with self-paced
    margins as in "Circle Loss: A Unified Perspective of Pair Similarity
    Optimization".
    """

    def __init__(self, m: float, emdsize: int, class_num: int, gamma: float) -> None:
        super(SparseCircleLoss, self).__init__()
        self.margin = m
        self.gamma = gamma
        self.soft_plus = nn.Softplus()
        self.class_num = class_num
        self.emdsize = emdsize
        # One prototype vector per class, trained jointly with the network.
        self.weight = nn.Parameter(torch.FloatTensor(self.class_num, self.emdsize))
        nn.init.xavier_uniform_(self.weight)
        self.use_cuda = False

    def forward(self, input: Tensor, label: Tensor) -> Tensor:
        # Cosine similarity: normalised embeddings x normalised prototypes.
        sim = nn.functional.linear(
            nn.functional.normalize(input, p=2, dim=1, eps=1e-12),
            nn.functional.normalize(self.weight, p=2, dim=1, eps=1e-12))
        if self.use_cuda:
            target_mask = torch.zeros(sim.size(), device='cuda')
        else:
            target_mask = torch.zeros(sim.size())
        target_mask.scatter_(1, label.view(-1, 1).long(), 1)
        target_mask = target_mask.type(dtype=torch.bool)
        batch = input.size()[0]
        sp = sim[target_mask].view(batch, -1)                 # true-class similarity
        sn = sim[target_mask.logical_not()].view(batch, -1)   # other-class similarities
        # Self-paced weights; detached so they act as constants in autograd.
        ap = torch.clamp_min(-sp.detach() + 1 + self.margin, min=0.)
        an = torch.clamp_min(sn.detach() + self.margin, min=0.)
        delta_p = 1 - self.margin
        delta_n = self.margin
        logit_p = -ap * (sp - delta_p) * self.gamma
        logit_n = an * (sn - delta_n) * self.gamma
        return self.soft_plus(
            torch.logsumexp(logit_n, dim=1) + torch.logsumexp(logit_p, dim=1)).mean()
if __name__ == "__main__":
features = torch.rand(64, 128, requires_grad=True)
label = torch.randint(high=9, size=(64,))
SparseCircle = SparseCircleLoss(m=0.25, emdsize=128, class_num=10, gamma=64)
loss = SparseCircle(features , label)
print(loss)
```
#### File: src/loss/label_smothing_cross_entropy_loss.py
```python
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss
class LabelSmoothCrossEntropyLoss(_WeightedLoss):
    """Cross entropy with label smoothing.

    The hard one-hot target is replaced by a distribution that puts
    1 - smoothing on the true class and spreads smoothing evenly over the
    remaining classes; with smoothing=0 this reduces to ordinary cross
    entropy.
    """

    def __init__(self, weight=None, reduction='mean', smoothing=0.0):
        super().__init__(weight=weight, reduction=reduction)
        self.smoothing = smoothing
        self.weight = weight
        self.reduction = reduction

    @staticmethod
    def _smooth_one_hot(targets: torch.Tensor, n_classes: int, smoothing=0.0):
        """Build the smoothed target distribution, shape (N, n_classes)."""
        assert 0 <= smoothing < 1
        off_value = smoothing / (n_classes - 1)
        smoothed = torch.empty(size=(targets.size(0), n_classes), device=targets.device)
        smoothed.fill_(off_value)
        smoothed.scatter_(1, targets.data.unsqueeze(1), 1. - smoothing)
        return smoothed

    def forward(self, inputs, targets):
        '''
        inputs N,C (raw logits)
        targets N,1 (class indices)
        '''
        soft_targets = self._smooth_one_hot(targets, inputs.size(-1), self.smoothing)
        log_probs = F.log_softmax(inputs, -1)
        if self.weight is not None:
            log_probs = log_probs * self.weight.unsqueeze(0)
        loss = -(soft_targets * log_probs).sum(-1)
        if self.reduction == 'sum':
            loss = loss.sum()
        elif self.reduction == 'mean':
            loss = loss.mean()
        return loss
```
#### File: src/loss/lightning.py
```python
import torch.nn.functional as F
import os
import torch
import sys
from fmix import *
class FMix(FMixBase):
    r""" FMix augmentation: mixes pairs of images through a low-frequency
    binary mask sampled in Fourier space.

    Args:
        decay_power (float): decay power d for the 1/f**d frequency prior.
        alpha (float): Beta-distribution parameter for the mask mean.
        size ([int] | [int, int] | [int, int, int]): desired mask shape.
        max_soft (float): softening in [0, 0.5] applied to mask edges.
        reformulate (bool): if True, uses the reformulation of [1].

    Call the instance on a batch to mix it, then use .loss() on the
    predictions; the permutation and lambda are remembered between the
    two calls.

    Example
    -------
    .. code-block:: python

        fmix = FMix(...)
        x = fmix(x)
        loss = fmix.loss(model(x), y)
    """

    def __init__(self, decay_power=3, alpha=1, size=(32, 32), max_soft=0.0, reformulate=False):
        super().__init__(decay_power, alpha, size, max_soft, reformulate)

    def __call__(self, x):
        # Draw a fresh mask and a random pairing of the batch.
        lam, mask = sample_mask(self.alpha, self.decay_power, self.size,
                                self.max_soft, self.reformulate)
        perm = torch.randperm(x.size(0)).to(x.device)
        mask = torch.from_numpy(mask).float().to(x.device)
        # Remember the mixing state for the subsequent loss() call.
        self.index = perm
        self.lam = lam
        return mask * x + (1 - mask) * x[perm]

    def loss(self, y_pred, y, train=True):
        return fmix_loss(y_pred, y, self.index, self.lam, train, self.reformulate)
if __name__ == '__main__':
    # Sample a single 128x128 FMix mask and inspect it.
    lam, mask = sample_mask(alpha=1, decay_power=3, shape=(128, 128))
    print(lam)
    print(mask)
    print(type(mask))
    print(mask.shape)
#### File: src/loss/mixup.py
```python
import numpy as np
np.set_printoptions(threshold=np.inf)
import torch
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def mixup_data(x, y, device, alpha=1.0):
    '''Returns mixed inputs, pairs of targets, and lambda.

    Each sample is blended with a randomly paired sample from the same
    batch using a Beta(alpha, alpha) coefficient; alpha <= 0 disables
    mixing (lam == 1).

    Args:
        x: input batch (first dimension is the batch size).
        y: labels aligned with x.
        device: device for the random permutation.
        alpha: Beta-distribution parameter.
    '''
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    perm = torch.randperm(x.size()[0]).to(device)
    mixed = lam * x + (1 - lam) * x[perm, :]
    return mixed, y, y[perm], lam
def randomMask(imgData, device):
    """Overwrite one random 32x32 tile of every image with uniform noise.

    imgData is modified in place (and also returned); it is assumed to be
    a (N, 1, 128, 128) tensor batch, giving a 4x4 grid of tiles —
    TODO confirm against callers.
    """
    for i in range(imgData.shape[0]):
        noise = np.random.uniform(low=0.0, high=1.0, size=(32, 32))
        noise = torch.from_numpy(noise).to(device)
        row, col = np.random.randint(0, 4, size=(2,), dtype='l')
        imgData[i][0][row * 32:(row + 1) * 32, col * 32:(col + 1) * 32] = noise
    return imgData
def mixup_criterion(pred, y_a, y_b, lam):
    """Mixup loss: convex combination of NLL against both mixed label sets.

    Args:
        pred: (N, C) log-probabilities (as expected by nll_loss).
        y_a, y_b: the two label tensors returned by mixup_data.
        lam: mixing coefficient returned by mixup_data.

    Returns:
        scalar loss tensor.
    """
    # Bug fix: this module never imported torch.nn.functional, so the
    # original raised NameError on first use; import locally to keep the
    # module's top-level imports untouched.
    import torch.nn.functional as F
    return lam * F.nll_loss(pred, y_a) + (1 - lam) * F.nll_loss(pred, y_b)
if __name__ == '__main__':
    # Smoke test for randomMask on a single 128x128 image.
    # Bug fix: the original passed a bare numpy array and omitted the
    # required `device` argument, so randomMask raised TypeError.
    imgData1 = torch.from_numpy(np.random.uniform(low=0.0, high=1.0, size=(1, 1, 128, 128)))
    print(imgData1[0][0])
    imgData2 = randomMask(imgData1, 'cpu')
    print(imgData2[0][0])
#### File: network/backbone/basic_network.py
```python
import os
from collections import OrderedDict
import torch
import torch.nn as nn
from .. import layer
class multi_out_5(nn.Module):
    """Five-task head: eye, mouth, face, detection and emotion branches on a shared backbone."""

    def __init__(self,):
        super(multi_out_5, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone()
        self.mouthBone = layer.mouthBone()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone()
        self.emotionBone = layer.emotionBone()

    def forward(self, x):
        # Shared trunk, then one independent head per task.
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]


class multi_out_5_multiscale(nn.Module):
    """multi_out_5 variant with multi-scale eye and mouth heads.

    NOTE(review): a class with this same name is defined again further
    down in this module; the later definition shadows this one at import
    time.
    """

    def __init__(self,):
        super(multi_out_5_multiscale, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone()
        self.emotionBone = layer.emotionBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]


class multi_out_5_multiscale_WITH_BG(nn.Module):
    """Multi-scale variant whose emotion head includes a background class."""

    def __init__(self,):
        super(multi_out_5_multiscale_WITH_BG, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone()
        self.emotionBone = layer.emotionBone_WITH_BG()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]


class multi_out_5_PFLD(nn.Module):
    """Six-output variant adding a PFLD-style head-pose angle branch."""

    def __init__(self,):
        super(multi_out_5_PFLD, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone()
        self.mouthBone = layer.mouthBone()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone()
        self.emotionBone = layer.emotionBone()
        self.angleBone = layer.angleBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.angleBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6]


class multi_out_5_elementwise(nn.Module):
    """Variant using element-wise eye and mouth heads."""

    def __init__(self,):
        super(multi_out_5_elementwise, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_elementwise()
        self.mouthBone = layer.mouthBone_elementwise()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone()
        self.emotionBone = layer.emotionBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]
class multi_out_5_multiLabel(nn.Module):
    """Variant with a multi-label face head.

    NOTE(review): self.mouthBone is built from layer.eyeBone_multiscale,
    unlike every sibling class which uses layer.mouthBone_multiscale —
    this looks like a copy-paste slip; confirm it is intentional before
    relying on this class.
    """

    def __init__(self,):
        super(multi_out_5_multiLabel, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.eyeBone_multiscale()
        self.faceBone = layer.faceBone_multiLabel()
        self.detectBone = layer.detectBone()
        self.emotionBone = layer.emotionBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]
class multi_out_5_multiscale(nn.Module):
    """Multi-scale five-task network.

    NOTE(review): this is a verbatim redefinition of the
    multi_out_5_multiscale class declared earlier in this module; this
    later definition is the one that survives at import time. Consider
    deleting one of the two.
    """

    def __init__(self,):
        super(multi_out_5_multiscale, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone()
        self.emotionBone = layer.emotionBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]
class multi_out_5_multiscale_new(nn.Module):
    """Multi-scale variant with a multi-scale detect head and background-aware emotion head."""

    def __init__(self,):
        super(multi_out_5_multiscale_new, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()

    def forward(self, x):
        # Shared trunk, then one independent head per task.
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]


class multi_out_5_multiScale_blurpool(nn.Module):
    """Multi-scale variant whose face head uses blur pooling."""

    def __init__(self,):
        super(multi_out_5_multiScale_blurpool, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone_blurpool()
        self.detectBone = layer.detectBone()
        self.emotionBone = layer.emotionBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]


class multi_out_2(nn.Module):
    """Two-task network: multi-scale eye and mouth heads only."""

    def __init__(self,):
        super(multi_out_2, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        return [stage1, stage2]


class multi_out_HeatMap(nn.Module):
    """Heatmap-guided eye/mouth network: heatmap branches feed the landmark heads."""

    def __init__(self,):
        super(multi_out_HeatMap, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeHeatmapBone = layer.eyeHeatmap()
        self.mouthHeatmapBone = layer.mouthHeatmap()
        self.eyeBone = layer.eyeBone_multiscale_heatmap()
        self.mouthBone = layer.mouthBone_multiscale_heatmap()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeHeatmapBone(stage0)
        stage2 = self.mouthHeatmapBone(stage0)
        # The landmark heads consume both the trunk features and the heatmaps.
        stage3 = self.eyeBone(stage0, stage1)
        stage4 = self.mouthBone(stage0, stage2)
        return [stage1, stage2, stage3, stage4]


class multi_out_5_multiscale_SSD(nn.Module):
    """Multi-scale variant with an SSD-style detection head."""

    def __init__(self,):
        super(multi_out_5_multiscale_SSD, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_SSD()
        self.emotionBone = layer.emotionBone_WITH_BG()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]
class multi_out_8(nn.Module):
    """Eight-output network adding yaw/pitch/roll angle branches.

    NOTE(review): a different class with this same name is defined again
    near the bottom of this module and shadows this one at import time.
    """

    def __init__(self,):
        super(multi_out_8, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.YawBone = layer.YawBone()
        self.PitchBone = layer.PitchBone()
        self.RollBone = layer.RollBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.YawBone(stage0)
        stage7 = self.PitchBone(stage0)
        stage8 = self.RollBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6, stage7, stage8]
class multi_out_5_multiscale_new_new(nn.Module):
    """Five-task variant with the newer multi-scale detect head."""

    def __init__(self,):
        super(multi_out_5_multiscale_new_new, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale_new()
        self.emotionBone = layer.emotionBone_WITH_BG()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]


class multi_out_8_new(nn.Module):
    """Eight-output network (with pose branches) using the newer multi-scale detect head."""

    def __init__(self,):
        super(multi_out_8_new, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale_new()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.YawBone = layer.YawBone()
        self.PitchBone = layer.PitchBone()
        self.RollBone = layer.RollBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.YawBone(stage0)
        stage7 = self.PitchBone(stage0)
        stage8 = self.RollBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6, stage7, stage8]


class multi_out_9(nn.Module):
    """Nine-output network: eight-task variant plus a binary branch."""

    def __init__(self,):
        super(multi_out_9, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.binaryBone = layer.BinaryBone()
        self.YawBone = layer.YawBone()
        self.PitchBone = layer.PitchBone()
        self.RollBone = layer.RollBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.binaryBone(stage0)
        stage7 = self.YawBone(stage0)
        stage8 = self.PitchBone(stage0)
        stage9 = self.RollBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6, stage7, stage8, stage9]
class multi_out_2_3channel(nn.Module):
    """Two-task (eye/mouth) network on a 3-channel backbone."""

    def __init__(self,):
        super(multi_out_2_3channel, self).__init__()
        self.baseBone = layer.backBone_3channel()
        self.eyeBone = layer.eyeBone()
        self.mouthBone = layer.mouthBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        return [stage1, stage2]


class multi_out_5_multiscale_3channel(nn.Module):
    """Five-task multi-scale network on a 3-channel backbone."""

    def __init__(self,):
        super(multi_out_5_multiscale_3channel, self).__init__()
        self.baseBone = layer.backBone_3channel()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone()
        self.emotionBone = layer.emotionBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]


class multi_out_6(nn.Module):
    """Six-output network adding a face-area branch to the five-task variant."""

    def __init__(self,):
        super(multi_out_6, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.FaceAreaBone = layer.FaceAreaBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.FaceAreaBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6]


class multi_yolo(nn.Module):
    """Single-task network: YOLO-style detection head on the shared backbone."""

    def __init__(self,):
        super(multi_yolo, self).__init__()
        self.baseBone = layer.backBone()
        self.detectBone = layer.detectBone_YOLO()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.detectBone(stage0)
        return stage1
class multi_out_5_HeatMap(nn.Module):
    """Five-task variant with a heatmap-based detection head."""

    def __init__(self,):
        super(multi_out_5_HeatMap, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_HeatMap()
        self.emotionBone = layer.emotionBone_WITH_BG()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5]


class multi_out_6_Binary_HeatMap(nn.Module):
    """Heatmap-detection variant plus a face-area branch."""

    def __init__(self,):
        super(multi_out_6_Binary_HeatMap, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_HeatMap()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.FaceAreaBone = layer.FaceAreaBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.FaceAreaBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6]


class multi_out_6_Binary(nn.Module):
    """Six-output network adding a binary branch to the five-task variant."""

    def __init__(self,):
        super(multi_out_6_Binary, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.binaryBone = layer.BinaryBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.binaryBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6]
class single_out_facearea(nn.Module):
    """Standalone face-area network (no shared backbone)."""

    def __init__(self,):
        super(single_out_facearea, self).__init__()
        self.FaceArea = layer.FaceAreaBone_new()

    def forward(self, x):
        out = self.FaceArea(x)
        return out


class multi_out_6_FaceArea(nn.Module):
    """Six-output network whose sixth branch is the seven-point face-area head."""

    def __init__(self,):
        super(multi_out_6_FaceArea, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        # self.FaceAreaBone = layer.FaceAreaBone_five()
        self.FaceAreaBone = layer.FaceAreaBone_seven()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.FaceAreaBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6]


class multi_out_6_Angle(nn.Module):
    """Six-output network whose sixth branch is the head-pose angle head."""

    def __init__(self,):
        super(multi_out_6_Angle, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.angleBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6]


class multi_out_7(nn.Module):
    """Seven-output network: angle head plus the seven-point face-area head."""

    def __init__(self,):
        super(multi_out_7, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone()
        self.FaceAreaBone = layer.FaceAreaBone_seven()

    def forward(self, x):
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.angleBone(stage0)
        stage7 = self.FaceAreaBone(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6, stage7]
class multi_out_8(nn.Module):
    """Shared backbone feeding 8 heads; the 8th is the recognition embedding head."""

    def __init__(self):
        super(multi_out_8, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone()
        self.FaceAreaBone = layer.FaceAreaBone_seven()
        self.recogBone = layer.RecognitionBone()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone, self.recogBone)
        return [head(shared) for head in heads]
class multi_out_8_angle_new(nn.Module):
    """Variant of multi_out_8 using the six-way angle head (angleBone_six)."""

    def __init__(self):
        super(multi_out_8_angle_new, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven()
        self.recogBone = layer.RecognitionBone()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone, self.recogBone)
        return [head(shared) for head in heads]
class multi_out_7_0911(nn.Module):
    """7-head variant (2020-09-11): six-way angle head plus the new face-area head."""

    def __init__(self):
        super(multi_out_7_0911, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone)
        return [head(shared) for head in heads]
class multi_out_8_20210607(nn.Module):
    """8-head variant (2021-06-07): adds an alignment-quality head."""

    def __init__(self):
        super(multi_out_8_20210607, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.alignQualityBone = layer.alignQualityBone()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.alignQualityBone)
        return [head(shared) for head in heads]
class multi_out_8_20210426(nn.Module):
    """8-head variant (2021-04-26): adds a binary face classification head."""

    def __init__(self):
        super(multi_out_8_20210426, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.binaryFaceBone = layer.binaryFaceBone()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.binaryFaceBone)
        return [head(shared) for head in heads]
class singleGazeBone(nn.Module):
    """Backbone plus a single gaze head; returns the gaze output in a 1-element list."""

    def __init__(self):
        super(singleGazeBone, self).__init__()
        self.baseBone = layer.backBone()
        self.gazeBone = layer.GazeBone()

    def forward(self, x):
        shared = self.baseBone(x)
        return [self.gazeBone(shared)]
class singleGazeBone_5(nn.Module):
    """Backbone plus the 5-variant gaze head; returns its output in a 1-element list."""

    def __init__(self):
        super(singleGazeBone_5, self).__init__()
        self.baseBone = layer.backBone()
        self.gazeBone = layer.GazeBone_5()

    def forward(self, x):
        shared = self.baseBone(x)
        return [self.gazeBone(shared)]
class multi_out_5_0517(nn.Module):
    """Shared backbone with 8 heads: eye, mouth, face, detect, emotion, angle,
    face-area and KL-occlusion classification.

    Bug fix: the original commented out ``self.baseBone`` together with an old
    configuration block, yet ``forward`` still calls ``self.baseBone(x)`` —
    running the original raised AttributeError.  The backbone is restored here.
    """

    def __init__(self):
        super(multi_out_5_0517, self).__init__()
        # Restored: forward() requires the shared trunk.
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.FaceCls = layer.KLOccCls()

    def forward(self, x, idTargets=None):
        # idTargets is unused (the recognition head is disabled in this
        # variant) but is kept so callers that pass it keep working.
        stage0 = self.baseBone(x)
        stage1 = self.eyeBone(stage0)
        stage2 = self.mouthBone(stage0)
        stage3 = self.faceBone(stage0)
        stage4 = self.detectBone(stage0)
        stage5 = self.emotionBone(stage0)
        stage6 = self.angleBone(stage0)
        stage7 = self.FaceAreaBone(stage0)
        stage8 = self.FaceCls(stage0)
        return [stage1, stage2, stage3, stage4, stage5, stage6, stage7, stage8]
class multi_out_9_20210519(nn.Module):
    """9-head variant (2021-05-19): adds binary-face and gaze heads."""

    def __init__(self):
        super(multi_out_9_20210519, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.binaryFaceBone = layer.binaryFaceBone()
        self.gazeBone = layer.GazeBone()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.binaryFaceBone, self.gazeBone)
        return [head(shared) for head in heads]
class multi_out_9_20210525(nn.Module):
    """9-head variant (2021-05-25): adds binary-face and angle-regression heads."""

    def __init__(self):
        super(multi_out_9_20210525, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.binaryFaceBone = layer.binaryFaceBone()
        self.angleRegBone = layer.AngleBoneReg()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.binaryFaceBone, self.angleRegBone)
        return [head(shared) for head in heads]
class multi_out_8_1010(nn.Module):
    """8-head variant (2020-10-10); the recognition head also receives id targets."""

    def __init__(self):
        super(multi_out_8_1010, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.recogBone = layer.RecognitionBone()

    def forward(self, x, idTargets=None):
        shared = self.baseBone(x)
        outputs = [head(shared) for head in (self.eyeBone, self.mouthBone,
                                             self.faceBone, self.detectBone,
                                             self.emotionBone, self.angleBone,
                                             self.FaceAreaBone)]
        # The recognition head additionally consumes the identity targets.
        outputs.append(self.recogBone(shared, idTargets))
        return outputs
class multi_out_8_1111(nn.Module):
    """8-head variant (2020-11-11) using the 1111 eye head; recognition takes id targets."""

    def __init__(self):
        super(multi_out_8_1111, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale_1111()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.recogBone = layer.RecognitionBone()

    def forward(self, x, idTargets=None):
        shared = self.baseBone(x)
        outputs = [head(shared) for head in (self.eyeBone, self.mouthBone,
                                             self.faceBone, self.detectBone,
                                             self.emotionBone, self.angleBone,
                                             self.FaceAreaBone)]
        outputs.append(self.recogBone(shared, idTargets))
        return outputs
class multi_out_9_0408(nn.Module):
    """9-head variant (2021-04-08): recognition (with id targets) plus a quality head."""

    def __init__(self):
        super(multi_out_9_0408, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale_1111()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.recogBone = layer.RecognitionBone()
        self.FaceQuality = layer.QualityCls()

    def forward(self, x, idTargets=None):
        shared = self.baseBone(x)
        outputs = [head(shared) for head in (self.eyeBone, self.mouthBone,
                                             self.faceBone, self.detectBone,
                                             self.emotionBone, self.angleBone,
                                             self.FaceAreaBone)]
        outputs.append(self.recogBone(shared, idTargets))
        outputs.append(self.FaceQuality(shared))
        return outputs
class multi_out_8_0104(nn.Module):
    """8-head variant (2021-01-04) built on the backBone0104 trunk."""

    def __init__(self):
        super(multi_out_8_0104, self).__init__()
        self.baseBone = layer.backBone0104()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.recogBone = layer.RecognitionBone()

    def forward(self, x, idTargets=None):
        shared = self.baseBone(x)
        outputs = [head(shared) for head in (self.eyeBone, self.mouthBone,
                                             self.faceBone, self.detectBone,
                                             self.emotionBone, self.angleBone,
                                             self.FaceAreaBone)]
        outputs.append(self.recogBone(shared, idTargets))
        return outputs
class Onet(nn.Module):
    """Single-stage O-net: one eye bone, output wrapped in a 1-element list."""

    def __init__(self):
        super(Onet, self).__init__()
        self.baseBone = layer.eyeBone_Onet()

    def forward(self, x):
        return [self.baseBone(x)]
class OnetV2(nn.Module):
    """Single-stage O-net V2: one eye bone, output wrapped in a 1-element list."""

    def __init__(self):
        super(OnetV2, self).__init__()
        self.baseBone = layer.eyeBone_OnetV2()

    def forward(self, x):
        return [self.baseBone(x)]
class OnetV3(nn.Module):
    """O-net V3: shared trunk feeding alignment and binary-face heads."""

    def __init__(self):
        super(OnetV3, self).__init__()
        self.baseBone = layer.baseBone_Onet()
        self.alignBone = layer.alignBone_Onet()
        self.binaryFaceBone = layer.binaryBone_Onet()

    def forward(self, x):
        shared = self.baseBone(x)
        return [self.alignBone(shared), self.binaryFaceBone(shared)]
class OnetV4(nn.Module):
    """O-net V4: shared trunk feeding alignment, binary-face and gaze heads."""

    def __init__(self):
        super(OnetV4, self).__init__()
        self.baseBone = layer.baseBone_Onet()
        self.alignBone = layer.alignBone_Onet()
        self.binaryFaceBone = layer.binaryBone_Onet()
        self.gazeBone = layer.gazeBone_Onet()

    def forward(self, x):
        shared = self.baseBone(x)
        return [self.alignBone(shared),
                self.binaryFaceBone(shared),
                self.gazeBone(shared)]
class multi_out_12(nn.Module):
    """12-head variant: base 7 heads, recognition (with id targets), and per-side
    left/right eye and mouth heads."""

    def __init__(self):
        super(multi_out_12, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.recogBone = layer.RecognitionBone()
        self.eyeBone_right = layer.eyeBone_multiscale()
        self.mouthBone_right = layer.mouthBone_multiscale()
        self.eyeBone_left = layer.eyeBone_multiscale()
        self.mouthBone_left = layer.mouthBone_multiscale()

    def forward(self, x, idTargets=None):
        shared = self.baseBone(x)
        outputs = [head(shared) for head in (self.eyeBone, self.mouthBone,
                                             self.faceBone, self.detectBone,
                                             self.emotionBone, self.angleBone,
                                             self.FaceAreaBone)]
        outputs.append(self.recogBone(shared, idTargets))
        outputs.extend(head(shared) for head in (self.eyeBone_right,
                                                 self.mouthBone_right,
                                                 self.eyeBone_left,
                                                 self.mouthBone_left))
        return outputs
class multi_out_12_20210406(nn.Module):
    """12-head variant (2021-04-06): like multi_out_12 but with the 20210406 eye head."""

    def __init__(self):
        super(multi_out_12_20210406, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale_20210406()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.recogBone = layer.RecognitionBone()
        self.eyeBone_right = layer.eyeBone_multiscale()
        self.mouthBone_right = layer.mouthBone_multiscale()
        self.eyeBone_left = layer.eyeBone_multiscale()
        self.mouthBone_left = layer.mouthBone_multiscale()

    def forward(self, x, idTargets=None):
        shared = self.baseBone(x)
        outputs = [head(shared) for head in (self.eyeBone, self.mouthBone,
                                             self.faceBone, self.detectBone,
                                             self.emotionBone, self.angleBone,
                                             self.FaceAreaBone)]
        outputs.append(self.recogBone(shared, idTargets))
        outputs.extend(head(shared) for head in (self.eyeBone_right,
                                                 self.mouthBone_right,
                                                 self.eyeBone_left,
                                                 self.mouthBone_left))
        return outputs
class multi_out_10_20210526(nn.Module):
    """10-head variant (2021-05-26): adds binary-face, gaze and angle-regression heads."""

    def __init__(self):
        super(multi_out_10_20210526, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.binaryFaceBone = layer.binaryFaceBone()
        self.gazeBone = layer.GazeBone()
        self.angleRegBone = layer.AngleBoneReg()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.binaryFaceBone, self.gazeBone, self.angleRegBone)
        return [head(shared) for head in heads]
class pose(nn.Module):
    """Backbone plus a head-pose head; returns [yaw, pitch, roll]."""

    def __init__(self):
        super().__init__()
        self.baseBone = layer.backBone()
        self.poseBone = layer.PoseBone()

    def forward(self, x):
        feats = self.baseBone(x)
        # PoseBone yields the (yaw, pitch, roll) triple; expose it as a list.
        return list(self.poseBone(feats))
class multi_out_9_20210615(nn.Module):
    """9-head variant (2021-06-15): adds binary-face and pose heads."""

    def __init__(self):
        super(multi_out_9_20210615, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.binaryFaceBone = layer.binaryFaceBone()
        self.poseBone = layer.PoseBone()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.binaryFaceBone, self.poseBone)
        return [head(shared) for head in heads]
class multi_out_11_20210721(nn.Module):
    """11-head variant (2021-07-21): adds binary-face, angle-regression, pose and
    KL-occlusion heads."""

    def __init__(self):
        super(multi_out_11_20210721, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.binaryFaceBone = layer.binaryFaceBone()
        self.angleRegBone = layer.AngleBoneReg()
        self.poseBone = layer.PoseBone()
        self.FaceCls = layer.KLOccCls()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.binaryFaceBone, self.angleRegBone, self.poseBone,
                 self.FaceCls)
        return [head(shared) for head in heads]
class multi_out_13_20211008(nn.Module):
    """13-head variant (2021-10-08): multi_out_11 heads plus gender and age classifiers."""

    def __init__(self):
        super(multi_out_13_20211008, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.binaryFaceBone = layer.binaryFaceBone()
        self.angleRegBone = layer.AngleBoneReg()
        self.poseBone = layer.PoseBone()
        self.FaceCls = layer.KLOccCls()
        self.genderBone = layer.genderClassify()
        self.ageBone = layer.ageClassify()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.binaryFaceBone, self.angleRegBone, self.poseBone,
                 self.FaceCls, self.genderBone, self.ageBone)
        return [head(shared) for head in heads]
class multi_out_13_20210819(nn.Module):
    """13-head variant (2021-08-19): multi_out_11 heads plus per-side eye heads."""

    def __init__(self):
        super(multi_out_13_20210819, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.binaryFaceBone = layer.binaryFaceBone()
        self.angleRegBone = layer.AngleBoneReg()
        self.poseBone = layer.PoseBone()
        self.FaceCls = layer.KLOccCls()
        self.eyeBone_right = layer.eyeBone_multiscale()
        self.eyeBone_left = layer.eyeBone_multiscale()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.binaryFaceBone, self.angleRegBone, self.poseBone,
                 self.FaceCls, self.eyeBone_right, self.eyeBone_left)
        return [head(shared) for head in heads]
class multi_out_8_20210803(nn.Module):
    """8-head variant (2021-08-03): adds a wrinkle-detection head."""

    def __init__(self):
        super(multi_out_8_20210803, self).__init__()
        self.baseBone = layer.backBone()
        self.eyeBone = layer.eyeBone_multiscale()
        self.mouthBone = layer.mouthBone_multiscale()
        self.faceBone = layer.faceBone()
        self.detectBone = layer.detectBone_multiScale()
        self.emotionBone = layer.emotionBone_WITH_BG()
        self.angleBone = layer.angleBone_six()
        self.FaceAreaBone = layer.FaceAreaBone_seven_new()
        self.WrinkleBone = layer.WrinkleBone()

    def forward(self, x):
        shared = self.baseBone(x)
        heads = (self.eyeBone, self.mouthBone, self.faceBone, self.detectBone,
                 self.emotionBone, self.angleBone, self.FaceAreaBone,
                 self.WrinkleBone)
        return [head(shared) for head in heads]
```
#### File: src/utils/check_xml.py
```python
import cv2
import numpy as np
import glob
import os
import xml.dom.minidom
import random
import math
from tqdm import tqdm
import time
def getlist(dir, extension, Random=False):
    """Recursively collect files under *dir* whose extension equals *extension*.

    Paths are normalised to forward slashes.  When *Random* is true the result
    is shuffled in place before being returned.  Directories seen during the
    walk are printed (behaviour kept from the original).

    Fix: the original accumulated results in a variable named ``list``,
    shadowing the builtin; it also normalised only the last-appended path on
    every iteration, which worked but obscured the intent.
    """
    matches = []
    for root, dirs, files in os.walk(dir, topdown=False):
        for name in dirs:
            print(os.path.join(root, name))
        for name in files:
            if os.path.splitext(name)[1] == extension:
                matches.append(os.path.join(root, name).replace('\\', '/'))
    if Random:
        random.shuffle(matches)
    return matches
def randombaoguang(img):
    """Simulate over-exposure by progressively brightening the HSV V channel.

    Accepts a grayscale or BGR uint8 image; returns a BGR uint8 image.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24, so the
    original crashes on modern NumPy — replaced with the explicit ``np.float64``.
    """
    if len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    # Convert BGR -> HSV so brightness can be edited in isolation.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    img = img.astype(np.float64)
    # Graduated boost: the brighter a pixel already is, the more it gains.
    img[:, :, 2] = np.where(img[:, :, 2] > 100, img[:, :, 2] + 20.0, img[:, :, 2])
    img[:, :, 2] = np.where(img[:, :, 2] > 150, img[:, :, 2] + 30.0, img[:, :, 2])
    img[:, :, 2] = np.where(img[:, :, 2] > 180, img[:, :, 2] + 40.0, img[:, :, 2])
    # Clamp to the valid 8-bit range before casting back (prevents overflow).
    img = np.where(img > 255, 255, img)
    img = img.astype(np.uint8)
    res = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
    return res
def functiona(img):
    """Scale log(1 + img) by a randomly chosen gain (17-19) and log base (2 or 3)."""
    gain = random.choice([17, 18, 19])
    base = random.choice([2, 3])
    return gain * math.log(1 + img, base)
def randomLogTransform(img):
    """Apply a random log transform to a grayscale image, in place.

    Colour input is first converted to grayscale.  Each pixel p becomes
    gain * log_base(1 + p), truncated back into the array's dtype.

    Fix: replaces the original per-pixel Python double loop (O(H*W)
    interpreter iterations) with a single vectorised NumPy expression; the
    cast back to ``img.dtype`` reproduces the truncation the original
    element-wise integer assignment performed.
    """
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gain = random.choice([17, 18, 19])
    base = random.choice([2, 3])
    out = gain * np.log1p(img.astype(np.float64)) / math.log(base)
    img[...] = out.astype(img.dtype)
    return img
def getboxes(xmlFile):
    """Parse a VOC-style annotation file into an (N, 4) array of boxes.

    Each row is (xmin, ymin, xmax, ymax).  Returns a single all-zero box when
    the file does not exist (behaviour kept from the original).

    Fix: the original re-ran ``getElementsByTagName`` four times per box
    inside the loop; each coordinate list is now fetched from the DOM once.
    """
    if not os.path.exists(xmlFile):
        return np.zeros((1, 4))
    dom = xml.dom.minidom.parse(xmlFile)
    root = dom.documentElement
    xmins = root.getElementsByTagName('xmin')
    ymins = root.getElementsByTagName('ymin')
    xmaxs = root.getElementsByTagName('xmax')
    ymaxs = root.getElementsByTagName('ymax')
    boxes = np.zeros((len(xmins), 4))
    for i, items in enumerate(zip(xmins, ymins, xmaxs, ymaxs)):
        boxes[i] = [int(float(item.firstChild.data)) for item in items]
    return boxes
# Read the annotation list (one line per image) produced by the detect-loss run.
with open('/jiangtao2/code_with_git/MultiTaskOnFaceRebuild/detectLossList.txt','r') as f:
    lines = f.readlines()
annotations = lines
imgList = []
# Each line looks like "<image path, possibly containing spaces> <label tokens>";
# rebuild the image path by re-joining every token except the last one.
for annotation in annotations:
    strings = annotation.strip().split(' ')
    imgFile = ''
    for i in range(len(strings)-1):
        if i != 0:
            strings[i] = ' ' + strings[i]
        imgFile += strings[i]
    # imgFile = imgFile.replace('/jiangtao2/', 'J:/')
    imgList.append(imgFile)
# print(imgList[0:5])
# time.sleep(1000)
# DIR = 'D:/images_check/smoke/'
# imgList = getlist(DIR,'.jpg')
# imgList += getlist(DIR,'.JPG')
for img_file in tqdm(imgList[0:3500]):
try:
# if ('84659e86-9393-11eb-b3c5-d0c637e007cb' not in img_file):
# continue
# print(img_file)
img_file = img_file.replace('\\','/')
img = cv2.imread(img_file,0)
# if 1280 != img.shape[0]:
# continue
xml_file = img_file
xml_file = os.path.splitext(xml_file)[0] + '.xml'
if not os.path.exists(xml_file):
print('{} do not have xml'.format(img_file))
continue
boxes = getboxes(xml_file)
# print(boxes)
for box in boxes:
minX = box[0]
minY = box[1]
maxX = box[2]
maxY = box[3]
# if(int(maxX) - int(minX) < 100) and ((1280 - int(maxX)) < 50):
# print(img_file)
# # os.remove(xml_file)
# # os.remove(img_file)
# continue
cv2.rectangle(img,(int(float(minX)),int(float(minY))),(int(float(maxX)),int(float(maxY))),(255,0,0),1)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'leftup',(int(minX),int(minY)), font, 0.4, (0, 0, 255), 1)
cv2.putText(img,'rightdown',(int(maxX),int(maxY)), font, 0.4, (0, 255, 0), 1)
if(0):
cv2.imshow('a',img)
cv2.waitKey(0)
if(1):
targetDir = '/jiangtao2/code_with_git/MultiTaskOnFaceRebuild/images/detectTest'
if not os.path.exists(targetDir):
os.makedirs(targetDir)
cv2.imwrite(os.path.join(targetDir,os.path.basename(img_file)),img)
``` |
{
"source": "jiangtiantu/hikyuu",
"score": 3
} |
#### File: admin/service/user.py
```python
import logging
from .restful import session_get, session_post, check_res
from data import SessionModel
class UserService:
    """Thin wrapper around the user REST endpoints."""

    @staticmethod
    def query_users(session: SessionModel):
        """Return the user list visible to *session*."""
        resp = session_get(session, "user", "user")
        check_res(resp)
        return resp["data"]

    @staticmethod
    def add_user(session: SessionModel, name: str, password: str):
        """Create a user and return the raw service response."""
        payload = {"user": name, "password": password}
        resp = session_post(session, "user", "user", json=payload)
        check_res(resp)
        return resp
``` |
{
"source": "jiangtiantu/kquant_data",
"score": 2
} |
#### File: kquant_data/demo_future/D01_download_ipo_last_trade_trading.py
```python
from WindPy import w
import sys
import os
import pandas as pd
from kquant_data.wind.wss import download_ipo_last_trade_trading
from kquant_data.xio.csv import write_data_dataframe, read_data_dataframe
from kquant_data.config import __CONFIG_H5_FUT_SECTOR_DIR__
from kquant_data.wind.wset import read_constituent
# 解决Python 3.6的pandas不支持中文路径的问题
print(sys.getfilesystemencoding()) # 查看修改前的
try:
sys._enablelegacywindowsfsencoding() # 修改
print(sys.getfilesystemencoding()) # 查看修改后的
except:
pass
def process_dir2file(w, mydir, myfile, min_filename="2017-01-01.csv"):
    """Scan constituent CSV snapshots under *mydir* and append IPO/last-trade
    data for any codes not yet present in *myfile*.

    Parameters
    ----------
    w : Wind API session.
    mydir : directory of per-date constituent CSVs.
    myfile : accumulated ipo/last-trade CSV, read and rewritten in place.
    min_filename : snapshots whose file name sorts before this are skipped.
        This replaces a hard-coded cutoff the original comment said had to be
        edited by hand before every run.
    """
    df = read_data_dataframe(myfile)
    all_set = set(df.index)
    for dirpath, dirnames, filenames in os.walk(mydir):
        for filename in filenames:
            # Skip constituent snapshots older than the cutoff date.
            if filename < min_filename:
                continue
            filepath = os.path.join(dirpath, filename)
            df1 = read_constituent(filepath)
            if df1 is None:
                continue
            if df1.empty:
                continue
            curr_set = set(df1['wind_code'])
            diff_set = curr_set - all_set
            if len(diff_set) == 0:
                continue
            print(filepath)
            df2 = download_ipo_last_trade_trading(w, list(diff_set))
            df = pd.concat([df, df2])
            all_set = set(df.index)
            # Persist after every batch so an interruption loses nothing.
            write_data_dataframe(myfile, df)
    # Final pass: sort by IPO date then code, drop the helper column, persist.
    df['wind_code'] = df.index
    df.sort_values(by=['ipo_date', 'wind_code'], inplace=True)
    del df['wind_code']
    write_data_dataframe(myfile, df)
if __name__ == '__main__':
    w.start()
    # New codes are diffed against this accumulated file before downloading.
    outputFile = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, 'ipo_last_trade_trading.csv')
    # All four exchange sector directories are processed identically, so loop
    # instead of the original four copy-pasted call blocks.
    sectors = ['郑商所全部品种', '中金所全部品种', '上期所全部品种', '大商所全部品种']
    for sector in sectors:
        path = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, sector)
        process_dir2file(w, path, outputFile)
```
#### File: kquant_data/demo_future/E01_download_daily.py
```python
import os
import sys
from datetime import datetime, timedelta
import pandas as pd
from WindPy import w
from kquant_data.config import __CONFIG_TDAYS_SHFE_FILE__, __CONFIG_H5_FUT_SECTOR_DIR__
from kquant_data.utils.symbol import split_alpha_number
from kquant_data.utils.xdatetime import yyyyMMdd_2_datetime
from kquant_data.wind.tdays import read_tdays
from kquant_data.wind.wsd import download_daily_ohlcv
from kquant_data.wind.wset import read_constituent
from kquant_data.xio.csv import read_datetime_dataframe
# 解决Python 3.6的pandas不支持中文路径的问题
print(sys.getfilesystemencoding()) # 查看修改前的
try:
sys._enablelegacywindowsfsencoding() # 修改
print(sys.getfilesystemencoding()) # 查看修改后的
except:
pass
def read_constituent_at_date(dirpath, date):
    """Load the constituent file for *date*, falling back to the permanent 0.csv."""
    csv_name = '%s.csv' % date.strftime('%Y-%m-%d')
    df = read_constituent(os.path.join(dirpath, csv_name))
    if df is None:
        # Index-style sectors are not snapshotted per day; they live in a
        # single always-current file named 0.csv.
        df = read_constituent(os.path.join(dirpath, '0.csv'))
    return df
def merge_constituent_date(constituent, ipo_last_trade, first_last):
    """Attach IPO/last-trade dates and compute each contract's download window.

    Merges *ipo_last_trade* (and, when given, *first_last* — previously
    downloaded first/last bar timestamps) onto *constituent*, then derives
    ``start`` = max(ipo_date, last downloaded) and ``end`` = min(last trade
    date, now).  Simulated ("仿真") contracts are dropped.

    Fixes: hoists ``datetime.today()`` out of the per-row lambda (the
    original called it once per row) and removes a dead ``if True:`` guard
    around the simulated-contract filter.
    """
    constituent_dt = constituent.merge(ipo_last_trade, how='left',
                                       left_on='wind_code', right_on='wind_code')
    if first_last is None:
        constituent_dt['first'] = pd.NaT
        constituent_dt['last'] = pd.NaT
    else:
        constituent_dt = constituent_dt.merge(first_last, how='left',
                                              left_on='wind_code', right_index=True)
    today = datetime.today()
    constituent_dt['start'] = constituent_dt.apply(
        lambda row: max(row['ipo_date'], row['last']), axis=1)
    constituent_dt['end'] = constituent_dt.apply(
        lambda row: min(row['lasttrade_date'], today), axis=1)
    # Contracts with no expiry recorded get "now" as their window end.
    constituent_dt['end'] = constituent_dt['end'].fillna(today)
    # Simulated contracts are not tradable instruments — never download them.
    constituent_dt = constituent_dt[~constituent_dt['sec_name'].str.contains('仿真')]
    return constituent_dt
def download_constituent_daily(w, dirpath, date, ipo_last_trade, first_last, wind_code_set, fields):
    """Download daily OHLCV for every constituent of one sector on *date*.

    :param w: WindPy session
    :param dirpath: sector-constituent directory (one CSV per date)
    :param date: trading day whose constituent list is used
    :param ipo_last_trade: DataFrame with ipo_date/lasttrade_date per code
    :param first_last: DataFrame of first/last downloaded dates, or None
    :param wind_code_set: codes already handled in this session
    :param fields: comma-separated Wind field list
    :return: the updated *wind_code_set*

    NOTE(review): writes under the module-level ``root_path`` assigned in
    the ``__main__`` block — calling this from elsewhere requires that
    global to be set first.
    """
    constituent = read_constituent_at_date(dirpath, date)
    if constituent is None:
        # No list for this date: it was identical to the previous one,
        # whose data has already been downloaded — nothing to do.
        return wind_code_set
    constituent_dt = merge_constituent_date(constituent, ipo_last_trade, first_last)
    for i in range(constituent_dt.shape[0]):
        row = constituent_dt.iloc[i]
        wind_code = row['wind_code']
        # Skip codes already downloaded in this session.
        if wind_code in wind_code_set:
            continue
        wind_code_set.add(wind_code)
        # Skip expired ranges (date precision only, time-of-day ignored).
        if row['start'].date() == row['end'].date():
            continue
        product, num = split_alpha_number(wind_code)
        path_dir = os.path.join(root_path, product)
        if not os.path.exists(path_dir):
            os.mkdir(path_dir)
        path_csv = os.path.join(path_dir, '%s.csv' % wind_code)
        try:
            df_old = pd.read_csv(path_csv, index_col=0, parse_dates=True)
            # Drop all-empty rows before merging with the new download.
            df_old.dropna(axis=0, how='all', thresh=3, inplace=True)
        except:
            df_old = None
            print(row)
        df_new, wind_code = download_daily_ohlcv(w, wind_code, row['start'], row['end'], fields)
        # Keep the newest copy of any duplicated dates.
        df = pd.concat([df_old, df_new])
        df = df[~df.index.duplicated(keep='last')]
        print(path_csv)
        df.to_csv(path_csv)
    return wind_code_set
if __name__ == '__main__':
    w.start()
    # NOTE: the source data may contain zeros.
    path_ipo_last_trade = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, 'ipo_last_trade_trading.csv')
    ipo_last_trade = pd.read_csv(path_ipo_last_trade)
    ipo_last_trade['ipo_date'] = ipo_last_trade['ipo_date'].apply(lambda x: yyyyMMdd_2_datetime(x))
    ipo_last_trade['lasttrade_date'] = ipo_last_trade['lasttrade_date'].apply(lambda x: yyyyMMdd_2_datetime(x))
    root_path = r'D:\DATA_FUT\86400'
    path_first_last = os.path.join(root_path, 'first_last.csv')
    first_last = read_datetime_dataframe(path_first_last)
    date_str = (datetime.today() - timedelta(days=1)).strftime('%Y-%m-%d')
    # A lookback of at least 30 days is the safe choice.
    start_str = (datetime.today() - timedelta(days=30)).strftime('%Y-%m-%d')
    trading_days = read_tdays(__CONFIG_TDAYS_SHFE_FILE__)
    trading_days = trading_days[start_str:date_str]
    wind_code_set = set()
    if True:
        # Futures exchange sector folders (CFFEX / SHFE / DCE / CZCE).
        fields = 'open,high,low,close,volume,amt,oi,settle'
        dirpath = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, '中金所全部品种')
        for i in range(len(trading_days)):
            wind_code_set = download_constituent_daily(w, dirpath, trading_days['date'][i], ipo_last_trade, first_last,
                                                       wind_code_set, fields)
        dirpath = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, '上期所全部品种')
        for i in range(len(trading_days)):
            wind_code_set = download_constituent_daily(w, dirpath, trading_days['date'][i], ipo_last_trade, first_last,
                                                       wind_code_set, fields)
        dirpath = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, '大商所全部品种')
        for i in range(len(trading_days)):
            wind_code_set = download_constituent_daily(w, dirpath, trading_days['date'][i], ipo_last_trade, first_last,
                                                       wind_code_set, fields)
        dirpath = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, '郑商所全部品种')
        for i in range(len(trading_days)):
            wind_code_set = download_constituent_daily(w, dirpath, trading_days['date'][i], ipo_last_trade, first_last,
                                                       wind_code_set, fields)
    # For the NanHua commodity index: create the matching sector folder
    # with a 0.csv inside (meaning "present every day, never delisted"),
    # and record its start/end dates in ipo_last_trade_trading.csv.
    if True:
        fields = 'open,high,low,close,volume'
        dirpath = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, '南华期货商品指数')
        for i in range(len(trading_days)):
            wind_code_set = download_constituent_daily(w, dirpath, trading_days['date'][i], ipo_last_trade, first_last,
                                                       wind_code_set, fields)
```
#### File: demo_stock/A_1day/B03_merge.py
```python
import os
from kquant_data.config import __CONFIG_H5_STK_DIR__, __CONFIG_TDX_STK_DIR__, __CONFIG_H5_STK_DIVIDEND_DIR__
from kquant_data.processing.utils import filter_dataframe
from kquant_data.stock.stock import read_h5_tdx
from kquant_data.processing.MergeBar import MergeBar
class MergeDataStock(MergeBar):
    """Stock flavour of MergeBar: supplies the datetime axis, the field
    list, and the per-symbol bar reader."""

    def init_datetime(self):
        """Build the master datetime axis from the SH index 000001."""
        df = read_h5_tdx('sh', '000001', self.bar_size, __CONFIG_H5_STK_DIR__, __CONFIG_TDX_STK_DIR__,
                         __CONFIG_H5_STK_DIVIDEND_DIR__)
        # Dropping the pre-2005 years avoids a crash seen with the full
        # history; the original author noted this workaround is dubious.
        df = filter_dataframe(df, 'DateTime', "2005", None, fields=['DateTime'])
        # Persisting this axis is optional.
        self.datetime = df
        super(MergeDataStock, self).init_datetime()

    def init_fields(self):
        """Columns that get one merged output each."""
        self.fields = ['Open', 'High', 'Low', 'Close', 'Volume', 'Amount', 'backward_factor', 'forward_factor']

    def read_data(self, market, code, bar_size):
        """Load one symbol's bars (dividend factors already merged in)."""
        df = read_h5_tdx(market, code, self.bar_size, __CONFIG_H5_STK_DIR__, __CONFIG_TDX_STK_DIR__,
                         __CONFIG_H5_STK_DIVIDEND_DIR__)
        return df
if __name__ == '__main__':
    # Export daily data.
    # NOTE(review): merge/hmerge/clear are inherited from MergeBar, whose
    # behaviour is not visible in this file.
    path = os.path.join(__CONFIG_H5_STK_DIR__, "1day")
    mdf = MergeDataStock(path)
    mdf.merge()
    mdf.hmerge()
    mdf.clear()
```
#### File: demo_stock/B_5min/A02_concat_h5.py
```python
import os
import pandas as pd
from kquant_data.config import __CONFIG_H5_STK_DIR__, __CONFIG_H5_STK_DIVIDEND_DIR__
from kquant_data.stock.stock import merge_adjust_factor, bars_to_h5
from kquant_data.processing.utils import filter_dataframe, multiprocessing_convert
from kquant_data.stock.symbol import get_folder_symbols
def _export_data(rule, _input, output, instruments, i):
    """Concatenate one instrument's newly exported bars onto its stored
    HDF5 history and re-attach dividend adjustment factors.

    :param rule: bar-rule label (not used in the body; kept for the
                 multiprocessing_convert callback signature)
    :param _input: sub-directory holding freshly exported bars
    :param output: sub-directory holding the accumulated history
    :param instruments: DataFrame with market/local_symbol rows
    :param i: row index into *instruments*
    """
    t = instruments.iloc[i]
    print("%d %s" % (i, t['local_symbol']))
    path_new = os.path.join(__CONFIG_H5_STK_DIR__, _input, t['market'], "%s.h5" % t['local_symbol'])
    # Should not fail: the data was exported here by a previous step.
    df_new = pd.read_hdf(path_new)
    if df_new is None:
        return None
    df_new = filter_dataframe(df_new, 'DateTime', None, None, None)
    path_old = os.path.join(__CONFIG_H5_STK_DIR__, output, t['market'], "%s.h5" % t['local_symbol'])
    try:
        # There may be no previous history at all.
        df_old = pd.read_hdf(path_old)
        if df_old is None:
            df = df_new
        else:
            df_old = filter_dataframe(df_old, 'DateTime', None, None, None)
            # A naive concat would duplicate rows: keep the old bars and
            # append only the strictly newer part of the new bars.
            last_ts = df_old.index[-1]
            df_new2 = df_new[last_ts:][1:]
            df = pd.concat([df_old, df_new2])
    except:
        df = df_new
    # The dividend file may be missing.
    div_path = os.path.join(__CONFIG_H5_STK_DIVIDEND_DIR__, "%s.h5" % t['local_symbol'])
    try:
        div = pd.read_hdf(div_path)
        div = filter_dataframe(div, 'time', None, None, None)
        df = merge_adjust_factor(df, div)
    except:
        # Usually file-not-found: no dividend info, use neutral factors.
        df['backward_factor'] = 1
        df['forward_factor'] = 1
    bars_to_h5(path_old, df)
if __name__ == '__main__':
    # 此合并h5的代码已经废弃不用 (this h5-merge entry point is retired).
    src_rule = '5min_lc5'
    dst_rule = '5min'
    instruments = get_folder_symbols(__CONFIG_H5_STK_DIR__, src_rule)
    multiprocessing_convert(True, '5min', src_rule, dst_rule, instruments, _export_data)
    # Convenient spot for a breakpoint.
    debug = 1
```
#### File: kquant_data/processing/merge.py
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
指定数据目录,生成对应的合约行业数据
"""
import os
import numpy as np
import pandas as pd
from ..api import all_instruments, get_datetime
from ..config import __CONFIG_H5_STK_DIR__, __CONFIG_H5_STK_SECTOR_DIR__, __CONFIG_H5_STK_FACTOR_DIR__, \
__CONFIG_H5_STK_WEIGHT_DIR__
from .utils import expand_dataframe_according_to
from ..utils.xdatetime import tic, toc
from ..wind.wset import read_sectorconstituent_from_dir, read_indexconstituent_from_dir
from ..xio.h5 import write_dataframe_set_dtype_remove_head
from ..xio.csv import read_data_dataframe
def load_sector(path, value):
    """
    Read one sector-constituent directory and pivot it into a
    (datetime x wind_code) table filled with *value*.

    :param path: directory of dated constituent CSVs
    :param value: marker written for every (date, code) membership
    :return: unstacked DataFrame (rows = datetimes, columns = codes)
    """
    members = read_sectorconstituent_from_dir(path)
    members['value'] = value
    # Two-level index (datetime, wind_code), then unstack the marker.
    members = members.set_index(members['_datetime_'])
    members = members.set_index([members.index, members['wind_code']], drop=False)
    return members['value'].unstack()
def load_sectors(fold_path):
    """
    Load a two-level sector directory, tagging each sub-sector's rows
    with its numeric ID, and pivot into a (datetime x wind_code) table.

    The sub-sector list (sec_name / ID columns) is read from
    ``<fold_path>.csv`` next to the folder.

    :param fold_path: folder holding one sub-folder per sub-sector
    :return: unstacked DataFrame of sector IDs
    """
    path_ = '%s.csv' % fold_path
    sectors = pd.read_csv(path_, encoding='utf-8-sig')
    df = None
    for i in range(0, len(sectors)):
        print(sectors.iloc[i, :])
        # FIX: the ``.ix`` indexer was removed in pandas 1.0; use the
        # positional row accessor instead.
        row = sectors.iloc[i]
        sec_name = row['sec_name']
        id = row['ID']
        foldpath = os.path.join(fold_path, sec_name)
        df_tmp = read_sectorconstituent_from_dir(foldpath)
        df_tmp['value'] = id
        if df is None:
            df = df_tmp
        else:
            df = pd.concat([df, df_tmp])
    # Two-level index (datetime, wind_code), then unstack the ID column.
    df = df.set_index(df['_datetime_'])
    df = df.set_index([df.index, df['wind_code']], drop=False)
    df = df['value'].unstack()
    return df
def load_index_weight(path):
    """
    Read index constituents from *path* and pivot the ``i_weight``
    column into a (datetime x wind_code) table.

    :param path: directory of dated index-constituent files
    :return: unstacked DataFrame of weights
    """
    weights = read_indexconstituent_from_dir(path)
    # Two-level index (datetime, wind_code), then unstack the weights.
    weights = weights.set_index(weights['_datetime_'])
    weights = weights.set_index([weights.index, weights['wind_code']], drop=False)
    return weights['i_weight'].unstack()
def merge_sector(rule, sector_name, dataset_name):
    """
    Merge a single-level sector folder into a 0/1 membership matrix
    (datetime x symbol) stored as <rule>/<dataset_name>.h5.

    :param rule: bar-rule folder (holds Symbol.csv / DateTime.csv)
    :param sector_name: sector folder under the sector data directory
    :param dataset_name: HDF5 dataset/file name for the result
    :return:
    """
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, 'Symbol.csv')
    symbols = all_instruments(path)
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, 'DateTime.csv')
    DateTime = get_datetime(path)
    tic()
    path = os.path.join(__CONFIG_H5_STK_SECTOR_DIR__, sector_name)
    df = load_sector(path, 1)
    print("数据加载完成")
    toc()
    df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
    # Some stocks were never in the sector (e.g. never ST-ed, or recent
    # listings), so missing entries mean "not a member" -> 0.
    df.fillna(0, inplace=True)
    print("数据加载完成")
    toc()
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, "%s.h5" % dataset_name)
    write_dataframe_set_dtype_remove_head(path, df, np.int8, dataset_name)
    toc()
def merge_sectors(rule, sector_name, dataset_name):
    """
    Merge a two-level sector folder into a sector-ID matrix
    (datetime x symbol) stored as <rule>/<dataset_name>.h5.

    :param rule: bar-rule folder (holds Symbol.csv / DateTime.csv)
    :param sector_name: parent sector folder (with sub-sector folders)
    :param dataset_name: HDF5 dataset/file name for the result
    :return:
    """
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, 'Symbol.csv')
    symbols = all_instruments(path)
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, 'DateTime.csv')
    DateTime = get_datetime(path)
    tic()
    path = os.path.join(__CONFIG_H5_STK_SECTOR_DIR__, sector_name)
    df = load_sectors(path)
    print("数据加载完成")
    toc()
    df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
    # df.to_csv(r"D:\1.csv")
    # Missing entries mean "not in any sub-sector" -> 0.
    df.fillna(0, inplace=True)
    print("数据加载完成")
    toc()
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, "%s.h5" % dataset_name)
    write_dataframe_set_dtype_remove_head(path, df, np.int16, dataset_name)
    toc()
def merge_report(rule, field, dataset_name):
    """
    Merge all per-period factor files under *field* into one
    forward-filled (datetime x symbol) matrix stored as
    <rule>/<dataset_name>.h5.

    :param rule: bar-rule folder (holds Symbol.csv / DateTime.csv)
    :param field: factor sub-directory name (also the HDF5 dataset name)
    :param dataset_name: output file name
    """
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, 'Symbol.csv')
    symbols = all_instruments(path)
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, 'DateTime.csv')
    DateTime = get_datetime(path)
    tic()
    path = os.path.join(__CONFIG_H5_STK_FACTOR_DIR__, field)
    df = None
    cnt = 0
    for dirpath, dirnames, filenames in os.walk(path):
        # NOTE(review): the original comment said "only handle
        # directories", but the loop actually processes every file.
        for filename in filenames:
            print(filename)
            filepath = os.path.join(dirpath, filename)
            df_ = read_data_dataframe(filepath)
            df_tmp = df_.stack()
            if df is None:
                df = df_tmp
            else:
                df = pd.concat([df, df_tmp])
            cnt += 1
            # if cnt > 10:
            #     break
    df = df.unstack()
    df.fillna(method='ffill', inplace=True)
    print("数据加载完成")
    toc()
    df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
    #
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, "%s.h5" % dataset_name)
    write_dataframe_set_dtype_remove_head(path, df, None, field)
def merge_weight_internal(symbols, DateTime, wind_code):
    """
    Load index-weight data for *wind_code* and expand it onto the
    master (datetime x symbol) grid.

    :param symbols: instrument table with a ``wind_code`` column
    :param DateTime: master datetime axis
    :param wind_code: index code whose weight folder is read
    :return: weight matrix; NaN = not a constituent, 0 = constituent
             with zero weight
    """
    tic()
    path = os.path.join(__CONFIG_H5_STK_WEIGHT_DIR__, wind_code)
    df = load_index_weight(path)
    print("数据加载完成")
    # Unlike sector data (fully populated), only part of this grid has
    # values, so a plain fillna after expansion would be wrong: mark the
    # genuinely missing cells with a sentinel first.
    df.fillna(-1, inplace=True)
    toc()
    # The raw data is simple but differs from sector data:
    # 1. constituents are rebalanced at fixed dates each year
    # 2. the weights change every day
    # Convention: NaN = not a constituent, 0 = constituent, weight 0.
    df = expand_dataframe_according_to(df, DateTime.index, symbols['wind_code'])
    # Turn the sentinel back into NaN.
    df.replace(-1, np.nan, inplace=True)
    print("数据加载完成")
    toc()
    return df
def merge_weight(rule, wind_code, dataset_name):
    """
    Merge one index's constituent weights into <rule>/<dataset_name>.h5.

    :param rule: bar-rule folder (holds Symbol.csv / DateTime.csv)
    :param wind_code: index code whose weights are merged
    :param dataset_name: HDF5 dataset/file name for the result
    :return:
    """
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, 'Symbol.csv')
    symbols = all_instruments(path)
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, 'DateTime.csv')
    DateTime = get_datetime(path)
    df = merge_weight_internal(symbols, DateTime, wind_code)
    path = os.path.join(__CONFIG_H5_STK_DIR__, rule, "%s.h5" % dataset_name)
    write_dataframe_set_dtype_remove_head(path, df, None, dataset_name)
    toc()
```
#### File: kquant_data/stock/dzh.py
```python
import urllib
import urllib.request
import numpy as np
from struct import *
from ..xio.h5 import write_dataframe_set_struct_keep_head
# Record layout used when persisting DZH dividend tables to HDF5.
dzh_h5_type = np.dtype([
    ('time', np.uint64),
    ('pre_day', np.float64),
    ('pre_close', np.float64),
    ('split', np.float64),
    ('purchase', np.float64),
    ('purchase_price', np.float64),
    ('dividend', np.float64),
    ('dr_pre_close', np.float64),
    ('dr_factor', np.float64),
    ('backward_factor', np.float64),
    ('forward_factor', np.float64),
])
def dividend_to_h5(input_path, data):
    """Write a dividend DataFrame to *input_path* as an HDF5 'Dividend'
    dataset using the dzh_h5_type record layout."""
    write_dataframe_set_struct_keep_head(input_path, data, dzh_h5_type, 'Dividend')
    return
class DzhFetcher(object):
    """Fetch 大智慧 (DZH) data files from a mirror server or a local file.

    Subclasses set ``_PATH`` to the URL path of the resource they read.
    After a successful fetch, ``self.f`` is a binary file-like object.
    """

    _IPS = ('192.168.127.12', '172.16.58.3')
    _PATH = None
    _FILE_PATH = None

    def __init__(self, filepath=None):
        # Mutable copy so failed servers can be dropped one by one.
        self.ips = list(self._IPS)
        self._fetched = False
        self._FILE_PATH = filepath

    def fetch_next_server(self):
        """Drop the server that just failed and retry with the next one.

        :raises FileNotFoundError: when every server has been exhausted.
        """
        # FIX: the original wrote ``self.ips.pop`` without calling it, so
        # no server was ever removed and retries hit the same bad host.
        if self.ips:
            self.ips.pop()
        if not self.ips:
            raise FileNotFoundError
        return self.fetch()

    def fetch(self):
        """Fetch from the local file when one was given, else via HTTP."""
        if self._FILE_PATH is None:
            return self._fetch_url()
        else:
            return self._fetch_file()

    def _fetch_url(self):
        # Local imports keep the module importable even where these are
        # not needed (file-based use).
        import io
        import urllib.error
        import urllib.request
        try:
            # FIX: the original called ``read()`` on the bare ``urllib``
            # module instead of opening ``data_url()``, and wrapped the
            # raw bytes in a StringIO. Open the URL and use BytesIO.
            with urllib.request.urlopen(self.data_url()) as response:
                data = response.read()
            self.f = io.BytesIO(data)
            self._fetched = True
        except urllib.error.URLError:
            return self.fetch_next_server()

    def _fetch_file(self):
        try:
            self.f = open(self._FILE_PATH, 'rb')
            self._fetched = True
        except OSError as e:
            raise e

    def data_url(self):
        """URL of the resource on the currently preferred server."""
        assert self._PATH, "No file path."
        if len(self.ips) == 0:
            return None
        return "http://" + self.ips[-1] + self._PATH
class DzhDividend(DzhFetcher):
    """大智慧除权数据 (DZH ex-dividend records) reader."""

    _PATH = '/platform/download/PWR/full.PWR'

    def read(self):
        """Generator of DZH ex-dividend records.

        Yields ``(symbol, records)`` tuples, e.g.::

            symbol: 'SZ000001'
            records: rows of time/split/purchase/purchase_price/dividend

        """
        if not self._fetched:
            self.fetch()
        # Skip the 12-byte file header.
        self.f.seek(12, 0)
        try:
            while True:
                yield self._read_symbol()
        except EOFError:
            # FIX (PEP 479): ``raise StopIteration`` inside a generator
            # becomes RuntimeError on Python 3.7+; a plain return ends
            # the generator cleanly.
            return
        finally:
            self.f.close()

    def _read_symbol(self):
        """Read one symbol block: a 16-byte symbol name followed by
        20-byte records until a 0xFFFFFFFF terminator.

        :raises EOFError: at the end of the file.
        """
        dividends = []
        rawsymbol = self.f.read(16)
        if rawsymbol == b'':
            raise EOFError
        symbol = unpack('16s', rawsymbol)[0].replace(b'\x00', b'')
        rawdate = self.f.read(4)
        dt = np.dtype([('time', np.int32),
                       ('split', np.float32),
                       ('purchase', np.float32),
                       ('purchase_price', np.float32),
                       ('dividend', np.float32)])
        while (rawdate) != b"\xff" * 4:
            dividend = np.frombuffer(rawdate + self.f.read(16), dtype=dt)
            dividends.append(dividend)
            rawdate = self.f.read(4)
            if rawdate == b'':
                break
        return (symbol, np.fromiter(dividends, dtype=dt))
def download_pwr(
        local_file=r"D:\dzh2\Download\PWR\full.PWR",
        url='http://192.168.127.12/platform/download/PWR/full.PWR',
        proxy=None):
    """Download the DZH ex-dividend (PWR) file to *local_file*.

    :param local_file: destination path
    :param url: source URL of full.PWR
    :param proxy: optional mapping such as ``{'http': 'host:port'}``
    """
    if proxy is not None:
        # Install the proxy at module level so the urlopen below (and
        # any later ones) are routed through it.
        handler = urllib.request.ProxyHandler(proxy)  # {'http': '192.168.1.60:808'}
        opener = urllib.request.build_opener(handler)
        urllib.request.install_opener(opener)
    # NOTE (from the original author): this needs attention — the
    # ex-dividend file can no longer be downloaded directly.
    response = urllib.request.urlopen(url)
    payload = response.read()
    with open(local_file, "wb") as fh:
        fh.write(payload)
    print(u'下载除权除息信息完成')
```
#### File: kquant_data/stock/stock.py
```python
import multiprocessing
import os
from functools import partial
import pandas as pd
from ..processing.utils import filter_dataframe
from ..utils.xdatetime import datetime_2_yyyyMMdd____, yyyyMMddHHmm_2_datetime, tic, toc, yyyyMMdd_2_datetime
from .dzh import DzhDividend, dividend_to_h5
from .tdx import read_file, bars_to_h5, get_h5_86400_path, get_tdx_path
from .symbol import is_stock
def sort_dividend(divs):
    """
    对权息信息按时间排序 — sort dividend records chronologically and
    index them by datetime.

    :param divs: iterable of records whose ``time`` field is a Unix
                 timestamp in seconds (UTC)
    :return: DataFrame indexed by datetime, or None when *divs* is
             empty (preserves the original implicit-None behaviour)
    """
    if len(divs) > 0:
        df = pd.DataFrame(divs)
        df = df.sort_values(by='time')
        # FIX: ``pd.datetime`` was removed in pandas 2.0; convert the
        # epoch-second column with the supported API instead.
        df.time = pd.to_datetime(df.time, unit='s')
        df = df.set_index('time')
        return df
def factor(daily, divs, ndigits):
    """
    Compute per-event ex-rights factors and cumulative backward/forward
    adjustment factors from daily bars and dividend events.

    The ex-rights price is derived from the previous close together with
    the split/purchase/dividend terms, rounded to *ndigits* (stocks use
    2 decimals, funds 3). The original author noted that some symbols
    still disagree slightly with Wind; exchanges publish the official
    pre-close, which would be the authoritative source.

    :param daily: daily bar DataFrame (needs ``DateTime`` and ``Close``)
    :param divs: dividend DataFrame with a yyyyMMdd ``time`` column and
                 split/purchase/purchase_price/dividend columns
    :param ndigits: rounding for the ex-rights price (2 stocks, 3 funds)
    :return: DataFrame of events with dr_pre_close/dr_factor and
             cumulative backward_factor/forward_factor columns
    """
    # Sorting normalises the time format as a side effect; the legacy
    # branch is kept for reference.
    if False:
        df = sort_dividend(divs)
    else:
        df = divs
        df.loc[:, 'time'] = df.loc[:, 'time'].apply(lambda x: yyyyMMdd_2_datetime(x))
        df = df.set_index('time')
    # Slice of the daily bars used to compute the ex-rights price.
    daily_part = daily[['DateTime', 'DateTime', 'Close']]
    daily_part.columns = ['time', 'pre_day', 'pre_close']
    first_day = daily_part.index[0]
    last_day = daily_part.index[-1]
    # Suspended stocks (e.g. SZ000001) have date gaps, so a plain lookup
    # can miss: merge everything first, then repair.
    daily_div = pd.merge(daily_part, df, how='outer', left_index=True, right_index=True, sort=False)
    # An event may be announced during a suspension, so pad the close
    # forward onto the ex-date.
    daily_div['pre_close'] = daily_div['pre_close'].fillna(method='pad', limit=1)
    daily_div = daily_div.fillna(method='pad', limit=1)
    daily_div[['time', 'pre_day', 'pre_close']] = daily_div[['time', 'pre_day', 'pre_close']].shift(1)
    daily_div[['split', 'purchase', 'purchase_price', 'dividend']] = daily_div[
        ['split', 'purchase', 'purchase_price', 'dividend']].fillna(method='bfill', limit=1)
    # After pre-processing keep only the event rows.
    df = daily_div.loc[df.index]
    # Some stocks publish events early, which would break the ratios.
    df = df.fillna(0)
    # Ex-rights price.
    df['dr_pre_close'] = (df['pre_close'] - df['dividend'] + df['purchase'] * df['purchase_price']) / (
        1 + df['split'] + df['purchase'])
    # Rounding is required or the factor comes out wrong.
    df['dr_pre_close'] = df['dr_pre_close'].apply(lambda x: round(x, ndigits))
    # Per-event adjustment factor.
    df['dr_factor'] = df['pre_close'] / df['dr_pre_close']
    # Events beyond the available bars get factor 1 (can happen when the
    # TDX download is incomplete).
    df.loc[df.index > last_day, 'dr_factor'] = 1
    # Risky: events before the first bar also get 1 for lack of data.
    df.loc[df.index < first_day, 'dr_factor'] = 1
    # Prepend a sentinel so the factor is 1 before the first event; the
    # listing date is unknown, so use a very early date.
    # FIX: ``pd.datetime`` was removed in pandas 2.0 -> pd.Timestamp.
    first_ = pd.DataFrame({'dr_factor': 1}, index=[pd.Timestamp(1900, 1, 1)])
    # FIX: ``DataFrame.append`` was removed in pandas 2.0 -> pd.concat.
    df = pd.concat([df, first_])
    df = df.sort_index()
    df['time'] = df.index.map(datetime_2_yyyyMMdd____)
    # Backward factor: cumulative product of the per-event factors.
    df['backward_factor'] = df['dr_factor'].cumprod()
    # Forward factor: normalised so the latest value is 1.
    df['forward_factor'] = df['backward_factor'] / float(df['backward_factor'][-1:])
    df = df[['time', 'pre_day', 'pre_close',
             'split', 'purchase', 'purchase_price', 'dividend',
             'dr_pre_close', 'dr_factor', 'backward_factor', 'forward_factor']]
    return df
def adjust(df, adjust_type=None):
    """Apply forward or backward adjustment factors to the OHLC columns.

    :param df: bars with Open/High/Low/Close and *_factor columns
    :param adjust_type: None (no-op), or a string starting with 'f'
                        (forward) or 'b' (backward), case-insensitive
    :return: *df* with prices scaled in place
    """
    if adjust_type is None:
        return df
    kind = adjust_type.lower()
    if kind.startswith('f'):
        factor_col = 'forward_factor'
    elif kind.startswith('b'):
        factor_col = 'backward_factor'
    else:
        # Unrecognised type: leave the prices untouched.
        return df
    for price_col in ('Open', 'High', 'Low', 'Close'):
        df[price_col] = df[price_col] * df[factor_col]
    return df
def merge_adjust_factor(df, div):
    """
    Merge raw TDX bars with the dividend table so every bar row carries
    the applicable backward/forward adjustment factors; the combined
    table can be stored for later reuse.

    NOTE: mutates *div* by adding an ``index_datetime`` column (callers
    delete it afterwards).

    :param df: bar DataFrame indexed by datetime
    :param div: dividend DataFrame with a long ``time`` column plus
                backward_factor/forward_factor columns
    :return: *df* rows with the two factor columns attached
    """
    div['index_datetime'] = div['time'].apply(yyyyMMddHHmm_2_datetime)
    div = div.set_index('index_datetime')
    div = div[['backward_factor', 'forward_factor']]
    div.columns = ['backward_factor_tmp', 'forward_factor_tmp']
    df_div = pd.merge(df, div, left_index=True, right_index=True, how='outer')
    # Forward-fill so every bar between two events gets the factors of
    # the most recent event.
    df_div[['backward_factor', 'forward_factor']] = df_div[['backward_factor_tmp', 'forward_factor_tmp']].fillna(
        method='ffill')
    del df_div['backward_factor_tmp']
    del df_div['forward_factor_tmp']
    # Keep only the original bar rows.
    df_div = df_div.loc[df.index]
    return df_div
def read_h5_tdx(market, code, bar_size, h5_path, tdx_path, div_path):
    """
    Load bars for one instrument, preferring the pre-merged HDF5 daily
    file and falling back to the raw TDX file plus the dividend table.

    :param market: market prefix, e.g. 'sh' or 'sz'
    :param code: instrument code, e.g. '000001'
    :param bar_size: bar length in seconds (86400 = daily)
    :param h5_path: root of the HDF5 store
    :param tdx_path: root of the TDX vipdoc store
    :param div_path: directory of per-symbol dividend HDF5 files
    :return: DataFrame, or None when no raw data file exists
    """
    if bar_size == 86400:
        # Daily bars: try the pre-merged HDF5 file first.
        _h5_path = os.path.join(h5_path, get_h5_86400_path(market, code, bar_size))
        try:
            df = pd.read_hdf(_h5_path)
            # print(df.dtypes)
            df = filter_dataframe(df, 'DateTime', None, None, None)
            return df
        except Exception as e:
            # HDF5 read failed: fall through to the raw TDX file.
            print(e)
            pass
    else:
        # Other bar sizes read the raw data directly — simpler.
        pass
    _tdx_path = os.path.join(tdx_path, get_tdx_path(market, code, bar_size))
    try:
        df = read_file(_tdx_path)
    except FileNotFoundError:
        # No raw data file either.
        return None
    # The dividend file may be missing.
    _div_path = os.path.join(div_path, "%s%s.h5" % (market, code))
    try:
        div = pd.read_hdf(_div_path)
        df = merge_adjust_factor(df, div)
    except:
        # Usually file-not-found: no dividend info, use neutral factors.
        df['backward_factor'] = 1
        df['forward_factor'] = 1
    return df
def _export_dividend_from_data(tdx_root, dividend_output, daily_output, data):
    """
    Export one symbol's dividend table together with its adjusted daily
    bars.

    :param tdx_root: TDX installation root (holds the vipdoc trees)
    :param dividend_output: directory for per-symbol dividend .h5 files
    :param daily_output: directory for per-symbol daily-bar .h5 files
    :param data: ``(symbol, dividend_records)`` tuple
    :return:
    """
    symbol = data[0]
    divs = data[1]
    print(symbol)
    # Legacy branch for byte-string symbols coming from the DZH reader.
    if False:
        _symbol = symbol.lower().decode('utf-8')
    else:
        _symbol = symbol
    divs = divs[
        ['datetime', 'songgu_qianzongguben', 'peigu_houzongguben', 'peigujia_qianzongguben',
         'hongli_panqianliutong']]
    divs.columns = ['time', 'split', 'purchase', 'purchase_price', 'dividend']
    # TDX records per-10-share values, DZH per-share: convert to DZH.
    divs[['split', 'purchase', 'dividend']] /= 10
    # For debugging a single symbol:
    # if _symbol != 'sh510050':
    #     return
    dividend_output_path = os.path.join(dividend_output, _symbol + '.h5')
    if _symbol.startswith('sh'):
        daily_input_path = os.path.join(tdx_root, 'sh', 'lday', _symbol + '.day')
        daily_output_path = os.path.join(daily_output, 'sh', _symbol + '.h5')
    else:
        daily_input_path = os.path.join(tdx_root, 'sz', 'lday', _symbol + '.day')
        daily_output_path = os.path.join(daily_output, 'sz', _symbol + '.h5')
    try:
        # Delisted stocks (e.g. 000562) can have dividend records but no
        # bar file, which raises here.
        tdx_daily = read_file(daily_input_path)
        ndigits = 2
        # Stocks round to 2 decimals, funds (e.g. the 50 ETF) to 3.
        if not is_stock(_symbol):
            ndigits = 3
        df_divs = factor(tdx_daily, divs, ndigits)
        # Store the factors together with the bars so later readers can
        # skip the merge step.
        daily_divs = merge_adjust_factor(tdx_daily, df_divs)
        del df_divs['index_datetime']
        # Persist both outputs.
        bars_to_h5(daily_output_path, daily_divs)
        dividend_to_h5(dividend_output_path, df_divs)
    except Exception as e:
        print(e)
def export_dividend_daily_dzh(dzh_input, tdx_root, dividend_output, daily_output):
    """
    Export dividend data read from a DZH PWR file and regenerate the
    matching adjusted daily bars for every symbol.

    :param dzh_input: path to the DZH full.PWR file (None makes the
                      reader fetch it over HTTP)
    :param tdx_root: TDX installation root
    :param dividend_output: directory for dividend .h5 files
    :param daily_output: directory for daily-bar .h5 files
    :return:
    """
    io = DzhDividend(dzh_input)
    r = io.read()
    tic()
    multi = False
    if multi:
        # Parallel variant (currently disabled).
        pool_size = multiprocessing.cpu_count() - 1
        pool = multiprocessing.Pool(processes=pool_size)
        func = partial(_export_dividend_from_data, tdx_root, dividend_output, daily_output)
        pool_outputs = pool.map(func, list(r))
        print('Pool:', pool_outputs)
    else:
        # Serial variant.
        for d in list(r):
            _export_dividend_from_data(tdx_root, dividend_output, daily_output, d)
    toc()
def export_dividend_daily_gbbq(gbbq_input, tdx_root, dividend_output, daily_output):
    """
    Export dividend data read from a TDX gbbq CSV and regenerate the
    matching adjusted daily bars for every symbol.

    :param gbbq_input: path to the gbbq CSV (code/market/category cols)
    :param tdx_root: TDX installation root
    :param dividend_output: directory for dividend .h5 files
    :param daily_output: directory for daily-bar .h5 files
    :return:
    """
    df = pd.read_csv(gbbq_input, index_col=0, dtype={'code': str})
    # Keep only ex-rights records (category 1).
    df = df[df['category'] == 1]
    df['exchange'] = df['market'].replace(0, "sz").replace(1, 'sh')
    df['symbol'] = df['exchange'] + df['code']
    div_list = [(name, group) for name, group in df.groupby(by=['symbol'])]
    tic()
    multi = True
    if multi:
        # Parallel: leave one core free when there are more than two.
        pool_size = multiprocessing.cpu_count()
        if pool_size > 2:
            pool_size -= 1
        pool = multiprocessing.Pool(processes=pool_size)
        func = partial(_export_dividend_from_data, tdx_root, dividend_output, daily_output)
        pool_outputs = pool.map(func, div_list)
        print('Pool:', pool_outputs)
    else:
        # Serial variant, handy for debugging.
        for d in div_list:
            _export_dividend_from_data(tdx_root, dividend_output, daily_output, d)
    toc()
```
#### File: kquant_data/stock/tdx.py
```python
import os
import numpy as np
import pandas as pd
from ..xio.h5 import write_dataframe_set_struct_keep_head
from ..utils.xdatetime import yyyyMMddHHmm_2_datetime
# Record layout used when persisting bars to HDF5.
tdx_h5_type = np.dtype([
    ('DateTime', np.uint64),
    ('Open', np.float32),
    ('High', np.float32),
    ('Low', np.float32),
    ('Close', np.float32),
    ('Amount', np.float32),
    ('Volume', np.uint32),
    ('backward_factor', np.float32),
    ('forward_factor', np.float32),
])
def bars_to_h5(input_path, data):
    """Write a bar DataFrame to *input_path* as an HDF5 'BarData'
    dataset using the tdx_h5_type record layout."""
    write_dataframe_set_struct_keep_head(input_path, data, tdx_h5_type, 'BarData')
    return
def min_datetime_long(dt):
    """
    Decode a TDX packed minute timestamp into a float yyyyMMddHHmm.

    The low 16 bits pack the date (year offset from 2004 times 2048
    plus MMdd); the high bits hold minutes since midnight.

    :param dt: packed integer timestamp
    :return: float of the form yyyyMMddHHmm
    """
    # Using divmod avoids the int-to-long shift issue the original
    # author hit with ``dt << 16``.
    minutes_of_day, packed_date = divmod(dt, 1 << 16)
    year_offset, month_day = divmod(packed_date, 2048)
    year = year_offset + 2004
    hour, minute = divmod(minutes_of_day, 60)
    return float(year * 100000000.0 + month_day * 10000.0 + hour * 100 + minute * 1)
def day_datetime_long(dt):
    """
    Promote a yyyyMMdd day value to the minute format yyyyMMddHHmm
    (time part 0000), returned as float.

    :param dt: integer day of the form yyyyMMdd
    :return: float of the form yyyyMMdd0000
    """
    return float(dt) * 10000.0
def read_file(path, instrument_type='stock'):
    """
    Read a TDX local bar file (see http://www.tdx.com.cn/list_66_68.html).

    Local TDX data uses the day/lc1/lc5 extensions (two layouts); files
    downloaded from the TDX website use the '5' extension. Time
    precision is normalised to the minute throughout.

    :param path: path to the .day/.5/.lc1/.lc5 file
    :param instrument_type: 'stock' or 'option' (field layouts differ)
    :return: DataFrame indexed by datetime with a long DateTime column
    """
    columns = ['DateTime', 'Open', 'High', 'Low', 'Close', 'Amount', 'Volume', 'na']
    file_ext = os.path.splitext(path)[1][1:]
    if instrument_type == 'stock':
        ohlc_type = {'day': 'i4', '5': 'i4', 'lc1': 'f4', 'lc5': 'f4'}[file_ext]
        formats = ['i4'] + [ohlc_type] * 4 + ['f4'] + ['i4'] * 2
    elif instrument_type == 'option':
        ohlc_type = {'day': 'f4', '5': 'i4', 'lc1': 'f4', 'lc5': 'f4'}[file_ext]
        formats = ['i4'] + [ohlc_type] * 4 + ['i4'] + ['i4'] * 2
    date_parser = {'day': day_datetime_long,
                   '5': min_datetime_long,
                   'lc1': min_datetime_long,
                   'lc5': min_datetime_long,
                   }[file_ext]
    dtype = np.dtype({'names': columns, 'formats': formats})
    data = np.fromfile(path, dtype=dtype)
    df = pd.DataFrame(data)
    # Keep a long-typed copy of the time for convenient processing.
    df['DateTime'] = df['DateTime'].apply(date_parser)
    df['datetime'] = df['DateTime'].apply(yyyyMMddHHmm_2_datetime)
    df = df.set_index('datetime')
    # FIX: the positional axis argument to drop() was removed in
    # pandas 2.0; name the column explicitly.
    df = df.drop(columns='na')
    # Two of the layouts store scaled integer prices that need rescaling.
    if instrument_type == 'stock':
        if file_ext == 'day' or file_ext == '5':
            tmp = df.tail(10)
            r = tmp.Amount / tmp.Volume / tmp.Close
            # Recover the power-of-ten price scale factor.
            type_unit = np.power(10, np.round(np.log10(r))).median()
            # FIX: ``.ix`` was removed in pandas 1.0; iloc keeps the
            # original positional intent (only the four price columns).
            df.iloc[:, 1:5] = df.iloc[:, 1:5] * type_unit
    # Narrow the dtypes to save memory.
    df['DateTime'] = df['DateTime'].astype(np.uint64)
    df['Open'] = df['Open'].astype(np.float32)
    df['High'] = df['High'].astype(np.float32)
    df['Low'] = df['Low'].astype(np.float32)
    df['Close'] = df['Close'].astype(np.float32)
    df['Amount'] = df['Amount'].astype(np.float32)
    df['Volume'] = df['Volume'].astype(np.uint32)
    # print(df.dtypes)
    return df
def file_ext_2_bar_size(file_ext):
    """
    Map a TDX file extension to the bar length in seconds.

    :param file_ext: 'day', '5', 'lc1' or 'lc5'
    :return: bar size in seconds
    :raises KeyError: for unknown extensions
    """
    seconds_per_ext = {
        'day': 86400,
        '5': 300,
        'lc1': 60,
        'lc5': 300,
    }
    return seconds_per_ext[file_ext]
def bar_size_2_file_ext(bar_size):
    """
    Map a bar size in seconds to the TDX data-file extension.

    :param bar_size: bar length in seconds
    :return: file extension without the dot
    :raises KeyError: for unsupported bar sizes
    """
    ret = {
        86400: 'day',
        300: 'lc5',
        # FIX: 1-minute bars live in ``minline/*.lc1`` (see the path
        # examples in get_tdx_path); the original mapped 60 -> 'lc5',
        # producing paths like ``minline/sh000001.lc5`` that don't exist.
        60: 'lc1',
        5: '5',  # special case for files downloaded from the TDX website
    }[bar_size]
    return ret
def bar_size_2_folder(bar_size):
    """
    Map a bar size in seconds to the TDX vipdoc sub-folder name.

    :param bar_size: bar length in seconds
    :return: folder name
    :raises KeyError: for unsupported bar sizes
    """
    folder_by_size = {
        86400: 'lday',
        300: 'fzline',
        60: 'minline',
        5: '5',  # special case for files downloaded from the TDX website
    }
    return folder_by_size[bar_size]
def get_tdx_path(market, code, bar_size):
    """
    Relative path of a TDX bar file, e.g.
    vipdoc/sh/lday/sh000001.day or vipdoc/sh/fzline/sh000001.lc5.

    :param market: market prefix ('sh'/'sz')
    :param code: instrument code
    :param bar_size: bar length in seconds
    :return: relative path under the TDX root
    """
    sub_folder = bar_size_2_folder(bar_size)
    extension = bar_size_2_file_ext(bar_size)
    file_name = "%s%s.%s" % (market, code, extension)
    return os.path.join("vipdoc", market, sub_folder, file_name)
def get_h5_86400_path(market, code, bar_size):
    """
    Relative path of the daily-bar HDF5 file, e.g. 1day/sh/sh000001.h5.

    :param market: market prefix ('sh'/'sz')
    :param code: instrument code
    :param bar_size: accepted for signature symmetry with get_tdx_path;
                     not used
    :return: relative path under the HDF5 root
    """
    return os.path.join("1day", market, "%s%s.h5" % (market, code))
```
#### File: kquant_data/utils/xdatetime.py
```python
import time
from datetime import datetime
def tic():
"""
对应MATLAB中的tic
:return:
"""
globals()['tt'] = time.clock()
def toc():
"""
对应MATLAB中的toc
:return:
"""
t = time.clock() - globals()['tt']
print('\nElapsed time: %.8f seconds\n' % t)
return t
def yyyyMMddHHmm_2_datetime(dt):
    """
    Convert a long of the form yyyyMMddHHmm into the matching datetime.

    :param dt: numeric timestamp, minute precision
    :return: datetime
    """
    value = int(dt)  # also accepts floats coming from the bar files
    date_part, time_part = divmod(value, 10000)
    year, month_day = divmod(date_part, 10000)
    month, day = divmod(month_day, 100)
    hour, minute = divmod(time_part, 100)
    return datetime(year, month, day, hour, minute)
def yyyyMMdd_2_datetime(dt):
    """
    Convert a yyyyMMdd value to a midnight datetime; values whose year
    part is 0 yield None.

    :param dt: numeric day of the form yyyyMMdd
    :return: datetime or None
    """
    year, month_day = divmod(int(dt), 10000)
    if year == 0:
        return None
    month, day = divmod(month_day, 100)
    return datetime(year, month, day, 0, 0)
def datetime_2_yyyyMMdd____(dt):
    """
    Encode *dt*'s date as a float of the form yyyyMMdd0000 (the time
    part is zeroed).

    :param dt: datetime-like object with year/month/day
    :return: float
    """
    return float((dt.year * 10000.0 + dt.month * 100 + dt.day) * 10000.0)
def datetime_2_yyyyMMdd(dt):
    """
    Encode *dt*'s date as the integer yyyyMMdd.

    :param dt: datetime-like object with year/month/day
    :return: int
    """
    return int(dt.year * 10000 + dt.month * 100 + dt.day)
def datetime_2_MM(dt):
    """
    Extract the month number from *dt*.

    :param dt: datetime-like object
    :return: int month, 1-12
    """
    return dt.month
def datetime_2_yyyy(dt):
    """
    Extract the four-digit year from *dt*.

    :param dt: datetime-like object
    :return: int year
    """
    return dt.year
def datetime_2_yyyyMMddHHmm(dt):
    """
    Encode *dt* as a float of the form yyyyMMddHHmm (minute precision).

    :param dt: datetime-like object
    :return: float
    """
    date_part = (dt.year * 10000.0 + dt.month * 100 + dt.day) * 10000.0
    return float(date_part) + dt.hour * 100 + dt.minute
def datetime_keep_yyyyMMdd(dt):
    """
    Drop the time-of-day from *dt*, keeping only the date at midnight.

    Wind sometimes returns timestamps with a stray 05:00 tail, which
    breaks downstream processing; this normalises them.

    :param dt: datetime-like object
    :return: datetime at 00:00 of the same day
    """
    return datetime(dt.year, dt.month, dt.day, 0, 0)
```
#### File: kquant_data/wind_resume/tdays.py
```python
import pandas as pd
from ..wind.tdays import read_tdays, download_tdays, write_tdays
def resume_download_tdays(w, enddate, path):
    """
    增量下载 — incremental download of the trading-day calendar.

    When a calendar already exists at *path*, the download resumes from
    its last stored date (that date is requested again and may appear
    twice — see the note below); otherwise it starts at 1991-01-01.

    :param w: WindPy session
    :param enddate: last date to request
    :param path: CSV file holding the calendar
    :return:
    """
    df_old = read_tdays(path)
    if df_old is None:
        startdate = '1991-01-01'
    else:
        startdate = df_old.index[-1]
    df_new = download_tdays(w, startdate, enddate, option="")
    df = pd.concat([df_old, df_new])
    # May need de-duplication; it is also unclear whether a None piece
    # can take part in the concat (translated from the original note).
    write_tdays(path, df)
``` |
{
"source": "jiangtianyu2009/bop",
"score": 3
} |
#### File: bop/Comicbook/test_sort.py
```python
from sort import Sort
import random
import datetime
import copy
def test_sort_result():
    """Cross-check bubbleSort and quickSort on the same random input."""
    data = [random.randint(1, 999) for _ in range(1000)]
    bubble_input = copy.deepcopy(data)
    quick_input = copy.deepcopy(data)
    print('list_bubble: ', bubble_input)
    print('list_quick : ', quick_input)
    # Execute sort
    sorted_by_bubble = Sort().bubbleSort(bubble_input)
    print('list_bubble_sorted: ', sorted_by_bubble)
    sorted_by_quick = Sort().quickSort(quick_input)
    print('list_quick_sorted : ', sorted_by_quick)
    # Both algorithms must agree on the result.
    assert sorted_by_quick == sorted_by_bubble
if __name__ == "__main__":
    test_sort_result()
    # Left-over benchmarking scaffold comparing sort timings; kept as-is.
    # unsortedlist = []
    # for i in range(20):
    #     unsortedlist.append(random.randint(1, 99))
    # # Quick sort time cost
    # quick_time_start = datetime.datetime.now()
    # sortedlist = Sort().bubbleSort(unsortedlist)
    # quick_time_end = datetime.datetime.now()
    # quick_time_delta = quick_time_end - quick_time_start
    # print(quick_time_delta.total_seconds())
```
#### File: bop/Leetcode/Backspace_String_Compare.py
```python
class Solution:
    def backspaceCompare(self, S, T):
        """Return True when S and T render to the same text, treating
        '#' as a backspace.

        FIX: the original compared characters with ``is``/``is not``
        (object identity), which only works by CPython interning
        accident and raises SyntaxWarning on Python 3.8+; use equality.

        :type S: str
        :type T: str
        :rtype: bool
        """
        def _apply_backspaces(text):
            # Simulate typing *text* on a stack: '#' deletes the
            # previously typed character, if any.
            typed = []
            for ch in text:
                if ch == '#':
                    if typed:
                        typed.pop()
                else:
                    typed.append(ch)
            return typed

        return _apply_backspaces(S) == _apply_backspaces(T)
if __name__ == '__main__':
    # Both strings reduce to "c", so this prints True.
    print(Solution().backspaceCompare("a##c", "#a#c"))
```
#### File: bop/Leetcode/Best_Time_to_Buy_and_Sell_Stock_II.py
```python
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Best Time to Buy and Sell Stock II.

        Greedy: with unlimited transactions, the maximum profit is the
        sum of every positive day-over-day price increase.
        """
        total = 0
        for yesterday, today in zip(prices, prices[1:]):
            if today > yesterday:
                total += today - yesterday
        return total
```
#### File: bop/Leetcode/First_Unique_Character_in_a_String.py
```python
class Solution:
    def firstUniqChar(self, s):
        """Return the index of the first non-repeated character in *s*,
        or -1 when every character repeats (or *s* is empty)."""
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        print(counts)
        unique_key = self.firstUniqCharinDict(counts)
        return self.getIndex(unique_key, s)

    def firstUniqCharinDict(self, dict_char):
        """Return the first key (insertion order) whose count is 1,
        or None when there is no such key."""
        for key, count in dict_char.items():
            if count == 1:
                print(key)
                return key
        return None

    def getIndex(self, key, s):
        """Index of *key* within *s*; -1 when *key* is None."""
        if key is None:
            return -1
        position = s.index(key)
        print(position)
        return position
if __name__ == '__main__':
    s = "loveleetcode"
    # First unique character is 'v' at index 2 (result is printed by
    # the method itself, not here).
    Solution().firstUniqChar(s)
```
#### File: bop/Leetcode/Flipping_an_Image.py
```python
class Solution:
    def flipAndInvertImage(self, A):
        """Flip each row horizontally, then invert every bit.

        :type A: List[List[int]]
        :rtype: List[List[int]]
        """
        result = []
        for row in A:
            # Reverse the row, then invert it.
            result.append(Solution.revList(self, row[::-1]))
        return result

    def revList(self, subA):
        """Invert a row of bits: 1 -> 0, anything else -> 1."""
        return [0 if bit == 1 else 1 for bit in subA]
```
#### File: bop/Leetcode/Group_Anagrams.py
```python
from typing import List
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group words that are anagrams of each other.

        Groups appear in first-occurrence order of their letter-count
        signature; words keep their input order within a group.
        """
        def _letter_counts(word):
            # Character multiset as a plain (insertion-ordered) dict.
            counts = {}
            for ch in word:
                counts[ch] = counts.get(ch, 0) + 1
            return counts

        signatures = []
        for word in strs:
            sig = _letter_counts(word)
            if sig not in signatures:
                signatures.append(sig)
        print(signatures)
        id_to_signature = {}
        id_to_words = {}
        for idx in range(len(signatures)):
            id_to_signature[idx] = signatures[idx]
            id_to_words[idx] = []
        print(id_to_signature)
        print(id_to_words)
        for word in strs:
            sig = _letter_counts(word)
            for idx in range(len(signatures)):
                if id_to_signature[idx] == sig:
                    id_to_words[idx].append(word)
        print(id_to_words)
        grouped = []
        for idx in range(len(signatures)):
            grouped.append(id_to_words[idx])
        print(grouped)
        return grouped
if __name__ == "__main__":
    # Smoke tests run on direct execution.
    assert {"a": 1, "b": 1} == {"b": 1, "a": 1}  # dict equality ignores key order
    assert Solution().groupAnagrams(["ddddddddddg", "dgggggggggg"]) \
        == [['ddddddddddg'], ['dgggggggggg']]
    assert Solution().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"]) \
        == [['eat', 'tea', 'ate'], ['tan', 'nat'], ['bat']]
    assert Solution().groupAnagrams([""]) == [[""]]
    assert Solution().groupAnagrams(["a"]) == [["a"]]
```
#### File: bop/Leetcode/Leaf-Similar_Trees.py
```python
class TreeNode:
    """Binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Tree:
    """Builds a binary tree from a complete-binary-indexed element list."""

    def buildTree(self, elemlist):
        """Node i gets children at 2i+1 / 2i+2; None entries leave a slot empty.

        Equivalent to the original alternating left/right walk, which also
        attached nodelist[2i+1] as the left and nodelist[2i+2] as the right
        child of node i.
        """
        nodes = [TreeNode(elem) for elem in elemlist]
        for parent in range(len(nodes)):
            left, right = 2 * parent + 1, 2 * parent + 2
            if left < len(nodes) and nodes[left].val is not None:
                nodes[parent].left = nodes[left]
            if right < len(nodes) and nodes[right].val is not None:
                nodes[parent].right = nodes[right]
        return nodes[0]
class Solution:
    def leafSimilar(self, root1, root2):
        """
        :type root1: TreeNode
        :type root2: TreeNode
        :rtype: bool

        Two trees are leaf-similar when their left-to-right leaf value
        sequences are identical.  The original compared element-wise over
        root1's list only, raising IndexError when root2 had fewer leaves
        and wrongly returning True when it had extras; comparing the full
        lists fixes both.  Debug prints removed.
        """
        root1_list = []
        root2_list = []
        self.getleaf(root1, root1_list)
        self.getleaf(root2, root2_list)
        return root1_list == root2_list

    def getleaf(self, root, root_list):
        """Append the leaf values under *root* to *root_list*, left to right."""
        if root.left is None and root.right is None:
            root_list.append(root.val)
        elif root.right is None:
            self.getleaf(root.left, root_list)
        elif root.left is None:
            self.getleaf(root.right, root_list)
        else:
            self.getleaf(root.left, root_list)
            self.getleaf(root.right, root_list)
if __name__ == '__main__':
    # Compare the leaf sequences of two hand-built sample trees.
    tree_a = Tree().buildTree([3, 5, 1, 6, 2, 9, 8, None, None, 7, 4])
    tree_b = Tree().buildTree(
        [3, 5, 1, 6, 7, 4, 2, None, None, None, None, None, None, 9, 8])
    print(Solution().leafSimilar(tree_a, tree_b))
```
#### File: bop/Leetcode/Length_of_Last_Word.py
```python
class Solution:
    def lengthOfLastWord(self, s):
        """
        :type s: str
        :rtype: int

        Length of the last space-separated word, or 0 when there is none.
        Fixes two defects: `char is ' '` compared identity of an interned
        literal (fragile, SyntaxWarning on modern CPython) instead of
        equality, and trailing spaces reset the count so 'a ' returned 0
        rather than 1.
        """
        counter = 0
        # Strip trailing spaces so the final run counted is the last word.
        for char in s.rstrip(' '):
            if char == ' ':
                counter = 0
            else:
                counter += 1
        return counter
if __name__ == '__main__':
    # Demo with a single-letter input.
    print(Solution().lengthOfLastWord('a'))
```
#### File: bop/Leetcode/Longest_Substring_Without_Repeating_Characters.py
```python
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """
        Length of the longest substring of *s* without repeated characters.

        Sliding-window rewrite: track the last index of each character and
        advance the window start past the previous occurrence on a repeat.
        O(n) time / O(k) space instead of the original O(n^2) list scans
        and intermediate list-of-substrings.
        """
        last_seen = {}
        best = 0
        start = 0
        for i, char in enumerate(s):
            # A repeat only shrinks the window if it lies inside it.
            if char in last_seen and last_seen[char] >= start:
                start = last_seen[char] + 1
            last_seen[char] = i
            best = max(best, i - start + 1)
        return best
if __name__ == "__main__":
    solver = Solution()
    assert solver.lengthOfLongestSubstring("abcabcbb") == 3
    assert solver.lengthOfLongestSubstring("bbbbb") == 1
    assert solver.lengthOfLongestSubstring("pwwkew") == 3
    assert solver.lengthOfLongestSubstring("dvdf") == 3
```
#### File: bop/Leetcode/Move_Zeroes.py
```python
class Solution:
    def moveZeroes(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Move all zeros to the end while preserving the relative order of the
        non-zero elements.  Fixes the `num is 0` / `is not 0` identity
        comparisons (fragile; SyntaxWarning on modern CPython) and replaces
        the O(n^2) repeated `list.remove(0)` with a single O(n) two-pointer
        compaction pass.
        """
        write = 0
        # Compact non-zero values to the front, keeping their order.
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        # Zero-fill the tail.
        for i in range(write, len(nums)):
            nums[i] = 0
if __name__ == '__main__':
    # In-place demo: zeros migrate to the tail.
    sample = [0, 0, 1]
    Solution().moveZeroes(sample)
    print(sample)
```
#### File: bop/Leetcode/Number_of_Lines_To_Write_String.py
```python
class Solution:
    def numberOfLines(self, widths, S):
        """
        :type widths: List[int]
        :type S: str
        :rtype: List[int]

        Write S across lines of at most 100 units, where widths[i] is the
        width of the i-th lowercase letter; return [lines used, width of the
        last line].
        """
        lines, used = 0, 0
        for ch in S:
            w = widths[ord(ch) - 97]
            used += w
            if used > 100:
                # The character does not fit: start a new line with it.
                lines += 1
                used = w
        return [lines + 1, used]
```
#### File: bop/Leetcode/Rotate_String.py
```python
class Solution:
    def rotateString(self, A, B):
        """
        :type A: str
        :type B: str
        :rtype: bool

        B is a rotation of A iff the strings have equal length and B occurs
        as a substring of A doubled.
        """
        return len(A) == len(B) and B in A + A
```
#### File: bop/Leetcode/Squares_of_a_Sorted_Array.py
```python
class Solution:
    def sortedSquares(self, A: 'List[int]') -> 'List[int]':
        """Return the squares of A's elements in ascending order."""
        return sorted(value * value for value in A)
```
#### File: bop/Leetcode/Two_Sum.py
```python
class Solution:
    def twoSum(self, nums, target):
        """Return indices (i, j), i < j, with nums[i] + nums[j] == target.

        Single pass with a value -> index map of the elements seen so far;
        falls back to (0, 0) when no pair sums to target.
        """
        seen = {}
        for index, num in enumerate(nums):
            complement = target - num
            if complement in seen:
                return seen[complement], index
            seen[num] = index
        return 0, 0
if __name__ == '__main__':
    # Demo: indices of the pair summing to 9.
    print(Solution().twoSum([2, 7, 11, 15], 9))
```
#### File: GAE/simple/main.py
```python
__author__ = 'toby'
class student:
    """Record for one student parsed from a.txt ('num name grade' per line)."""
    # Class-level defaults; always shadowed by the instance attributes set in
    # __init__, but kept because they are part of the class's public surface.
    name = ''
    num = 0
    grade = 0
    def __init__(self, pname, pnum, pgrade):
        # Call site passes column 1 as pname and column 0 as pnum.
        self.name = pname
        self.num = pnum
        self.grade = pgrade
# Read 'num name grade' rows from a.txt into student records and print each
# student's name.  Fixes applied: Python-2 `print` statement -> print()
# function, `with` guarantees the file is closed even on error, each line is
# split only once, and the unused counter `i` is removed.
s = []
with open('a.txt') as file_object:
    for text in file_object:
        fields = text.split(' ')
        s.append(student(fields[1], fields[0], fields[2].strip('\n')))
for si in s:
    print(si.name)
``` |
{
"source": "jiangtianyu2009/goldenshark",
"score": 3
} |
#### File: goldenshark/archive/test_1.py
```python
import json
import os
import subprocess
import requests
from flask import Flask, jsonify, request, send_from_directory
from scrapinghub import ScrapinghubClient
# Remote list of names used to filter spider output (fetched in fetchcodelist).
NAME_LIST_URL = ('https://raw.githubusercontent.com/bsonnier/'
                 'bsonnier.github.io/master/docs/namelist')
# SECURITY NOTE(review): credentials are hard-coded in source; move the API
# key into an environment variable or a config file outside version control.
API_KEY = '11befd9da9304fecb83dfa114d1926e9'
PROJECT_ID = '252342'
def fetchcodelist():
    """Return items from the latest finished 'javorder' Scrapinghub job that
    match the search word in either their code or text field.

    NOTE(review): if no finished job exists, `javjob` is never bound and the
    print below raises NameError; an item matching both filters is appended
    twice; `namelist` is fetched but never used.
    """
    client = ScrapinghubClient(API_KEY, use_msgpack=False)
    project = client.get_project(PROJECT_ID)
    # Loop only to keep the last job yielded by iter_last.
    for jav_order_job in list(project.jobs.iter_last(
            spider='javorder', state='finished')):
        javjob = jav_order_job
    print(javjob['key'])
    jav_order_job = project.jobs.get(javjob['key'])
    output = []
    search_word = "JUL"
    # One filter per field; each filter triggers a separate items pass.
    filters = [("code", "contains", [search_word]),
               ("text", "contains", [search_word])]
    for f_ter in filters:
        for item in jav_order_job.items.iter(filter=[f_ter]):
            output.append(item)
    print(output)
    namelist = requests.get(NAME_LIST_URL).text.split()
    print(namelist)
    return output
if __name__ == "__main__":
    # Manual smoke-run of the fetch.
    fetchcodelist()
```
#### File: jiangtianyu2009/goldenshark/goldenshark.py
```python
import json
import os
import subprocess
import csv
from flask import Flask, jsonify, request, send_from_directory
# Flask app serving files from ./static at the site root.
app = Flask(__name__, static_url_path='')
PAGINATION = 20  # page size (currently unused below)
BASE_PAGE = 1    # first page number (currently unused below)
# In-memory row store, filled from data.csv by import_data().
mock_data = []
def cors_response(orig_res):
    """Jsonify *orig_res* and attach permissive CORS headers for the UI."""
    cors_res = jsonify(orig_res)
    cors_res.headers['Access-Control-Allow-Origin'] = '*'
    cors_res.headers['Access-Control-Allow-Methods'] = 'GET, POST'
    # NOTE(review): the backslash continues this string literal across the
    # line break; any leading whitespace on the next source line becomes part
    # of the header value -- confirm the emitted header is what is intended.
    cors_res.headers['Access-Control-Allow-Headers'] = 'x-requested-with,\
content-type'
    return cors_res
@app.route('/favicon.ico')
def favicon():
    """Serve the site icon from the static folder with an explicit MIME type."""
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico',
                               mimetype='image/vnd.microsoft.icon')
@app.route("/")
def index():
    """Plain-text health-check endpoint at the site root."""
    return 'Hello, World!'
@app.route("/goldenshark")
def goldenshark():
    """Serve the single-page UI from the static folder."""
    return app.send_static_file('goldenshark.html')
@app.route('/list', methods=['GET'])
def list_all():
    """Return every mock_data row (title/name/department/stage) as a
    CORS-enabled JSON object keyed by its position (0, 1, ...)."""
    global mock_data
    if request.method == 'GET':
        output = []
        outdict = {}
        for mock_item in mock_data:
            output.append({"title": mock_item["title"],
                           "name": mock_item["name"],
                           "department": mock_item["department"],
                           "stage": mock_item["stage"]})
        # jsonify needs a dict, so index the rows by position.
        for i, out in enumerate(output):
            outdict[i] = out
        # Return CORS response
        return cors_response(outdict)
@app.route('/search', methods=['GET'])
def list_search():
    """Case-sensitive substring search over title/name/department; returns
    the matching rows as a CORS-enabled JSON object keyed by position.
    With no ?word= parameter an empty object is returned."""
    if request.method == 'GET':
        output = []
        outdict = {}
        # Get para from URL like '/search?name=XX'
        # Get para from URL like '/search?makr=XX'
        # Get para from URL like '/search?word=XX'
        # if request.args.get("name"):
        #     search_name = request.args.get("name")
        #     filters = [("name", "=", [search_name])]
        # if request.args.get("makr"):
        #     search_makr = request.args.get("makr")
        #     filters = [("makr", "=", [search_makr])]
        if request.args.get("word"):
            search_word = request.args.get("word")
            for mock_item in mock_data:
                # A row is a hit if the word occurs in any searched field.
                is_hit = 0
                if search_word in mock_item["title"]:
                    is_hit = 1
                if search_word in mock_item["name"]:
                    is_hit = 1
                if search_word in mock_item["department"]:
                    is_hit = 1
                if is_hit:
                    output.append({"title": mock_item["title"],
                                   "name": mock_item["name"],
                                   "department": mock_item["department"],
                                   "stage": mock_item["stage"]})
        for i, out in enumerate(output):
            outdict[i] = out
        print(outdict)
        # Return CORS response
        return cors_response(outdict)
def import_data():
    """Load every row of data.csv into the module-level mock_data store."""
    global mock_data
    # utf-8-sig strips a leading BOM if the CSV was exported from Excel.
    with open('data.csv', encoding='utf-8-sig') as csvfile:
        for record in csv.DictReader(csvfile):
            mock_data.append(record)
if __name__ == '__main__':
    # Populate the in-memory store, then serve in debug mode.
    import_data()
    app.run(debug=True)
``` |
{
"source": "jiangtianyu2009/softcake",
"score": 3
} |
#### File: thzspider/spiders/librariesio.py
```python
import requests
import scrapy
BASE_URL = 'https://libraries.io/search?order=desc&per_page=100&platforms=PyPI&sort=dependent_repos_count&page='
class LibIOSpider(scrapy.Spider):
    """Scrape package names from a libraries.io PyPI search page and dump
    them into requirements.txt."""
    name = 'libio'
    start_urls = []
    # Accumulated across parse() calls; class-level, shared by all instances.
    pkg_names = []
    def __init__(self):
        # Only page 2 of the listing is queued.
        LibIOSpider.start_urls.append(BASE_URL + '2')
    def parse(self, response):
        for codeitem in response.css('div.project'):
            pkg_name = codeitem.css('a::text').extract_first()
            LibIOSpider.pkg_names.append(pkg_name)
            yield {
                'name': pkg_name,
            }
        print(LibIOSpider.pkg_names)
        # NOTE(review): requirements.txt is rewritten after every parsed page;
        # with multiple pages each pass writes the cumulative list again.
        f = open("requirements.txt", "w+")
        for name in LibIOSpider.pkg_names:
            f.write(name + '\n')
        f.close()
```
#### File: thzspider/spiders/nhtchn.py
```python
import requests
import scrapy
from scrapinghub import ScrapinghubClient
BASE_URL = 'https://nhentai.net/language/chinese/'
API_KEY = '11befd9da9304fecb83dfa114d1926e9'
PROJECT_ID = '252342'
class NhtchnSpider(scrapy.Spider):
    """Scrape gallery link/caption/thumbnail triples from the index page."""
    name = 'nhtchn'
    start_urls = []
    def __init__(self):
        # NOTE(review): the Scrapinghub client/project built here are neither
        # stored nor used -- confirm whether these two lines can be removed.
        client = ScrapinghubClient(API_KEY, use_msgpack=False)
        project = client.get_project(PROJECT_ID)
        NhtchnSpider.start_urls.append(BASE_URL)
    # div.contain .blue{color:blue;}
    # div.contain.blue{color:blue;}
    # The two selector rules above match the following elements, respectively:
    # 1
    # <!--descendant-->
    # <div class="contain">
    # contain
    # <span class="blue">blue</span>
    # </div>
    # 2
    # <!--multiple classes-->
    # <div class="contain blue">contain and blue</div>
    def parse(self, response):
        # One index container per page, holding many gallery cards.
        for codeitem in response.css('div.container.index-container'):
            for galleryitem in codeitem.css('div.gallery'):
                gallery_href = galleryitem.css('a::attr(href)').extract_first()
                gallery_caption = galleryitem.css(
                    'a .caption::text').extract_first()
                gallery_thumb = galleryitem.css(
                    'a .lazyload::attr(data-src)').extract_first()
                # id_code = codeitem.css('div.id::text').extract_first()
                # href_link = codeitem.css('a::attr(href)').extract_first()
                # img_small = codeitem.css('img::attr(src)').extract_first()
                yield {
                    'href': gallery_href,
                    'capt': gallery_caption,
                    'thmb': gallery_thumb,
                }
        # next_page = response.css('a.next::attr("href")').extract_first()
        # if next_page is not None:
        #     yield response.follow(next_page, self.parse)
``` |
{
"source": "Jiangtong-Li/ZHSIR",
"score": 2
} |
#### File: ZHSIR/src/main_d3shape.py
```python
import os
import random
import numpy as np
from scipy.spatial.distance import cdist
import cv2
import time
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
# import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from torch.optim import Adam
# from torch.utils.tensorboard import SummaryWriter
from scipy.spatial.distance import cdist
from package.model.d3shape import D3Shape
from package.loss.d3shape_loss import _D3Shape_loss
from package.dataset.data_d3shape import *
from package.args.d3shape_args import parse_config
from package.dataset.utils import make_logger
from package.model.utils import *
from package.loss.regularization import _Regularization
def update_lr(optimizer, lr):
    """Set the learning rate of every parameter group to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def save_fn(save_dir, it, pre=0, mAP=0):
    """Checkpoint path <save_dir>/models/Iter__<it>__<pre*1000>_<mAP*1000>.pkl,
    creating the models folder on the way."""
    models_dir = mkdir(join(save_dir, 'models'))
    fname = 'Iter__{}__{}_{}.pkl'.format(it, int(pre * 1000), int(mAP * 1000))
    return join(models_dir, fname)
def _try_load(args, logger, model, optimizer):
    """Restore model/optimizer state from a checkpoint and return its step count.

    With no --start_from, picks the checkpoint with the highest iteration
    number under <save_dir>/models, or returns 0 when none exists.
    --start_from may be an iteration number or a literal checkpoint path.
    """
    if args.start_from is None:
        # try to find the latest checkpoint
        files = os.listdir(mkdir(join(mkdir(args.save_dir), 'models')))
        if len(files) == 0:
            logger.info("Cannot find any checkpoint. Start new training.")
            return 0
        # File names look like Iter__<step>__<pre>_<map>.pkl; pick max <step>.
        latest = max(files, key=lambda name: int(name.split('\\')[-1].split('/')[-1].split('.')[0].split('__')[1]))
        checkpoint = join(args.save_dir, 'models', latest)
    else:
        # Interpret --start_from as an iteration number first, else as a path.
        try: checkpoint = save_fn(args.save_dir, str(int(args.start_from)))
        except: checkpoint = args.start_from
    logger.info("Load model from {}".format(checkpoint))
    ckpt = torch.load(checkpoint, map_location='cpu')
    model.load_state_dict(ckpt['model'])
    optimizer.load_state_dict(ckpt['optimizer'])
    return ckpt['steps']
def _extract_feats(data_test, model, what, skip=1, batch_size=16):
    """
    :param data_test: test Dataset
    :param model: network model
    :param what: SK or IMSK
    :param skip: skip a certain number of image/sketches to reduce computation
    :return: (features, labels) as two concatenated numpy arrays
    """
    # The original swapped the local names (features were accumulated in
    # `labels` and ids in `feats`); renamed here, return order unchanged.
    feats = []
    labels = []
    for xs, ids in data_test.traverse(what, skip=skip, batch_size=batch_size):
        feats.append(model(xs.cuda()).data.cpu().numpy())
        labels.append(ids.numpy())
    return np.concatenate(feats), np.concatenate(labels)
def _get_pre_from_matches(matches):
return np.mean(matches)
def _get_map_from_matches(matches):
s = 0
for match in matches:
count = 0
for rank, ismatch in enumerate(match):
s += ismatch * (count + 1) / (rank + 1)
count += ismatch
return s / matches.size
def _eval(feats_labels_sk, feats_labels_im, n):
"""
Refer to https://blog.csdn.net/JNingWei/article/details/78955536 for mAP calculation
:param feats_labels_sk: a two-element tuple [features_of_sketches, labels_of_sketches]
:param feats_labels_im: a two-element tuple [features_of_images, labels_of_images]
:param n: the top n elements used for evaluation
:return: precision@n, mAP@n
"""
# print("eval\n", len(feats_labels_sk[1]), len(feats_labels_im[1])) # 1099 581
dists = cdist(feats_labels_sk[0], feats_labels_im[0], 'euclidean')
# print("dists.shape=", dists.shape) # (1099, 581)
ranks = np.argsort(dists, 1) # (1099, 581)
ranksn = ranks[:, :n] # (1099, 50)
# print("dists ranks ranksn shape=", dists.shape, ranks.shape, ranksn.shape)
classesn = np.array([[feats_labels_im[1][i] == feats_labels_sk[1][r] for i in ranksn[r]] for r in range(len(ranksn))])
return _get_pre_from_matches(classesn), _get_map_from_matches(classesn)
def _parse_args_paths(args):
    """Resolve dataset folders and class splits from args, honouring the
    --sketch_dir/--imsk_dir overrides; normalises args.npy_dir in place."""
    if args.dataset == 'sketchy':
        sketch_folder, imsk_folder = SKETCH_FOLDER_SKETCHY, IMSKAGE_FOLDER_SKETCHY
        train_class, test_class = TRAIN_CLASS_SKETCHY, TEST_CLASS_SKETCHY
    elif args.dataset == 'tuberlin':
        sketch_folder, imsk_folder = SKETCH_FOLDER_TUBERLIN, IMSKAGE_FOLDER_TUBERLIN
        train_class, test_class = TRAIN_CLASS_TUBERLIN, TEST_CLASS_TUBERLIN
    else:
        raise Exception("dataset args error!")
    if args.sketch_dir != '':
        sketch_folder = args.sketch_dir
    if args.imsk_dir != '':
        imsk_folder = args.imsk_dir
    # '0' selects the default cache folder; '' disables the npy cache.
    if args.npy_dir == '0':
        args.npy_dir = NPY_FOLDER_SKETCHY
    elif args.npy_dir == '':
        args.npy_dir = None
    return sketch_folder, imsk_folder, train_class, test_class
def train(args):
    """Train D3Shape with the siamese pair loss, periodically evaluating
    precision@50 / mAP@50 on the held-out classes and checkpointing."""
    # Example cluster launch:
    # srun -p gpu --gres=gpu:1 --output=d3shape_sketchy.out python main_d3shape.py --steps 50000 --print_every 200 --npy_dir 0 --save_every 1000 --batch_size 8 --dataset sketchy --save_dir d3shape_sketchy
    sketch_folder, imsk_folder, train_class, test_class = _parse_args_paths(args)
    data_train = D3Shape_dataloader(folder_sk=sketch_folder, clss=train_class, folder_nps=args.npy_dir,
                                    folder_imsk=imsk_folder, normalize01=False, doaug=False)
    dataloader_train = DataLoader(dataset=data_train, batch_size=args.batch_size, shuffle=False)
    data_test = D3Shape_dataloader(folder_sk=sketch_folder, clss=test_class, folder_nps=args.npy_dir,
                                   folder_imsk=imsk_folder, normalize01=False, doaug=False)
    model = D3Shape()
    model.cuda()
    optimizer = Adam(params=model.parameters(), lr=args.lr)
    logger = make_logger(join(mkdir(args.save_dir), curr_time_str() + '.log'))
    # Resume from the latest checkpoint (0 when starting fresh) and extend
    # the step budget by the restored step count.
    steps = _try_load(args, logger, model, optimizer)
    logger.info(str(args))
    args.steps += steps
    d3shape_loss = _D3Shape_loss(cp=args.cp, cn=args.cn)
    model.train()
    l2_regularization = _Regularization(model, args.l2_reg, p=2, logger=None)
    # Outer while re-runs the dataloader until the step budget is exhausted;
    # the matching break at the bottom exits both loops.
    while True:
        loss_sum = []
        for _, (sketch1, imsk1, sketch2, imsk2, is_same) in enumerate(dataloader_train):
            optimizer.zero_grad()
            sketch1_feats, imsk1_feats = model(sketch1.cuda(), imsk1.cuda())
            sketch2_feats, imsk2_feats = model(sketch2.cuda(), imsk2.cuda())
            loss = d3shape_loss(sketch1_feats, imsk1_feats, sketch2_feats, imsk2_feats, is_same.cuda()) \
                   + l2_regularization()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            loss_sum.append(float(loss.item()))
            # Periodic evaluation + checkpoint.
            if (steps + 1) % args.save_every == 0:
                model.eval()
                n = 50; skip = 1
                start_cpu_t = time.time()
                feats_labels_sk = _extract_feats(data_test, lambda sk: model(sk, None)[0], SK, skip=skip,
                                                 batch_size=args.batch_size)
                feats_labels_imsk = _extract_feats(data_test, lambda imsk: model(None, imsk)[0], IMSK, skip=skip,
                                                   batch_size=args.batch_size)
                pre, mAP = _eval(feats_labels_sk, feats_labels_imsk, n)
                logger.info("Precision@{}: {}, mAP@{}: {}".format(n, pre, n, mAP) +
                            " " + 'step: {}, loss: {}, (eval cpu time: {}s)'.format(steps, np.mean(loss_sum),
                                                                                   time.time() - start_cpu_t))
                torch.save({'model': model.state_dict(),
                            'optimizer': optimizer.state_dict(),
                            'steps': steps,
                            'args': args},
                           save_fn(args.save_dir, steps, pre, mAP))
                model.train()
            if (steps + 1) % args.print_every == 0:
                logger.info('step: {}, loss: {}'.format(steps, np.mean(loss_sum)))
                loss_sum = []
            steps += 1
            if steps >= args.steps: break
        if steps >= args.steps: break
if __name__ == '__main__':
    # Parse CLI config and launch training.
    train(parse_config())
```
#### File: ZHSIR/src/main_regression.py
```python
import os
import random
import time
import numpy as np
from scipy.spatial.distance import cdist
import cv2
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from torch.optim import Adam, SGD
from torch.utils.tensorboard import SummaryWriter
from package.model.regression import Regressor
from package.loss.regularization import _Regularization
from package.dataset.data_cmd_translate import CMDTrans_data
from package.args.cvae_args import parse_config
from package.dataset.utils import make_logger
from package import cal_matrics_single
def update_lr(optimizer, lr):
    """Set the learning rate of every parameter group to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def train(args):
    """Train the triplet Regressor with L1/L2 regularisation, evaluating
    Precision@200 / mAP@200 every save_every steps and early-stopping after
    `patience` evaluations without a precision improvement."""
    writer = SummaryWriter()
    logger = make_logger(args.log_file)
    # Zero-shot vs non-zero-shot data pack.
    if args.zs:
        packed = args.packed_pkl_zs
    else:
        packed = args.packed_pkl_nozs
    logger.info('Loading the data ...')
    data = CMDTrans_data(args.sketch_dir, args.image_dir, args.stats_file, args.embedding_file,
                         packed, args.preprocess_data, args.raw_data, zs=args.zs, sample_time=1,
                         cvae=True, paired=False, cut_part=False)
    dataloader_train = DataLoader(dataset=data, num_workers=args.num_worker, \
                                  batch_size=args.batch_size,
                                  shuffle=args.shuffle)
    logger.info('Training sketch size: {}'.format(len(data.path2class_sketch.keys())))
    logger.info('Training image size: {}'.format(len(data.path2class_image.keys())))
    logger.info('Testing sketch size: {}'.format(len(data.path2class_sketch_test.keys())))
    logger.info('Testing image size: {}'.format(len(data.path2class_image_test.keys())))
    logger.info('Building the model ...')
    model = Regressor(args.raw_size, args.hidden_size, dropout_prob=args.dropout, logger=logger)
    logger.info('Building the optimizer ...')
    optimizer = Adam(params=model.parameters(), lr=args.lr, betas=(0.5, 0.999))
    l1_regularization = _Regularization(model, args.l1_weight, p=1, logger=logger)
    l2_regularization = _Regularization(model, args.l2_weight, p=2, logger=logger)
    if args.start_from is not None:
        logger.info('Loading pretrained model from {} ...'.format(args.start_from))
        ckpt = torch.load(args.start_from, map_location='cpu')
        model.load_state_dict(ckpt['model'])
        optimizer.load_state_dict(ckpt['optimizer'])
    if args.gpu_id != -1:
        model.cuda(args.gpu_id)
    optimizer.zero_grad()
    # Running loss accumulators, reset each report window.
    loss_tri_acm = 0.
    loss_l1_acm = 0.
    loss_l2_acm = 0.
    batch_acm = 0
    global_step = 0
    best_precision = 0.
    best_iter = 0
    patience = args.patience
    logger.info('Hyper-Parameter:')
    logger.info(args)
    logger.info('Model Structure:')
    logger.info(model)
    logger.info('Begin Training !')
    while True:
        if patience <= 0:
            break
        for sketch_batch, image_p_batch, image_n_batch, _semantics_batch in dataloader_train:
            sketch_batch = sketch_batch.float()
            image_p_batch = image_p_batch.float()
            image_n_batch = image_n_batch.float()
            # NOTE(review): `0 % args.print_every` is always 0, so these
            # conditions are just "global_step divisible by print/save_every".
            if global_step % args.print_every == 0 % args.print_every and global_step and batch_acm % args.cum_num == 0:
                logger.info('*** Iter {} ***'.format(global_step))
                logger.info('        Loss/Triplet {:.3}'.format(loss_tri_acm/args.print_every/args.cum_num))
                logger.info('        Loss/L1 {:.3}'.format(loss_l1_acm/args.print_every/args.cum_num))
                logger.info('        Loss/L2 {:.3}'.format(loss_l2_acm/args.print_every/args.cum_num))
                loss_tri_acm = 0.
                loss_l1_acm = 0.
                loss_l2_acm = 0.
            if global_step % args.save_every == 0 % args.save_every and batch_acm % args.cum_num == 0 and global_step :
                if not os.path.exists(args.save_dir):
                    os.mkdir(args.save_dir)
                torch.save({'args':args, 'model':model.state_dict(), 'optimizer':optimizer.state_dict()},
                           '{}/Iter_{}.pkl'.format(args.save_dir,global_step))
                ### Evaluation
                model.eval()
                image_label = list()
                image_feature = list()
                for image, label in data.load_test_images(batch_size=args.batch_size):
                    image = image.float()
                    if args.gpu_id != -1:
                        image = image.cuda(args.gpu_id)
                    image_label += label
                    tmp_feature = model.inference_image(image).cpu().detach().numpy()
                    image_feature.append(tmp_feature)
                image_feature = np.vstack(image_feature)
                sketch_label = list()
                sketch_feature = list()
                for sketch, label in data.load_test_sketch(batch_size=args.batch_size):
                    sketch = sketch.float()
                    if args.gpu_id != -1:
                        sketch = sketch.cuda(args.gpu_id)
                    sketch_label += label
                    tmp_feature = model.inference_sketch(sketch).cpu().detach().numpy()
                    sketch_feature.append(tmp_feature)
                sketch_feature = np.vstack(sketch_feature)
                Precision, mAP, = cal_matrics_single(image_feature, image_label, sketch_feature, sketch_label)
                writer.add_scalar('Precision_200/cosine', Precision, global_step)
                writer.add_scalar('mAP_200/cosine', mAP, global_step)
                logger.info('*** Evaluation Iter {} ***'.format(global_step))
                logger.info('        Precision {:.3}'.format(Precision))
                logger.info('        mAP {:.3}'.format(mAP))
                # Early stopping: reset patience on any precision improvement.
                if best_precision < Precision:
                    patience = args.patience
                    best_precision = Precision
                    best_iter = global_step
                    writer.add_scalar('Best/Precision_200', best_precision, best_iter)
                    logger.info('Iter {}, Best Precision_200 {:.3}'.format(global_step, best_precision))
                    torch.save({'args':args, 'model':model.state_dict(), \
                                'optimizer':optimizer.state_dict()}, '{}/Best.pkl'.format(args.save_dir))
                else:
                    patience -= 1
                    if patience <= 0:
                        break
                model.train()
            batch_acm += 1
            # Linear learning-rate warm-up.
            if global_step <= args.warmup_steps:
                update_lr(optimizer, args.lr*global_step/args.warmup_steps)
            if args.gpu_id != -1:
                sketch_batch = sketch_batch.cuda(args.gpu_id)
                image_p_batch = image_p_batch.cuda(args.gpu_id)
                image_n_batch = image_n_batch.cuda(args.gpu_id)
            loss = model(sketch_batch, image_p_batch, image_n_batch)
            loss_l1 = l1_regularization()
            loss_l2 = l2_regularization()
            loss_tri = loss.item()
            loss_l1_acm += (loss_l1.item() / args.l1_weight)
            loss_l2_acm += (loss_l2.item() / args.l2_weight)
            loss_tri_acm += loss_tri
            writer.add_scalar('Loss/Triplet', loss_tri, global_step)
            writer.add_scalar('Loss/Reg_l1', (loss_l1.item() / args.l1_weight), global_step)
            writer.add_scalar('Loss/Reg_l2', (loss_l2.item() / args.l2_weight), global_step)
            # NOTE(review): only the triplet loss is back-propagated; the
            # regularisation terms are logged but never added to loss_.
            loss_ = 0
            loss_ += loss
            loss_.backward()
            # Gradient accumulation: step every cum_num batches.
            if batch_acm % args.cum_num == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                optimizer.step()
                global_step += 1
                optimizer.zero_grad()
if __name__ == '__main__':
    # Parse CLI arguments and launch training.
    train(parse_config())
```
#### File: package/dataset/data_dsh.py
```python
import os
import csv
import time
import random
import pickle
import torchvision
from torchvision import transforms
from sklearn.decomposition import PCA
import cv2
import random
import numpy as np
import torch
from torch.utils.data import Dataset as torchDataset
from torchvision.transforms import Normalize
from package.dataset.utils import *
from package.dataset.data_san import RandomShift, npfn
try:
import gensim
except:
pass
join = os.path.join
# Modality indices used throughout this module.
SK = 0      # hand-drawn sketch
IMSK = 1    # sketch token extracted from the image
IM = 2      # natural image
IM_SIZE = 227   # image-branch input resolution
SK_SIZE = 200   # sketch-branch input resolution
PATH_SEMANTIC_MODEL = '../datasets/vecs/GoogleNews-vectors-negative300.bin'
PATH_SEMANTIC_SKETCHY = '../datasets/vecs/semantic_sketchy.pkl'
PATH_SEMANTIC_TUBERLIN = '../datasets/vecs/semantic_tuberlin.pkl'
SKETCH_FOLDER_SKETCHY = 'G:/f/SJTUstudy/labNL/ZS_SBIR/256x256/sketch/tx_000000000010'
IMSKAGE_FOLDER_SKETCHY = 'G:/f/SJTUstudy/labNL/SBIR_datasets/sketchy/sketch_tokens'
IMAGE_FOLDER_SKETCHY = 'G:/f/SJTUstudy/labNL/SBIR_datasets/sketchy/EXTEND_image_sketchy'
NPY_FOLDER_SKETCHY = '../datasets/npy_sketchy'
SKETCH_FOLDER_TUBERLIN = 'G:/f/SJTUstudy/labNL/SBIR_datasets/tuberlin/ImageResized'
IMSKAGE_FOLDER_TUBERLIN = 'G:/f/SJTUstudy/labNL/SBIR_datasets/tuberlin/png'
IMAGE_FOLDER_TUBERLIN = 'G:/f/SJTUstudy/labNL/SBIR_datasets/tuberlin/png'
NPY_FOLDER_TUBERLIN = ''
# NOTE(review): developer-machine override enabled unconditionally; gate it
# behind an environment variable or remove before deployment.
if True:
    PATH_SEMANTIC_SKETCHY = r'G:\f\SJTUstudy\labNL\SBIR_datasets\vecs\semantic_sketchy.pkl'
    NPY_FOLDER_SKETCHY = r'G:\f\SJTUstudy\labNL\SBIR_datasets\sketchy\npy_sketchy'
# Class-split names presumably come in via the star imports above; fall back
# to the default TRAIN_CLASS/TEST_CLASS split when the dataset-specific
# names are absent.
try:
    TEST_CLASS_SKETCHY = list(TEST_CLASS_SKETCHY)
    TRAIN_CLASS_SKETCHY = list(TRAIN_CLASS_SKETCHY)
except:
    # default
    TEST_CLASS_SKETCHY = list(TEST_CLASS)
    TRAIN_CLASS_SKETCHY = list(TRAIN_CLASS)
try:
    TEST_CLASS_TUBERLIN = list(TEST_CLASS_TUBERLIN)
    # NOTE(review): reads TRAIN_CLASS_SKETCHY, not TRAIN_CLASS_TUBERLIN --
    # looks like a copy-paste slip; confirm the intended source list.
    TRAIN_CLASS_TUBERLIN = list(TRAIN_CLASS_SKETCHY)
except:
    TEST_CLASS_TUBERLIN = list()
    TRAIN_CLASS_TUBERLIN = list()
class DSH_dataloader(torchDataset):
"""
ATTENTION: access to the dataset via the same index can result in different elements
"""
    def __init__(self, folder_saving, path_semantic, folder_sk=None, folder_im=None, folder_imsk=None, clss=None, normalize01=False, doaug=True,
                 folder_nps=None, m=300, logger=None):
        """
        Attribute:
            BS, ns * m
            BI, ni * m
            D, d * m
            vec_bs, ns * d
            vec_bi, ni * d
            Ws ~ ni * ns, implemented with memory mapping.
        :param folder_sk: sketch folder
        :param folder_im: image folder
        :param folder_imsk: image's sketch token folder.
                ATTENTION: this folder contains sketch tokens of the corresponding images, not images!
        :param clss: classes to load
        :param normalize01: whether normalize data to 0-1
        :param doaug: whether do data augmentation
        :param folder_nps: the folder saves npy files. This allow fewer inodes to save the datasets(the server
                does not allow too many inodes allocated). The folder should contain
                    classname1_sk.npy, classname1_imsk.npy, classname1_im.npy,
                    classname2_sk.npy, classname2_imsk.npy, classname1_im.npy,
                    ...
                1. If folder_nps is None, folder_sk and folder_imsk must be provided.
                2. If folder_nps is not None but no files exist in the folder, folder_sk and folder_im must be
                    provided, and such files would be created in folder_nps.
                3. If folder_nps is not None and files exist in the folder, load the files instead of those
                    in folder_sk and folder_imsk for training.
        :param path_semantic: path of the semantic vector(xxx.pkl). It should be a dict: {class_name1: [b1, b2, ...],
                class_name2: [b1, b2, ...]}
        :param m: number of binary bits
        :param folder_saving: folder to save/load binary codes
        :param logger: logger to debug.
        """
        super(DSH_dataloader, self).__init__()
        self.idx2skim_pair = []
        self.logger = logger
        self.normalize01 = normalize01
        self.doaug = doaug
        self._build_trans()
        self.cls2idx = {}
        self.idx2cls = []
        self.semantics = []
        # Per-modality sample counts, indexed by SK/IMSK/IM.
        self.lens = [0, 0, 0]
        self.folder_saving = folder_saving
        self.clss = clss
        self.m = m
        self.vec_bi = []
        self.vec_bs = []
        self.label_all_i = []
        self.label_all_s = []
        # Raw-data folders, indexed by the SK/IMSK/IM modality constants.
        folders = [folder_sk, folder_imsk, folder_im]
        if not os.path.exists(folder_saving):
            os.mkdir(folder_saving)
        semantics = pickle.load(open(path_semantic, 'rb'))
        if folder_nps and not os.path.exists(folder_nps):
            os.mkdir(folder_nps)
        for name in clss:
            self.semantics.append(semantics[name])
            if all([os.path.exists(str(fd)) for fd in folders]):
                sks_folder = join(folders[SK], name)
                imsks_folder = join(folders[IMSK], name)
                ims_folder = join(folders[IM], name)
            # print(folder_nps, name, join(folder_nps, npfn(name + '_imsk')), os.path.exists(join(folder_nps, npfn(name + '_imsk'))))
            # Prefer the cached per-class .npy arrays; otherwise read, resize
            # and (when folder_nps is set) cache the raw images.
            if folder_nps and os.path.exists(join(folder_nps, npfn(name + '_imsk'))):
                data_of_name = [np.load(join(folder_nps, npfn(name + '_sk'))),
                                np.load(join(folder_nps, npfn(name + '_imsk'))),
                                np.load(join(folder_nps, npfn(name + '_im2')))]
                # print(data_of_name[SK].shape, data_of_name[IMSK].shape, data_of_name[IM].shape)
            else:
                data_of_name = self._get_data_from_ims(sks_folder=sks_folder, imsks_folder=imsks_folder,
                                                       ims_folder=ims_folder)
                data_of_name = self._process(data_of_name)
                self._try_save_ims(folder_nps=folder_nps, name=name, data_of_name=data_of_name)
            for i in range(3):
                self.lens[i] += len(data_of_name[i])
            # One semantic vector per image/sketch of this class.
            self.vec_bi += [semantics[name] for _ in range(len(data_of_name[IM]))]
            self.vec_bs += [semantics[name] for _ in range(len(data_of_name[SK]))]
            self.idx2skim_pair.append(data_of_name)
            self.cls2idx[name] = len(self.idx2cls)
            self.idx2cls.append(name)
            self.label_all_i.append(np.zeros(len(data_of_name[IM])) + self.cls2idx[name])
            self.label_all_s.append(np.zeros(len(data_of_name[SK])) + self.cls2idx[name])
        self.semantics = np.asarray(self.semantics)
        self._print('Dataset loaded from folder_sk:{}, folder_imsk:{}, folder_im:{}, folder_nps:{}, sk_len:{},\
imsk_len:{}, im_len:{}'.format(
            folder_sk, folder_imsk, folder_im, folder_nps, self.lens[SK], self.lens[IMSK], self.lens[IM]))
        self.vec_bs = np.asarray(self.vec_bs)
        self.vec_bi = np.asarray(self.vec_bi)
        self.label_all_i = np.hstack(self.label_all_i)
        self.label_all_s = np.hstack(self.label_all_s)
        # Build/load the pairwise similarity matrix, binary codes and
        # dictionary once all labels are known.
        self._init_W(label_all_i=self.label_all_i, label_all_s=self.label_all_s)
        self._init_B()
        self._init_D()
        self._print('Dataset init done.')
        # print("len(self.idx2skim_pair)=", len(self.idx2skim_pair))
def _init_W(self, label_all_i, label_all_s):
file = join(self.folder_saving, 'W_tmp.npmm')
if os.path.exists(file):
self.W = np.memmap(file, dtype=np.float16, shape=(len(label_all_i), len(label_all_s)), mode='r')
return None
W = np.memmap(file, dtype=np.float16, shape=(len(label_all_i), len(label_all_s)), mode='w+')
for i in range(len(label_all_i)):
W[i] = [(s == label_all_i[i]) * 2 - 1 for s in label_all_s]
self.W = W
def _print(self, s):
print(s) if self.logger is None else self.logger.info(s)
def _init_D(self):
d_file = join(self.folder_saving, npfn('d'))
if os.path.exists(d_file):
self._print("Init D matrix from {}".format(d_file))
self.D = np.load(d_file)
else:
self._print("Reinit D matrix. It is OK since D can be inferred from BI and BS.")
self.D = np.random.rand(self.d(), self.m)
    def d(self):
        """Dimensionality of the semantic vectors."""
        return len(self.semantics[0])
    def _get_data_from_ims(self, sks_folder, ims_folder, imsks_folder):
        """Read and prep the raw sketch / sketch-token / image files of one class.

        NOTE(review): the sketch list iterates a fresh, unsorted os.listdir and
        does not apply the `paths` intersection computed below (unlike the
        other two lists) -- confirm that is intentional.
        """
        paths = [path.split('.')[0] for path in sorted(os.listdir(sks_folder))]
        # Keep only stems present in both the sketch and sketch-token folders.
        paths = [fn.split('.')[0] for fn in sorted(os.listdir(imsks_folder)) if fn.split('.')[0] in paths]
        data_of_name = [[self._prep_img(join(sks_folder, path)) for path in os.listdir(sks_folder)
                         if path.endswith('.jpg') or path.endswith('.png')],
                        [self._prep_img(join(imsks_folder, path)) for path in sorted(os.listdir(imsks_folder))
                         if ((path.endswith('.jpg') or path.endswith('.png')) and path.split('.')[0] in paths)],
                        [self._prep_img(join(ims_folder, path)) for path in sorted(os.listdir(ims_folder))
                         if ((path.endswith('.jpg') or path.endswith('.png')) and path.split('.')[0] in paths)]]
        return data_of_name
    def _init_B(self):
        """Load saved binary code matrices BI/BS, or random-init them to +/-1."""
        folder_saving = self.folder_saving
        m = self.m
        folder_bi = join(folder_saving, npfn('bi'))
        folder_bs = join(folder_saving, npfn('bs'))
        if os.path.exists(folder_bi) and os.path.exists(folder_bs):
            self._print("Init BI matrix from {} and BS matrix from {}.".format(folder_bi, folder_bs))
            self.BI = np.load(join(folder_saving, npfn('bi')))
            self.BS = np.load(join(folder_saving, npfn('bs')))
        else:
            self._print("Reinit BI and BS matrix!")
            # Random +/-1 codes: BI is lens[IM] x m, BS is lens[SK] x m.
            self.BI = np.random.randint(0, 2, [self.lens[IM], m]) * 2 - 1
            self.BS = np.random.randint(0, 2, [self.lens[SK], m]) * 2 - 1
    def _process(self, data_of_name):
        """Resize each modality to its network input size and pack it as a
        uint8 array; images keep 3 channels, sketches/tokens keep one."""
        for i in range(3):
            # In the DSH configuration, input of C2-Net is a single-channel image
            # while input of C1-Net is a three-channel image
            if i == IM:
                data_of_name[i] = [cv2.resize(img, (IM_SIZE, IM_SIZE)) \
                    if img.shape != (IM_SIZE, IM_SIZE, 3) else img for img in data_of_name[i]]
            else:
                data_of_name[i] = [cv2.resize(img, (SK_SIZE, SK_SIZE))[:, :, 0] \
                    if img.shape != (SK_SIZE, SK_SIZE, 3) else img[:, :, 0] for img in data_of_name[i]]
            data_of_name[i] = np.asarray(data_of_name[i], dtype=np.uint8)
        return data_of_name
def _try_save_ims(self, folder_nps, name, data_of_name):
    """Cache the per-class arrays as .npy files under `folder_nps` (if set).

    Existing cache files are never overwritten. Also asserts that images
    and sketch tokens are paired one-to-one for class `name`.
    """
    if folder_nps:
        if not os.path.exists(join(folder_nps, npfn(name + '_imsk'))):
            np.save(join(folder_nps, npfn(name + '_imsk')), data_of_name[IMSK])
        if not os.path.exists(join(folder_nps, npfn(name + '_sk'))):
            np.save(join(folder_nps, npfn(name + '_sk')), data_of_name[SK])
        if not os.path.exists(join(folder_nps, npfn(name + '_im2'))):
            np.save(join(folder_nps, npfn(name + '_im2')), data_of_name[IM])
    assert len(data_of_name[IM]) == len(data_of_name[IMSK]), 'Sketch token and images must satisfy one-to-one \
correspondence. (Error while disposing class {})'.format(name)
def _build_trans(self):
    """Build the torchvision transforms for each modality.

    With augmentation (self.doaug) a random shift and horizontal flip are
    applied; image inputs are additionally normalized with CIFAR-style
    channel statistics. Sketches and sketch tokens share one transform.
    """
    self.trans_im = transforms.Compose([
        RandomShift(),
        transforms.RandomHorizontalFlip(),
        # transforms.RandomRotation(5),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ]) if self.doaug else \
        transforms.Compose([transforms.ToTensor(),
                            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    self.trans_sk = transforms.Compose([
        RandomShift(),
        transforms.RandomHorizontalFlip(),
        # transforms.RandomRotation(5),
        transforms.ToTensor()
    ]) if self.doaug else \
        transforms.Compose([transforms.ToTensor()])
    # index the transforms by modality constant (IM / SK / IMSK)
    self.trans = [0,0,0]
    self.trans[IM] = self.trans_im
    self.trans[SK] = self.trans_sk
    self.trans[IMSK] = self.trans_sk
def save_params(self):
    """Persist the hash codes (BI, BS) and the matrix D to `folder_saving`."""
    np.save(join(self.folder_saving, npfn('bi')), self.BI)
    np.save(join(self.folder_saving, npfn('bs')), self.BS)
    np.save(join(self.folder_saving, npfn('d')), self.D)
def __del__(self):
    """Save learned parameters when the loader is garbage collected.

    NOTE(review): relying on __del__ for persistence is fragile -- it is not
    guaranteed to run at interpreter shutdown; prefer calling save_params()
    explicitly as well.
    """
    # super(DSH_dataloader, self).__del__()
    self.save_params()
def _prep_img(self, path):
    """Read an image, convert BGR->RGB, resize to IM_SIZE and optionally
    scale pixel values to [0, 1] (when self.normalize01 is set)."""
    # print(path)
    img = cv2.imread(path)
    img = img.copy()[:,:,::-1]  # BGR -> RGB
    if img.shape != (IM_SIZE,IM_SIZE,3):
        img = cv2.resize(img, (IM_SIZE, IM_SIZE))
    if self.normalize01:
        img = img / 255.0
    return img
def __getitem__(self, cls_idx):
    """
    :param cls_idx: class index (wrapped modulo the number of classes)
    :return: a tensor list [sketch, code_of_sketch, image, sketch_token, code_of_image]
    """
    cls_idx %= len(self.idx2skim_pair)
    # print(cls_idx, SK, IM, len(self.idx2skim_pair[cls_idx][SK]), len(self.idx2skim_pair[cls_idx][IM]))
    # sample one sketch and one image from the chosen class
    sk_idx = np.random.randint(0, len(self.idx2skim_pair[cls_idx][SK]))
    im_idx = np.random.randint(0, len(self.idx2skim_pair[cls_idx][IM]))
    # NOTE(review): sk_idx/im_idx are *per-class* indices, while BS/BI appear
    # to hold one row per sketch/image of the whole dataset (see _init_B) --
    # confirm the intended global-index mapping here.
    return [self.trans_sk(self.idx2skim_pair[cls_idx][SK][sk_idx]), self.BS[sk_idx].astype(np.float32),
            self.trans_im(self.idx2skim_pair[cls_idx][IM][im_idx]),
            self.trans_sk(self.idx2skim_pair[cls_idx][IMSK][im_idx]), self.BI[im_idx].astype(np.float32)]
def traverse(self, what, batch_size=16, skip=1):
    """
    :param what: SK or IM
    :param batch_size: batch size of the traversing
    :param skip: skip >= 2 allows to skip some images/sketches to reduce computation. (Used for debugging)
    :return: yield a four-element list [sketches, sketch_tokens, images, id]
    """
    it = 0
    assert what == IM or what == SK, "DSH_dataloader.traverse: what must be IM({})/SK({}), but get {}"\
        .format(IM, SK, what)
    rets_ims = []; rets_ids = []; rets_imsks = []
    for id, xs in enumerate(self.idx2skim_pair):
        for i, x in enumerate(xs[what]):
            it += 1
            if it % skip == 0:
                rets_ims.append(self.trans[what](x))
                if what == IM:
                    # images are yielded together with their sketch tokens
                    rets_imsks.append(self.trans_sk(xs[IMSK][i]))
                rets_ids.append(id)
                if len(rets_ims) == batch_size:
                    if what == IM:
                        yield None, torch.stack(rets_imsks, dim=0), torch.stack(rets_ims, dim=0), torch.tensor(rets_ids)
                    else:
                        yield torch.stack(rets_ims, dim=0), None, None, torch.tensor(rets_ids)
                    rets_ims = []; rets_ids = []; rets_imsks = []
    # Avoid single element returned for batch normalization's convenience
    if len(rets_ids) >= 1:
        if what == IM:
            yield None, torch.stack(rets_imsks, dim=0), torch.stack(rets_ims, dim=0), torch.tensor(rets_ids)
        else:
            yield torch.stack(rets_ims, dim=0), None, None, torch.tensor(rets_ids)
def __len__(self):
    """Length of the longest modality (classes are sampled modulo in __getitem__)."""
    return max(self.lens)
def _create_im2(folder_im, folder_imsk, folder_im2):
    """For every class folder, cache (as one .npy file) all images that have
    a matching sketch-token file, resized to IMAGE_SIZE.

    NOTE(review): this helper resizes with IMAGE_SIZE while the loader above
    uses IM_SIZE -- confirm the two constants agree.
    """
    if not os.path.exists(folder_im2):
        os.mkdir(folder_im2)
    for cls in os.listdir(folder_im):
        ims = []
        # file stems that have a sketch token for this class
        fils_imsk = set([f.split('.')[0] for f in os.listdir(join(folder_imsk, cls))])
        for name in os.listdir(join(folder_im, cls)):
            if name.split('.')[0] not in fils_imsk:
                continue
            img = cv2.imread(join(folder_im, cls, name))
            img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
            ims.append(img)
        print(cls, len(fils_imsk), len(ims))
        np.save(join(folder_im2, cls + npfn('_im2')), np.asarray(ims, dtype=np.uint8))
def gen_vec_sketchy():
    """Generate and pickle the word2vec semantic vectors for the Sketchy classes.

    Each class name is looked up in the pretrained GoogleNews embeddings,
    falling back to SEMANTICS_REPLACE for names absent from the vocabulary.
    The resulting {class_name: vector} dict is pickled to disk.
    """
    path_vec = '../datasets/vecs/GoogleNews-vectors-negative300.bin'
    word2vec = gensim.models.KeyedVectors.load_word2vec_format(path_vec, binary=True, unicode_errors='....')
    semantics = {}
    for idx, item in enumerate(TEST_CLASS_SKETCHY + TRAIN_CLASS_SKETCHY):
        semantics[item] = word2vec[item] if item in word2vec else word2vec[SEMANTICS_REPLACE[item]]
        print(len(semantics), len(semantics[item]))
    # Bug fix: pickle.dump requires an open (binary) file object as its second
    # argument -- passing a path string raises TypeError.
    with open('../datasets/vecs/sketchy.pkl', 'wb') as f:
        pickle.dump(semantics, f)
def _test():
    """Smoke test: build a 2-class DSH loader and fetch one sample."""
    ds = DSH_dataloader(clss=TRAIN_CLASS_SKETCHY[:2], doaug=False, folder_nps=NPY_FOLDER_SKETCHY,
                        path_semantic=PATH_SEMANTIC_SKETCHY, folder_saving='test_dsh_ds')
    sketch, code_of_sketch, image, sketch_token, code_of_image = ds[4]
    print(sketch.shape, code_of_sketch.shape, image.shape, sketch_token.shape, code_of_image.shape)
def _test_W():
    """Sanity check for the similarity matrix W: W[i, s] is +1 when image i
    and sketch s share a label, otherwise -1."""
    label_all_s = np.array([0,0,1,2,2])
    label_all_i = np.array([0,1,1,2])
    W = np.asarray([[((i == s) * 2 - 1) for s in label_all_s] for i in label_all_i], dtype=np.int16)
    print('A1=\n', label_all_i, '\n', 'A2=\n',label_all_s, '\n', 'B=\n',W)
    # NOTE(review): shapes (5,) and (4,) do not broadcast -- this comparison
    # does not produce an element-wise result; confirm intent.
    eqs = (label_all_s == label_all_i)
if __name__=="__main__":
    # run the similarity-matrix sanity check by default
    _test_W()
    pass
    # create_im2(r'G:\f\SJTUstudy\labNL\SBIR_datasets\sketchy\EXTEND_image_sketchy', r'G:\f\SJTUstudy\labNL\SBIR_datasets\sketchy\sketch_tokens', r'G:\f\SJTUstudy\labNL\SBIR_datasets\sketchy\im2')
```
#### File: package/dataset/data_zsih.py
```python
import os
import csv
import time
import random
import pickle
import gensim
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset as torchDataset
from torchvision.transforms import Normalize, ToTensor
from package.dataset.utils import match_filename, TEST_CLASS, TRAIN_CLASS, IMAGE_SIZE, SEMANTICS_REPLACE
class ZSIH_dataloader(torchDataset):
    """Dataset for ZSIH (zero-shot sketch-image hashing).

    Indexes sketches and images by class for train/test and carries the
    pretrained word-embedding matrix for class semantics. With zs=True the
    split is by class (zero-shot); otherwise each test class is split 50/50
    into train and test samples (see load()).
    """
    def __init__(self, sketch_dir, image_dir, stats_file, embedding_file, loaded_data, normalize=False, zs=True):
        super(ZSIH_dataloader, self).__init__()
        self.sketch_dir = sketch_dir
        self.image_dir = image_dir
        self.stats_file = stats_file
        self.embedding_file = embedding_file
        self.normalize = normalize
        self.loaded_data = loaded_data  # path of the pickle cache built by load()
        self.ToTensor = ToTensor()
        # ImageNet channel statistics
        self.Normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.train_class = TRAIN_CLASS
        self.test_class = TEST_CLASS
        self.overall_class = self.train_class | self.test_class
        self.zs = zs
        self.class2path_sketch = dict()  # class: set(path) | for sketch | for train
        self.class2path_image = dict()  # class: set(path) | for image | for train
        self.path2class_sketch = dict()  # path: class | for sketch | for train
        self.path2class_image = dict()  # path: class | for image | for train
        self.class2path_sketch_test = dict()  # class: set(path) | for sketch | for test
        self.class2path_image_test = dict()  # class: set(path) | for image | for test
        self.path2class_sketch_test = dict()  # path: class | for sketch | for test
        self.path2class_image_test = dict()  # path: class | for image | for test
        self.id2path = list()  # path list corresponding to path2class_sketch
        self.loaded_image = dict()  # path: loaded image
        self.class2id = dict()  # class name to semantics index
        self.id2class = list()  # semantics index to class name
        # one 300-d word vector per class, filled by load()
        self.pretrain_embedding = np.zeros((len(list(self.train_class)) + len(list(self.test_class)), 300))
        self.load()
        random.shuffle(self.id2path)
def __getitem__(self, index):
    """Return (sketch tensor, same-class image tensor, class-id array)."""
    sketch = self.load_each_image_use(self.id2path[index])
    cla = self.path2class_sketch[self.id2path[index]]
    image, _path = self.pair_similar(cla)
    semantics = np.zeros((1))
    semantics[0] = self.class2id[cla]
    return sketch, image, semantics
def __len__(self):
    """Number of training sketches."""
    return len(self.id2path)
def pair_similar(self, cls):
    """Sample a random training image of class `cls`; return (tensor, path)."""
    path_list = list(self.class2path_image[cls])
    path = random.choice(path_list)
    return self.load_each_image_use(path), path
def load_each_image_use(self, path):
    """Load one image/sketch from `path` and return a normalized CHW tensor.

    Grayscale inputs are expanded to three channels; everything is resized
    to IMAGE_SIZE before ToTensor/Normalize.

    :raises IOError: if the image cannot be read from disk.
    """
    image = cv2.imread(path)
    # Bug fix: cv2.imread returns None for unreadable paths; the old bare
    # `except` only printed the path and then crashed on image.shape below.
    if image is None:
        raise IOError('Failed to load image: {}'.format(path))
    if image.ndim == 2:
        # 2-D grayscale array -> HWC with three identical channels
        image = np.stack([image, image, image], axis=2)
    elif image.shape[2] == 1:
        image = np.concatenate([image, image, image], 2)
    if image.shape != (IMAGE_SIZE, IMAGE_SIZE, 3):
        image = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
    image = self.ToTensor(image)
    image = self.Normalize(image)
    return image
def load_test_images(self, batch_size=512):
    """Yield (stacked image batch, label list) over all test images.

    :param batch_size: number of images per yielded batch.
    """
    ims = []
    label = []
    for path in self.path2class_image_test.keys():
        ims.append(self.load_each_image_use(path))
        label.append(self.path2class_image_test[path])
        if len(ims) == batch_size:
            yield torch.stack(ims), label
            ims = []
            label = []
    # Bug fix: when the item count is an exact multiple of batch_size the old
    # code called torch.stack([]) on an empty list, raising RuntimeError.
    if ims:
        yield torch.stack(ims), label
def load_test_sketch(self, batch_size=512):
    """Yield (stacked sketch batch, label list) over all test sketches.

    :param batch_size: number of sketches per yielded batch.
    """
    ims = []
    label = []
    for path in self.path2class_sketch_test.keys():
        ims.append(self.load_each_image_use(path))
        label.append(self.path2class_sketch_test[path])
        if len(ims) == batch_size:
            yield torch.stack(ims), label
            ims = []
            label = []
    # Bug fix: avoid torch.stack([]) (RuntimeError) when the item count is an
    # exact multiple of batch_size.
    if ims:
        yield torch.stack(ims), label
def load_train_images(self, batch_size=512):
    """Yield (stacked image batch, label list) over all training images.

    :param batch_size: number of images per yielded batch.
    """
    ims = []
    label = []
    for path in self.path2class_image.keys():
        ims.append(self.load_each_image_use(path))
        label.append(self.path2class_image[path])
        if len(ims) == batch_size:
            yield torch.stack(ims), label
            ims = []
            label = []
    # Bug fix: avoid torch.stack([]) (RuntimeError) when the item count is an
    # exact multiple of batch_size.
    if ims:
        yield torch.stack(ims), label
def load_train_sketch(self, batch_size=512):
    """Yield (stacked sketch batch, label list) over all training sketches.

    :param batch_size: number of sketches per yielded batch.
    """
    ims = []
    label = []
    for path in self.path2class_sketch.keys():
        ims.append(self.load_each_image_use(path))
        label.append(self.path2class_sketch[path])
        if len(ims) == batch_size:
            yield torch.stack(ims), label
            ims = []
            label = []
    # Bug fix: avoid torch.stack([]) (RuntimeError) when the item count is an
    # exact multiple of batch_size.
    if ims:
        yield torch.stack(ims), label
def load(self):
    """Build (or restore) all path/class indices and the embedding matrix.

    If the pickle cache at self.loaded_data exists, every index is restored
    from it and disk scanning / word2vec loading is skipped. Otherwise the
    directories are scanned, the split is built (zero-shot by class when
    self.zs, else 50/50 inside the test classes) and the result is cached
    back to self.loaded_data.
    """
    if os.path.exists(self.loaded_data):
        with open(self.loaded_data, 'rb') as f:
            preloaded_data = pickle.load(f)
        # Semantics part
        self.class2id = preloaded_data['class2id']
        self.id2class = preloaded_data['id2class']
        self.pretrain_embedding = preloaded_data['pretrain_embedding']
        # Train part
        self.path2class_sketch = preloaded_data['path2class_sketch']
        self.class2path_sketch = preloaded_data['class2path_sketch']
        self.path2class_image = preloaded_data['path2class_image']
        self.class2path_image = preloaded_data['class2path_image']
        self.id2path = preloaded_data['id2path']
        # Test part
        self.class2path_sketch_test = preloaded_data['class2path_sketch_test']
        self.class2path_image_test = preloaded_data['class2path_image_test']
        self.path2class_sketch_test = preloaded_data['path2class_sketch_test']
        self.path2class_image_test = preloaded_data['path2class_image_test']
        return
    # train part
    for idx, cla in enumerate(self.overall_class):
        # semantics part
        self.class2id[cla] = idx
        self.id2class.append(cla)
        # image part
        image_cla_dir = [os.path.join(self.image_dir, cla, fname) for fname in os.listdir(os.path.join(self.image_dir, cla))]
        sketch_cla_dir = [os.path.join(self.sketch_dir, cla, fname) for fname in os.listdir(os.path.join(self.sketch_dir, cla))]
        if self.zs:
            # zero-shot: whole classes go either to train or to test
            if cla in self.train_class:
                if cla not in self.class2path_image:
                    self.class2path_image[cla] = list()
                if cla not in self.class2path_sketch:
                    self.class2path_sketch[cla] = list()
                for path in image_cla_dir:
                    self.path2class_image[path] = cla
                    self.class2path_image[cla].append(path)
                for path in sketch_cla_dir:
                    self.path2class_sketch[path] = cla
                    self.id2path.append(path)
                    self.class2path_sketch[cla].append(path)
            else:
                if cla not in self.class2path_image_test:
                    self.class2path_image_test[cla] = list()
                if cla not in self.class2path_sketch_test:
                    self.class2path_sketch_test[cla] = list()
                for path in image_cla_dir:
                    self.path2class_image_test[path] = cla
                    self.class2path_image_test[cla].append(path)
                for path in sketch_cla_dir:
                    self.path2class_sketch_test[path] = cla
                    self.class2path_sketch_test[cla].append(path)
        else:
            # non-zero-shot: split each *test* class 50/50 into train/test
            if cla in self.test_class:
                random.shuffle(image_cla_dir)
                random.shuffle(sketch_cla_dir)
                train_im = image_cla_dir[:int(0.5*len(image_cla_dir))]
                test_im = image_cla_dir[int(0.5*len(image_cla_dir)):]
                train_sk = sketch_cla_dir[:int(0.5*len(sketch_cla_dir))]
                test_sk = sketch_cla_dir[int(0.5*len(sketch_cla_dir)):]
                self.class2path_image[cla] = train_im
                self.class2path_sketch[cla] = train_sk
                self.class2path_image_test[cla] = test_im
                self.class2path_sketch_test[cla] = test_sk
                for path in train_im:
                    self.path2class_image[path] = cla
                for path in train_sk:
                    self.path2class_sketch[path] = cla
                    self.id2path.append(path)
                for path in test_im:
                    self.path2class_image_test[path] = cla
                for path in test_sk:
                    self.path2class_sketch_test[path] = cla
    # load embedding
    word2vec = gensim.models.KeyedVectors.load_word2vec_format(self.embedding_file, binary=True)
    for idx, item in enumerate(self.id2class):
        if item not in word2vec:
            # fall back to a manually chosen in-vocabulary synonym
            item = SEMANTICS_REPLACE[item]
        self.pretrain_embedding[idx] = word2vec[item]
    self.pretrain_embedding = torch.from_numpy(self.pretrain_embedding)
    # presumably the Sketchy dataset's 125 classes -- TODO confirm
    assert len(self.id2class) == 125
    assert len(self.path2class_sketch.keys()) == len(self.id2path)
    preloaded_data = dict()
    # Semantics part
    preloaded_data['class2id'] = self.class2id
    preloaded_data['id2class'] = self.id2class
    preloaded_data['pretrain_embedding'] = self.pretrain_embedding
    # Train part
    preloaded_data['path2class_sketch'] = self.path2class_sketch
    preloaded_data['class2path_sketch'] = self.class2path_sketch
    preloaded_data['path2class_image'] = self.path2class_image
    preloaded_data['class2path_image'] = self.class2path_image
    preloaded_data['id2path'] = self.id2path
    # Test part
    preloaded_data['class2path_sketch_test'] = self.class2path_sketch_test
    preloaded_data['class2path_image_test'] = self.class2path_image_test
    preloaded_data['path2class_sketch_test'] = self.path2class_sketch_test
    preloaded_data['path2class_image_test'] = self.path2class_image_test
    with open(self.loaded_data, 'wb') as f:
        pickle.dump(preloaded_data, f)
    return
```
#### File: package/dataset/old_code.py
```python
class Siamese_dataloader(torchDataset):
    """Siamese SBIR dataset: pairs each training sketch with a same-class
    (label 1) or different-class (label 0) image. All images are pre-decoded
    into memory by load()."""
    def __init__(self, sketch_dir_train, image_dir_train, stats_file_train, \
                 sketch_dir_test, image_dir_test, stats_file_test, loaded_data, normalize=False):
        super(Siamese_dataloader, self).__init__()
        self.sketch_dir_train = sketch_dir_train
        self.image_dir_train = image_dir_train
        self.stats_file_train = stats_file_train
        self.sketch_dir_test = sketch_dir_test
        self.image_dir_test = image_dir_test
        self.stats_file_test = stats_file_test
        self.normalize = normalize
        self.loaded_data = loaded_data  # path of the pickle cache built by load()
        # ImageNet channel statistics
        self.Normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.sketch_files_train = [os.path.join(self.sketch_dir_train, item) for item in os.listdir(self.sketch_dir_train)]
        self.image_files_train = [os.path.join(self.image_dir_train, item) for item in os.listdir(self.image_dir_train)]
        self.sketch_files_test = [os.path.join(self.sketch_dir_test, item) for item in os.listdir(self.sketch_dir_test)]
        self.image_files_test = [os.path.join(self.image_dir_test, item) for item in os.listdir(self.image_dir_test)]
        self.class2id = dict()
        self.id2path = list()  # path list corresponding to path2class_sketch
        self.loaded_image = dict()  # path: loaded image
        self.class2imgid = dict()  # class: set(id)
        self.class2path_sketch = dict()  # class: set(path) | for sketch | for train
        self.class2path_image = dict()  # class: set(path) | for image | for train
        self.path2class_sketch = dict()  # path: class | for sketch | for train
        self.path2class_image = dict()  # path: class | for image | for train
        self.class2path_sketch_test = dict()  # class: set(path) | for sketch | for test
        self.class2path_image_test = dict()  # class: set(path) | for image | for test
        self.path2class_sketch_test = dict()  # path: class | for sketch | for test
        self.path2class_image_test = dict()  # path: class | for image | for test
        self.load()
def __getitem__(self, index):
    """Even indices return a similar (same-class) pair with label 1, odd
    indices a dissimilar pair with label 0; the sketch is index // 2."""
    sketch = self.load_each_image_use(self.id2path[int(index/2)])
    label = np.zeros(1)
    if index % 2 == 0:
        label[0] = 1
        image, path = self.pair_similar(self.path2class_sketch[self.id2path[int(index/2)]])
    else:
        label[0] = 0
        image, path = self.pair_dis_similar(self.path2class_sketch[self.id2path[int(index/2)]])
    return sketch, image, label
def __len__(self):
    """Two virtual samples (one similar, one dissimilar) per sketch."""
    return 2*len(self.id2path)
def load_train(self, batch_size=512):
    """Yield (sketch, image, label) training batches in random order.

    Even indices pair a sketch with a same-class image (label 1), odd
    indices with a different-class image (label 0), so every sketch is
    visited twice per pass.
    NOTE(review): load_each_image_use returns torch tensors that are packed
    via np.asarray here, and the final (possibly empty) batch is always
    yielded -- confirm downstream code tolerates both.
    """
    image = []
    sketch = []
    label = []
    ranges = list(range(2*len(self.id2path)))
    random.shuffle(ranges)
    for index in ranges:
        path_sketch = self.id2path[int(index/2)]
        sketch.append(self.load_each_image_use(path_sketch))
        if index % 2 == 0:
            label.append(1)
            image_tmp, path = self.pair_similar(self.path2class_sketch[path_sketch])
        else:
            label.append(0)
            image_tmp, path = self.pair_dis_similar(self.path2class_sketch[path_sketch])
        image.append(image_tmp)
        if len(image) == batch_size:
            yield torch.from_numpy(np.asarray(sketch)), torch.from_numpy(np.asarray(image)), torch.from_numpy(np.asarray(label))
            image = []
            sketch = []
            label = []
    yield torch.from_numpy(np.asarray(sketch)), torch.from_numpy(np.asarray(image)), torch.from_numpy(np.asarray(label))
def load_same_class_image(self, batch_size=512):
    """Yield (sketch-image, image, label) batches where the "sketch" slot is
    itself a same-class *image* (image/image pairs instead of sketch/image).

    NOTE(review): the final yield calls torch.stack on possibly-empty lists,
    which raises RuntimeError when the count is a multiple of batch_size.
    """
    image = []
    sketch = []
    label = []
    ranges = list(range(2*len(self.id2path)))
    random.shuffle(ranges)
    for index in ranges:
        path_sketch = self.id2path[int(index/2)]
        #sketch.append(self.load_each_image_use(path_sketch))
        if index % 2 == 0:
            label.append(1)
            sketch_tmp, path = self.pair_similar(self.path2class_sketch[path_sketch])
            image_tmp, path = self.pair_similar(self.path2class_sketch[path_sketch])
        else:
            label.append(0)
            sketch_tmp, path = self.pair_similar(self.path2class_sketch[path_sketch])
            image_tmp, path = self.pair_dis_similar(self.path2class_sketch[path_sketch])
        sketch.append(sketch_tmp)
        image.append(image_tmp)
        if len(image) == batch_size:
            yield torch.stack(sketch), torch.stack(image), torch.from_numpy(np.asarray(label))
            image = []
            sketch = []
            label = []
    yield torch.stack(sketch), torch.stack(image), torch.from_numpy(np.asarray(label))
def pair_similar(self, cls):
    """Sample a random image belonging to class `cls`; return (image, path)."""
    chosen = random.choice(list(self.class2path_image[cls]))
    return self.load_each_image_use(chosen), chosen
def pair_dis_similar(self, cls):
    """Sample a random image from any class other than `cls`; return (image, path)."""
    candidates = list(self.class2imgid.keys())
    candidates.remove(cls)
    other_paths = list(self.class2path_image[random.choice(candidates)])
    chosen = random.choice(other_paths)
    return self.load_each_image_use(chosen), chosen
def load(self):
    """
    this function will build the self.loaded_image, self.class2imgid,
    self.path2class_sketch and self.path2class_image

    All images are pre-decoded into memory (self.loaded_image) and the whole
    state is cached to self.loaded_data as a pickle for fast restarts.
    """
    if os.path.exists(self.loaded_data):
        with open(self.loaded_data, 'rb') as f:
            preloaded_data = pickle.load(f)
        # Train part
        self.class2imgid = preloaded_data['class2imgid']
        self.path2class_sketch = preloaded_data['path2class_sketch']
        self.class2path_sketch = preloaded_data['class2path_sketch']
        self.path2class_image = preloaded_data['path2class_image']
        self.class2path_image = preloaded_data['class2path_image']
        self.id2path = preloaded_data['id2path']
        # Test part
        self.class2id = preloaded_data['class2id']
        self.id2class = TEST_CLASS
        self.class2imgid_test = preloaded_data['class2imgid_test']
        self.class2path_sketch_test = preloaded_data['class2path_sketch_test']
        self.class2path_image_test = preloaded_data['class2path_image_test']
        self.path2class_sketch_test = preloaded_data['path2class_sketch_test']
        self.path2class_image_test = preloaded_data['path2class_image_test']
        # Shared part
        self.loaded_image = preloaded_data['loaded_image']
        return
    self.id2class = TEST_CLASS
    self.class2id = dict()
    for idx, cls in enumerate(self.id2class):
        self.class2id[cls] = idx
    self.class2imgid, self.path2class_sketch, self.class2path_sketch, self.path2class_image, self.class2path_image = \
        self.load_stats(self.stats_file_train, TRAIN_CLASS, self.sketch_files_train, self.image_files_train)
    self.class2imgid_test, self.path2class_sketch_test, self.class2path_sketch_test, self.path2class_image_test, self.class2path_image_test = \
        self.load_stats(self.stats_file_test, TEST_CLASS, self.sketch_files_test, self.image_files_test)
    # pre-decode every image once so training never touches the disk
    for path in self.path2class_sketch.keys():
        self.loaded_image[path] = self.load_each_image(path)
        self.id2path.append(path)
    for path in self.path2class_image.keys():
        self.loaded_image[path] = self.load_each_image(path)
    for path in self.path2class_sketch_test.keys():
        self.loaded_image[path] = self.load_each_image(path)
    for path in self.path2class_image_test.keys():
        self.loaded_image[path] = self.load_each_image(path)
    assert len(self.id2path) == len(self.path2class_sketch.keys())
    preloaded_data = dict()
    # Train part
    preloaded_data['class2imgid'] = self.class2imgid
    preloaded_data['path2class_sketch'] = self.path2class_sketch
    preloaded_data['class2path_sketch'] = self.class2path_sketch
    preloaded_data['path2class_image'] = self.path2class_image
    preloaded_data['class2path_image'] = self.class2path_image
    preloaded_data['id2path'] = self.id2path
    # Test part
    preloaded_data['class2id'] = self.class2id
    preloaded_data['class2imgid_test'] = self.class2imgid_test
    preloaded_data['class2path_sketch_test'] = self.class2path_sketch_test
    preloaded_data['class2path_image_test'] = self.class2path_image_test
    preloaded_data['path2class_sketch_test'] = self.path2class_sketch_test
    preloaded_data['path2class_image_test'] = self.path2class_image_test
    # Shared part
    preloaded_data['loaded_image'] = self.loaded_image
    with open(self.loaded_data, 'wb') as f:
        pickle.dump(preloaded_data, f)
    return
def load_each_image(self, path):
    """Read one image from disk as an RGB uint8 array resized to IMAGE_SIZE.

    NOTE(review): cv2.imread returns None for unreadable paths (which would
    raise AttributeError on img.shape here); with default flags it also
    never returns a single-channel array, so the shape[2] == 1 branch looks
    unreachable -- confirm.
    """
    img = cv2.imread(path)
    if img.shape[2] == 1:
        img = np.concatenate([img, img, img], 2)
    else:
        img = img.copy()[:, :, ::-1]  # BGR -> RGB
    if img.shape != (IMAGE_SIZE, IMAGE_SIZE, 3):
        img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
    return img
def load_each_image_use(self, path):
    """Return the preloaded image at `path` as a normalized float CHW tensor."""
    image = self.loaded_image[path]
    image = image.copy()[:, :, ::-1]  # BGR -> RGB
    # Bug fix: reshape(3, H, W) reinterprets the raw HWC buffer and scrambles
    # the pixels; moving channels first requires an axis transpose.
    image = np.ascontiguousarray(image.transpose(2, 0, 1))
    image = torch.Tensor(image)
    image = image / 255.0
    image = self.Normalize(image)
    # print(image)
    return image
def load_test_images(self, batch_size=512):
    """Yield (stacked image batch, label list) over all test images.

    :param batch_size: number of images per yielded batch.
    """
    ims = []
    label = []
    for path in self.path2class_image_test.keys():
        ims.append(self.load_each_image_use(path))
        label.append(self.path2class_image_test[path])
        if len(ims) == batch_size:
            yield torch.stack(ims), label
            ims = []
            label = []
    # Bug fix: avoid torch.stack([]) (RuntimeError) when the item count is an
    # exact multiple of batch_size.
    if ims:
        yield torch.stack(ims), label
def load_test_sketch(self, batch_size=512):
    """Yield (stacked sketch batch, label list) over all test sketches.

    :param batch_size: number of sketches per yielded batch.
    """
    ims = []
    label = []
    for path in self.path2class_sketch_test.keys():
        ims.append(self.load_each_image_use(path))
        label.append(self.path2class_sketch_test[path])
        if len(ims) == batch_size:
            yield torch.stack(ims), label
            ims = []
            label = []
    # Bug fix: avoid torch.stack([]) (RuntimeError) when the item count is an
    # exact multiple of batch_size.
    if ims:
        yield torch.stack(ims), label
def load_stats(self, stats_file, part_set, sketch_files, image_files):
    """Parse the stats CSV and group sketch/image paths by class.

    :param stats_file: CSV with the class name in column 1 and photo id in column 2.
    :param part_set: the class set being loaded (used only for progress logging).
    :param sketch_files: candidate sketch paths (consumed destructively).
    :param image_files: candidate image paths (consumed destructively).
    :return: (class2imgid, path2class_sketch, class2path_sketch,
              path2class_image, class2path_image)
    """
    class2imgid = dict()
    path2class_sketch = dict()
    class2path_sketch = dict()
    path2class_image = dict()
    class2path_image = dict()
    with open(stats_file, 'r') as stats_in:
        stats_in_reader = csv.reader(stats_in)
        _header = next(stats_in_reader)
        for line in stats_in_reader:
            if line[1] not in class2imgid:
                class2imgid[line[1]] = set()
            class2imgid[line[1]].add(line[2])
    iter = 0  # NOTE(review): shadows the builtin `iter`; local-only but worth renaming
    b_time = time.time()
    for key, value in class2imgid.items():
        iter += 1
        for id in value:
            # match every file whose name contains the photo id
            pattern = '.*' + id + '.*'
            tmp_sketchs = match_filename(pattern, sketch_files)
            tmp_images = match_filename(pattern, image_files)
            for tmp_sketch in tmp_sketchs:
                # remove from the pool so a file is assigned to one class only
                sketch_files.remove(tmp_sketch)
                path2class_sketch[tmp_sketch] = key
                if key not in class2path_sketch:
                    class2path_sketch[key] = set()
                class2path_sketch[key].add(tmp_sketch)
            for tmp_image in tmp_images:
                image_files.remove(tmp_image)
                path2class_image[tmp_image] = key
                if key not in class2path_image:
                    class2path_image[key] = set()
                class2path_image[key].add(tmp_image)
        print('Loaded {}, {}/{}, spend {:.2f} seconds'.format(key, iter, len(part_set), time.time()-b_time))
        b_time = time.time()
    return class2imgid, path2class_sketch, class2path_sketch, path2class_image, class2path_image
def test(self):
    """Visual sanity check: display each training image with its class name."""
    for cls, paths in self.class2path_image.items():
        for item in list(paths):
            print(cls)
            im1 = cv2.imread(item)
            cv2.imshow('test', im1)
            cv2.waitKey(6000)
            cv2.destroyAllWindows()
```
#### File: package/dataset/preprocess.py
```python
import torch
import numpy as np
import os
import sys
import shutil
import csv
import re
from package.dataset.utils import match_filename, TRAIN_CLASS, TEST_CLASS
#from utils import match_filename, TRAIN_CLASS, TEST_CLASS
def split_train_test_zs(stats_file, sketch_dir, image_dir, processed_dir):
    """Split the Sketchy stats/images/sketches into zero-shot train/test sets.

    Writes stats_train.csv / stats_test.csv under `processed_dir` and copies
    each class's sketches and photos into sketch_{train,test} and
    photo_{train,test} subfolders, splitting by membership in
    TRAIN_CLASS / TEST_CLASS.
    """
    stats_train = list()
    stats_test = list()
    class2sketch_dirlist = dict()  # class -> list of sketch file names
    class2image_dirlist = dict()  # class -> list of image file names
    class_imageid_train = dict()  # class -> set of photo ids
    class_imageid_test = dict()  # class -> set of photo ids
    stats_train_file = os.path.join(processed_dir, 'stats_train.csv')
    stats_test_file = os.path.join(processed_dir, 'stats_test.csv')
    sketch_train_dir = os.path.join(processed_dir, 'sketch_train')
    sketch_test_dir = os.path.join(processed_dir, 'sketch_test')
    image_train_dir = os.path.join(processed_dir, 'photo_train')
    image_test_dir = os.path.join(processed_dir, 'photo_test')
    if not os.path.exists(sketch_train_dir):
        os.makedirs(sketch_train_dir)
    if not os.path.exists(sketch_test_dir):
        os.makedirs(sketch_test_dir)
    if not os.path.exists(image_train_dir):
        os.makedirs(image_train_dir)
    if not os.path.exists(image_test_dir):
        os.makedirs(image_test_dir)
    with open(stats_file, 'r') as stats_in:
        stats_all_reader = csv.reader(stats_in)
        # Bug fix: next(stats_in) returned the raw header *string*, and
        # csv.writer.writerow would then emit one column per character;
        # the header must come from the csv reader as a field list.
        header = next(stats_all_reader)
        for line in stats_all_reader:
            if line[1] in TEST_CLASS:
                if line[1] not in class_imageid_test:
                    class_imageid_test[line[1]] = set()
                class_imageid_test[line[1]].add(line[2])
                stats_test.append(line)
            else:
                if line[1] not in class_imageid_train:
                    class_imageid_train[line[1]] = set()
                class_imageid_train[line[1]].add(line[2])
                stats_train.append(line)
    with open(stats_train_file, 'w') as stats_out:
        stats_train_writer = csv.writer(stats_out)
        stats_train_writer.writerow(header)
        for line in stats_train:
            stats_train_writer.writerow(line)
    with open(stats_test_file, 'w') as stats_out:
        stats_test_writer = csv.writer(stats_out)
        stats_test_writer.writerow(header)
        for line in stats_test:
            stats_test_writer.writerow(line)
    # list each class folder once (folder names use '_' instead of spaces)
    for item in list(TEST_CLASS | TRAIN_CLASS):
        class2image_dirlist[item] = os.listdir(os.path.join(image_dir, item.replace(' ', '_')))
        class2sketch_dirlist[item] = os.listdir(os.path.join(sketch_dir, item.replace(' ', '_')))
    for class_name in TEST_CLASS:
        for id_name in class_imageid_test[class_name]:
            pattern = id_name + '*'
            id_name_imagelist = match_filename(pattern, class2image_dirlist[class_name])
            id_name_sketchlist = match_filename(pattern, class2sketch_dirlist[class_name])
            for item in id_name_imagelist:
                shutil.copy(os.path.join(image_dir, class_name.replace(' ', '_'), item), \
                            image_test_dir)
            for item in id_name_sketchlist:
                shutil.copy(os.path.join(sketch_dir, class_name.replace(' ', '_'), item), \
                            sketch_test_dir)
    for class_name in TRAIN_CLASS:
        for id_name in class_imageid_train[class_name]:
            pattern = id_name + '*'
            id_name_imagelist = match_filename(pattern, class2image_dirlist[class_name])
            id_name_sketchlist = match_filename(pattern, class2sketch_dirlist[class_name])
            for item in id_name_imagelist:
                shutil.copy(os.path.join(image_dir, class_name.replace(' ', '_'), item), \
                            image_train_dir)
            for item in id_name_sketchlist:
                shutil.copy(os.path.join(sketch_dir, class_name.replace(' ', '_'), item), \
                            sketch_train_dir)
if __name__ == '__main__':
    # default Sketchy dataset layout; adjust the paths as needed
    stats_file = './data/info/stats.csv'
    sketch_dir = './data/256x256/sketch/tx_000100000000/'
    image_dir = './data/256x256/photo/tx_000100000000/'
    processed_dir = './data/preprocessed/'
    split_train_test_zs(stats_file, sketch_dir, image_dir, processed_dir)
```
#### File: package/loss/regularization.py
```python
import torch
import torch.nn as nn
class _Regularization(nn.Module):
def __init__(self,model,weight_decay,logger=None,p=2):
'''
:param model
:param weight_decay
:param p: p=0 -> L2, p=1 -> L1
'''
super(_Regularization, self).__init__()
if weight_decay <= 0:
print("param weight_decay can not <=0")
exit(0)
self.model=model
self.logger=logger
self.weight_decay=weight_decay
self.p=p
self.weight_list=self.get_weight(model)
self.weight_info(self.weight_list)
def forward(self):
self.weight_list=self.get_weight(self.model)
reg_loss = self.regularization_loss(self.weight_list, self.weight_decay, p=self.p)
return reg_loss
def get_weight(self,model):
'''
only need to penalize the weight term install of the bias term
:param model:
:return:
'''
weight_list = []
for name, param in model.named_parameters():
if 'weight' in name and param.requires_grad == True:
weight = (name, param)
weight_list.append(weight)
return weight_list
def regularization_loss(self,weight_list, weight_decay, p=2):
'''
:param weight_list:
:param p:
:param weight_decay:
:return: regularization loss
'''
reg_loss=0
for _, w in weight_list:
l2_reg = torch.norm(w, p=p)
reg_loss = reg_loss + l2_reg
reg_loss=weight_decay*reg_loss
return reg_loss
def weight_info(self,weight_list):
'''
:param weight_list:
'''
if self.logger is None:
return
self.logger.info("---------------regularization weight---------------")
for name, _ in weight_list:
self.logger.info("\t{}".format(name))
```
#### File: package/model/cmd_translate_adv.py
```python
import math
import torch
import torch.nn as nn
import torch.functional as F
import torch.optim as optim
from package.model.variational_dropout import VariationalDropout
from package.model.vgg import vgg16
from package.loss.triplet_loss import _Triplet_loss, _Ranking_loss
class GaussianNoiseLayer(nn.Module):
    """Adds i.i.d. Gaussian noise to its input during training; identity in eval mode."""
    def __init__(self, mean=0.0, std=0.2):
        super(GaussianNoiseLayer, self).__init__()
        self.mean = mean
        self.std = std

    def forward(self, x):
        # Noise is only injected while the module is in training mode.
        if not self.training:
            return x
        perturbation = x.data.new(x.size()).normal_(self.mean, self.std).to(x.device)
        return x + perturbation
class Discriminator(nn.Module):
    """Two-hidden-layer MLP that scores the concatenation of two feature vectors.

    Optional input Gaussian noise, batch norm, dropout and a final sigmoid
    are controlled by the constructor flags.
    """
    def __init__(self, in_dim=300, out_dim=1, noise=True, use_batchnorm=True, use_dropout=False,
                 use_sigmoid=False):
        super(Discriminator, self).__init__()
        in_dim = in_dim * 2  # inputs are concatenated pairs
        hid_dim = int(in_dim / 2)
        layers = list()
        if noise:
            layers.append(GaussianNoiseLayer(mean=0.0, std=0.3))
        # two identical hidden stages: Linear (+BN) + LeakyReLU (+Dropout)
        for fan_in, fan_out in ((in_dim, hid_dim), (hid_dim, hid_dim)):
            layers.append(nn.Linear(fan_in, fan_out))
            if use_batchnorm:
                layers.append(nn.BatchNorm1d(fan_out))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            if use_dropout:
                layers.append(nn.Dropout(p=0.5))
        layers.append(nn.Linear(hid_dim, out_dim))
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.disc = nn.Sequential(*layers)

    def forward(self, x, y):
        # Concatenate along the feature dimension and score jointly.
        return self.disc(torch.cat([x, y], -1))
class GANLoss(nn.Module):
    """Adversarial loss with soft, noisy labels (LSGAN/MSE or vanilla/BCE)."""
    def __init__(self, use_lsgan=True):
        super(GANLoss, self).__init__()
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        # Soft labels: real targets in [0.7, 1.0), fake targets in [0.0, 0.3).
        base = torch.rand(input.size(0))
        target_tensor = 0.7 + 0.3 * base if target_is_real else 0.3 * base
        return target_tensor.to(input.device)

    def __call__(self, input, target_is_real):
        targets = self.get_target_tensor(input, target_is_real)
        return self.loss(input.squeeze(), targets)
class Encoder(nn.Module):
    """Default encoder mapping features from one domain to another.

    Two Linear -> BatchNorm -> ReLU stages with dropout between them;
    hidden width is fixed at 2048.
    """
    def __init__(self, input_size, output_size, dropout_prob):
        super(Encoder, self).__init__()
        self.mid_size = 2048
        self.encoder = nn.Sequential(
            nn.Linear(input_size, self.mid_size),
            nn.BatchNorm1d(self.mid_size),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout_prob),
            nn.Linear(self.mid_size, output_size),
            nn.BatchNorm1d(output_size),
            nn.ReLU(inplace=True),
        )

    def forward(self, features):
        return self.encoder(features)
class Decoder(nn.Module):
    """Default decoder mapping features back to the target domain.

    Two Linear -> ReLU stages with a fixed hidden width of 2048.
    """
    def __init__(self, input_size, output_size):
        super(Decoder, self).__init__()
        self.mid_size = 2048
        self.decoder = nn.Sequential(
            nn.Linear(input_size, self.mid_size),
            nn.ReLU(inplace=True),
            nn.Linear(self.mid_size, output_size),
            nn.ReLU(inplace=True),
        )

    def forward(self, features):
        return self.decoder(features)
class Variational_Sampler(nn.Module):
    """Variational (VAE-style) sampler for image appearance features.

    Projects the input to a mean and log-variance, draws a reparameterized
    sample, and returns the sample together with the KL divergence to a
    standard normal prior.
    """

    def __init__(self, hidden_size):
        super(Variational_Sampler, self).__init__()
        self.hidden_size = hidden_size
        # Kept as single-layer Sequentials so state_dict keys stay stable.
        self.mean_encoder = nn.Sequential(nn.Linear(hidden_size, hidden_size))
        self.logvar_encoder = nn.Sequential(nn.Linear(hidden_size, hidden_size))

    def reparameterize(self, mean, logvar):
        # z = mean + eps * std with eps ~ N(0, I)
        eps = torch.randn_like(logvar)
        return eps.mul(torch.exp(0.5 * logvar)).add(mean)

    def lossfn(self, mean, logvar):
        # KL( N(mean, exp(logvar)) || N(0, I) ), averaged over the batch.
        kl_per_sample = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp(), dim=1)
        return kl_per_sample.mean()

    def forward(self, x):
        x_mean = self.mean_encoder(x)
        x_logvar = self.logvar_encoder(x)
        z = self.reparameterize(x_mean, x_logvar)
        loss = self.lossfn(x_mean, x_logvar)
        return z, loss
class MSE(nn.Module):
    """Per-sample mean squared error, averaged over the last dimension."""

    def __init__(self):
        super(MSE, self).__init__()

    def forward(self, x_true, x_pred):
        diff = x_pred - x_true
        return torch.mean(diff * diff, dim=-1)
class CosineDistance(nn.Module):
    """Cosine distance ``1 - cos(input1, input2)`` reduced along ``dim``.

    Fix: the original ``__init__`` never called ``super().__init__()``, so
    ``nn.Module.__call__`` raised ``AttributeError`` (the hook/buffer dicts
    were never created) and the module could only be used via ``.forward``.
    The math is unchanged.

    NOTE(review): a zero vector in either input yields division by zero
    (NaN) -- unchanged from the original behavior.
    """

    def __init__(self, dim=-1):
        super(CosineDistance, self).__init__()  # required for nn.Module machinery
        self.dim = dim

    def forward(self, input1, input2):
        """Return the cosine distance; inputs are e.g. [batch_size, hidden_dim]."""
        assert input1.shape == input2.shape
        num = torch.sum(input1 * input2, dim=self.dim)
        denorm = torch.sqrt(torch.sum(torch.pow(input1, 2), dim=self.dim)) * torch.sqrt(torch.sum(torch.pow(input2, 2), dim=self.dim))
        cosine_distance = 1 - num / denorm
        return cosine_distance
class CMDTrans_model(nn.Module):
    """
    The overall model of our zero-shot sketch based image retrieval using cross-modal domain translation
    """
    # NOTE(review): pca_size, semantic_size, pretrain_embedding, fix_embedding
    # and logger are accepted but never used in this class -- confirm against
    # callers before removing.
    def __init__(self, pca_size, raw_size, hidden_size, semantic_size, pretrain_embedding, dropout_prob=0.3, lr=2e-4, momentum=0.9,
                 fix_embedding=True, seman_dist='cosine', triplet_dist='l2', margin1=0, margin2=10, logger=None):
        super(CMDTrans_model, self).__init__()
        # Dist Metrics: select which distance powers the semantic and triplet terms.
        self.l2_dist = nn.PairwiseDistance(p=2)
        self.mse = MSE()
        self.cosine = nn.CosineSimilarity(dim=-1)
        if seman_dist == 'l2':
            self.seman_dist = self.l2_dist
        elif seman_dist == 'cosine':
            self.seman_dist = self.cosine
        else:
            raise ValueError('The seman_dist should be l2 or cosine')
        if triplet_dist == 'l2':
            self.triplet_dist = self.l2_dist
        elif triplet_dist == 'cosine':
            self.triplet_dist = self.cosine
        else:
            raise ValueError('The triplet_dist should be l2 or cosine')
        # Modules: sketch encoder, image structure (S) and appearance (A) encoders,
        # a variational sampler over appearance, and decoders back to raw features.
        self.sketch_encoder = Encoder(raw_size, hidden_size, dropout_prob)
        self.image_encoder_S = Encoder(raw_size, hidden_size, dropout_prob)
        self.image_encoder_A = Encoder(raw_size, hidden_size, dropout_prob)
        self.variational_sample = Variational_Sampler(hidden_size)
        self.sketch_decoder = Decoder(hidden_size, raw_size)
        # Image decoder consumes [appearance_sample ; sketch_structure] -> 2*hidden_size.
        self.image_decoder = Decoder(hidden_size*2, raw_size)
        #self.image_decoder = Decoder(hidden_size+raw_size, raw_size)
        #self.image_decoder = Decoder(hidden_size*2+raw_size, raw_size)
        # Loss
        self.triplet_loss = _Ranking_loss(self.triplet_dist, margin1, margin2)
        self.gan_loss = GANLoss(use_lsgan=True)
        # Activation
        self.relu = nn.ReLU(inplace=True)
        # Discriminators: conditional -- they score (sample, condition) pairs.
        self.disc_sk = Discriminator(in_dim=raw_size, noise=True, use_batchnorm=True)
        self.disc_im = Discriminator(in_dim=raw_size, noise=True, use_batchnorm=True)
        # Optimizers: Adam for the generator path, SGD for the discriminators.
        # NOTE(review): discriminator lr is hard-coded to 0.01; the `lr`
        # argument only affects the generator optimizer.
        self.optimizer_gen = optim.Adam(list(self.sketch_encoder.parameters()) + list(self.image_encoder_S.parameters()) +
                                        list(self.image_encoder_A.parameters()) + list(self.variational_sample.parameters()) +
                                        list(self.sketch_decoder.parameters()) + list(self.image_decoder.parameters()),
                                        lr=lr)
        self.optimizer_disc = optim.SGD(list(self.disc_sk.parameters()) + list(self.disc_im.parameters()), lr=0.01, momentum=momentum)

    def forward(self, sketch, image_pair, image_unpair, image_n):
        """Compute all training losses for one batch.

        image_p  [batch_size, pca_size]
        image_n  [batch_size, pca_size]
        sketch   [batch_size, pca_size]
        semantics [batch_size, 1]

        Returns two dicts: generator losses (kl, triplet, orthogonality,
        image, sketch, image_dis, sketch_dis) and discriminator losses
        (sketch_dis, image_dis). The actual backward/step is done by the
        caller.
        """
        # relu all inputs (features are assumed non-negative downstream)
        sketch = self.relu(sketch)
        image_pair = self.relu(image_pair)
        image_unpair = self.relu(image_unpair)
        image_n = self.relu(image_n)
        # recode size info (kept for debugging; intentionally unused)
        _batch_size = image_pair.shape[0]
        _raw_size = image_pair.shape[1]
        # model: encode every view, sample appearance, translate both ways
        sketch_encode_feature = self.sketch_encoder(sketch)
        image_n_encode_feature_s = self.image_encoder_S(image_n)
        image_unpaired_encode_feature_s = self.image_encoder_S(image_unpair)
        image_paired_encode_feature_s = self.image_encoder_S(image_pair)
        image_paired_encode_feature_a = self.image_encoder_A(image_pair)
        image_paired_encode_feature_a_resampled, kl_loss = self.variational_sample(image_paired_encode_feature_a) # kl loss(1)
        image_paired_sketch_feature_combine = torch.cat([image_paired_encode_feature_a_resampled, sketch_encode_feature], dim=1)
        #image_paired_sketch_feature_combine = torch.cat([image_paired_encode_feature_a_resampled, sketch], dim=1)
        #image_paired_sketch_feature_combine = torch.cat([image_paired_encode_feature_a_resampled, sketch_encode_feature, sketch], dim=1)
        image_translate = self.image_decoder(image_paired_sketch_feature_combine)
        sketch_translate = self.sketch_decoder(image_paired_encode_feature_s)
        # loss
        triplet_loss = self.triplet_loss(sketch_encode_feature, image_paired_encode_feature_s, image_unpaired_encode_feature_s, image_n_encode_feature_s) # triplet loss(3)
        image_translate_loss = torch.mean(self.mse(image_translate, image_pair)) # image loss(5)
        sketch_translate_loss = torch.mean(self.mse(sketch_translate, sketch)) # sketch loss(6)
        orthogonality_loss = torch.mean(self.cosine(image_paired_encode_feature_s, image_paired_encode_feature_a)) # orthogonality loss(4)
        # Generator-side adversarial terms: fool the discriminators on translations.
        image_dis = self.gan_loss(self.disc_im(image_translate, image_unpair), True)
        sketch_dis = self.gan_loss(self.disc_sk(sketch_translate, sketch), True)
        # Discriminator-side terms: real pair labeled True, translated pair False.
        loss_disc_sk = self.gan_loss(self.disc_sk(sketch, sketch), True) + self.gan_loss(self.disc_sk(sketch_translate, sketch), False)
        loss_disc_im = self.gan_loss(self.disc_im(image_unpair, image_unpair), True) + self.gan_loss(self.disc_im(image_translate, image_unpair), False)
        loss_gen = dict()
        loss_gen['kl'] = kl_loss
        loss_gen['triplet'] = triplet_loss
        loss_gen['orthogonality'] = orthogonality_loss
        loss_gen['image'] = image_translate_loss
        loss_gen['sketch'] = sketch_translate_loss
        loss_gen['image_dis'] = image_dis
        loss_gen['sketch_dis'] = sketch_dis
        loss_dis = dict()
        loss_dis['sketch_dis'] = loss_disc_sk
        loss_dis['image_dis'] = loss_disc_im
        return loss_gen, loss_dis

    def inference_structure(self, x, mode):
        """
        map sketch and image to structure space
        """
        x = self.relu(x)
        if mode=='image':
            return self.image_encoder_S(x)
        elif mode=='sketch':
            return self.sketch_encoder(x)
        else:
            raise ValueError('The mode must be image or sketch')

    def inference_appearance(self, x):
        """
        map image to appearance space
        """
        x = self.relu(x)
        return self.image_encoder_A(x)

    def inference_generation(self, x, sample_times=200):
        # Translate a sketch to the image domain by averaging `sample_times`
        # decodings, each with a fresh N(0, I) appearance sample.
        x = self.relu(x)
        x_hidden = self.sketch_encoder(x)
        generated = list()
        for _ in range(sample_times):
            eps = torch.randn_like(x_hidden)
            z = torch.cat([eps, x_hidden], dim=1)
            #z = torch.cat([eps, x], dim=1)
            #z = torch.cat([eps, x_hidden, x], dim=1)
            image_translate = self.image_decoder(z)
            generated.append(image_translate)
        generated = torch.mean(torch.stack(generated, dim=-1),dim=-1)
        return generated
```
#### File: package/model/cmd_translate.py
```python
import math
import torch
import torch.nn as nn
import torch.functional as F
from package.model.variational_dropout import VariationalDropout
from package.model.vgg import vgg16
from package.loss.triplet_loss import _Triplet_loss, _Ranking_loss
class Encoder(nn.Module):
    """MLP encoder mapping features from one domain to another.

    Two Linear layers (hidden width 2048), each followed by BatchNorm and
    ReLU; dropout is applied after the first activation.
    """

    def __init__(self, input_size, output_size, dropout_prob):
        super(Encoder, self).__init__()
        self.mid_size = 2048
        layers = []
        layers.extend([nn.Linear(input_size, self.mid_size),
                       nn.BatchNorm1d(self.mid_size),
                       nn.ReLU(inplace=True),
                       nn.Dropout(dropout_prob)])
        layers.extend([nn.Linear(self.mid_size, output_size),
                       nn.BatchNorm1d(output_size),
                       nn.ReLU(inplace=True)])
        self.encoder = nn.Sequential(*layers)

    def forward(self, features):
        return self.encoder(features)
class Decoder(nn.Module):
    """MLP decoder mapping hidden features back to a raw feature space.

    Linear -> ReLU -> Linear -> ReLU with a fixed 2048-wide hidden layer.
    """

    def __init__(self, input_size, output_size):
        super(Decoder, self).__init__()
        self.mid_size = 2048
        layers = [nn.Linear(input_size, self.mid_size),
                  nn.ReLU(inplace=True)]
        layers += [nn.Linear(self.mid_size, output_size),
                   nn.ReLU(inplace=True)]
        self.decoder = nn.Sequential(*layers)

    def forward(self, features):
        return self.decoder(features)
class Variational_Sampler(nn.Module):
    """Variational (reparameterized) sampler for image appearance features.

    Produces a sample z ~ N(mean(x), exp(logvar(x))) plus the KL divergence
    of that Gaussian from a standard normal prior.
    """

    def __init__(self, hidden_size):
        super(Variational_Sampler, self).__init__()
        self.hidden_size = hidden_size
        # Single-layer Sequentials keep checkpoint state_dict keys unchanged.
        self.mean_encoder = nn.Sequential(nn.Linear(hidden_size, hidden_size))
        self.logvar_encoder = nn.Sequential(nn.Linear(hidden_size, hidden_size))

    def reparameterize(self, mean, logvar):
        std = torch.exp(0.5 * logvar)
        return mean + torch.randn_like(std) * std

    def lossfn(self, mean, logvar):
        # Standard VAE KL term, summed over features and averaged over batch.
        per_sample = torch.sum(1 + logvar - mean.pow(2) - logvar.exp(), dim=1)
        return torch.mean(-0.5 * per_sample)

    def forward(self, x):
        mu = self.mean_encoder(x)
        log_variance = self.logvar_encoder(x)
        sample = self.reparameterize(mu, log_variance)
        kl = self.lossfn(mu, log_variance)
        return sample, kl
class MSE(nn.Module):
    """Mean squared error reduced along the last dimension (per-sample)."""

    def __init__(self):
        super(MSE, self).__init__()

    def forward(self, x_true, x_pred):
        squared_error = (x_pred - x_true) ** 2
        return squared_error.mean(dim=-1)
class CosineDistance(nn.Module):
    """Cosine distance ``1 - cos(input1, input2)`` reduced along ``dim``.

    Fix: the original ``__init__`` did not call ``super().__init__()``, so
    invoking the module via ``nn.Module.__call__`` raised ``AttributeError``
    (hook/buffer dicts were never initialized). The math is unchanged.

    NOTE(review): a zero vector in either input produces division by zero
    (NaN) -- unchanged from the original behavior.
    """

    def __init__(self, dim=-1):
        super(CosineDistance, self).__init__()  # required for nn.Module state
        self.dim = dim

    def forward(self, input1, input2):
        """Return the cosine distance; inputs are e.g. [batch_size, hidden_dim]."""
        assert input1.shape == input2.shape
        num = torch.sum(input1 * input2, dim=self.dim)
        denorm = torch.sqrt(torch.sum(torch.pow(input1, 2), dim=self.dim)) * torch.sqrt(torch.sum(torch.pow(input2, 2), dim=self.dim))
        cosine_distance = 1 - num / denorm
        return cosine_distance
class CMDTrans_model(nn.Module):
    """
    The overall model of our zero-shot sketch based image retrieval using cross-modal domain translation
    """
    # NOTE(review): pca_size, semantic_size, pretrain_embedding, fix_embedding
    # and logger are accepted but never used here -- confirm against callers
    # before removing. This variant has no GAN/optimizer components.
    def __init__(self, pca_size, raw_size, hidden_size, semantic_size, pretrain_embedding, dropout_prob=0.3,
                 fix_embedding=True, seman_dist='cosine', triplet_dist='l2', margin1=0, margin2=10, logger=None):
        super(CMDTrans_model, self).__init__()
        # Dist Metrics: select which distance powers the semantic and triplet terms.
        self.l2_dist = nn.PairwiseDistance(p=2)
        self.mse = MSE()
        self.cosine = nn.CosineSimilarity(dim=-1)
        if seman_dist == 'l2':
            self.seman_dist = self.l2_dist
        elif seman_dist == 'cosine':
            self.seman_dist = self.cosine
        else:
            raise ValueError('The seman_dist should be l2 or cosine')
        if triplet_dist == 'l2':
            self.triplet_dist = self.l2_dist
        elif triplet_dist == 'cosine':
            self.triplet_dist = self.cosine
        else:
            raise ValueError('The triplet_dist should be l2 or cosine')
        # Modules: sketch encoder, image structure (S) and appearance (A) encoders,
        # a variational sampler over appearance, and decoders back to raw features.
        self.sketch_encoder = Encoder(raw_size, hidden_size, dropout_prob)
        self.image_encoder_S = Encoder(raw_size, hidden_size, dropout_prob)
        self.image_encoder_A = Encoder(raw_size, hidden_size, dropout_prob)
        self.variational_sample = Variational_Sampler(hidden_size)
        self.sketch_decoder = Decoder(hidden_size, raw_size)
        # Image decoder consumes [appearance_sample ; sketch_structure] -> 2*hidden_size.
        self.image_decoder = Decoder(hidden_size*2, raw_size)
        #self.image_decoder = Decoder(hidden_size+raw_size, raw_size)
        #self.image_decoder = Decoder(hidden_size*2+raw_size, raw_size)
        # Loss
        self.triplet_loss = _Ranking_loss(self.triplet_dist, margin1, margin2)
        # Activation
        self.relu = nn.ReLU(inplace=True)

    def forward(self, sketch, image_pair, image_unpair, image_n):
        """Compute the five training losses for one batch.

        image_p  [batch_size, pca_size]
        image_n  [batch_size, pca_size]
        sketch   [batch_size, pca_size]
        semantics [batch_size, 1]

        Returns a dict with keys 'kl', 'triplet', 'orthogonality', 'image',
        'sketch'. Backward/step is the caller's responsibility.
        """
        # recode size info
        # NOTE(review): batch_size and _raw_size are unused locals, kept as-is.
        batch_size = image_pair.shape[0]
        _raw_size = image_pair.shape[1]
        # model: encode every view, sample appearance, translate both ways
        sketch_encode_feature = self.sketch_encoder(sketch)
        image_n_encode_feature_s = self.image_encoder_S(image_n)
        image_unpaired_encode_feature_s = self.image_encoder_S(image_unpair)
        image_paired_encode_feature_s = self.image_encoder_S(image_pair)
        image_paired_encode_feature_a = self.image_encoder_A(image_pair)
        image_paired_encode_feature_a_resampled, kl_loss = self.variational_sample(image_paired_encode_feature_a) # kl loss(1)
        image_paired_sketch_feature_combine = torch.cat([image_paired_encode_feature_a_resampled, sketch_encode_feature], dim=1)
        #image_paired_sketch_feature_combine = torch.cat([image_paired_encode_feature_a_resampled, sketch], dim=1)
        #image_paired_sketch_feature_combine = torch.cat([image_paired_encode_feature_a_resampled, sketch_encode_feature, sketch], dim=1)
        image_translate = self.image_decoder(image_paired_sketch_feature_combine)
        sketch_translate = self.sketch_decoder(image_paired_encode_feature_s)
        # loss
        triplet_loss = self.triplet_loss(sketch_encode_feature, image_paired_encode_feature_s, image_unpaired_encode_feature_s, image_n_encode_feature_s) # triplet loss(3)
        image_translate_loss = torch.mean(self.mse(image_translate, image_pair)) # image loss(5)
        sketch_translate_loss = torch.mean(self.mse(sketch_translate, sketch)) # sketch loss(6)
        orthogonality_loss = torch.mean(self.cosine(image_paired_encode_feature_s, image_paired_encode_feature_a)) # orthogonality loss(4)
        loss = dict()
        loss['kl'] = kl_loss
        loss['triplet'] = triplet_loss
        loss['orthogonality'] = orthogonality_loss
        loss['image'] = image_translate_loss
        loss['sketch'] = sketch_translate_loss
        return loss

    def inference_structure(self, x, mode):
        """
        map sketch and image to structure space
        """
        x = self.relu(x)
        if mode=='image':
            return self.image_encoder_S(x)
        elif mode=='sketch':
            return self.sketch_encoder(x)
        else:
            raise ValueError('The mode must be image or sketch')

    def inference_generation(self, x, sample_times=200):
        # Translate a sketch to the image domain by averaging `sample_times`
        # decodings, each with a fresh N(0, I) appearance sample.
        x = self.relu(x)
        x_hidden = self.sketch_encoder(x)
        generated = list()
        for _ in range(sample_times):
            eps = torch.randn_like(x_hidden)
            z = torch.cat([eps, x_hidden], dim=1)
            #z = torch.cat([eps, x], dim=1)
            #z = torch.cat([eps, x_hidden, x], dim=1)
            image_translate = self.image_decoder(z)
            generated.append(image_translate)
        generated = torch.mean(torch.stack(generated, dim=-1),dim=-1)
        return generated
```
#### File: package/model/pcyc.py
```python
import os
import numpy as np
# torch
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models
import torch.nn.functional as F
from package.model.vgg import vgg16, vgg16_bn
from package.loss.pcyc_loss import _GAN_Loss
class VGGNetFeats(nn.Module):
    """VGG-16 backbone producing a 512-d feature vector per image.

    Keeps the convolutional features and the VGG classifier minus its final
    layer, then projects 4096 -> 512.
    """

    def __init__(self, pretrained=True, finetune=True):
        super(VGGNetFeats, self).__init__()
        backbone = vgg16(pretrained=pretrained, return_type=0)
        # requires_grad mirrors `finetune`: True trains the backbone, False freezes it.
        for param in backbone.parameters():
            param.requires_grad = finetune
        self.features = backbone.features
        head = list(backbone.classifier.children())[:-1]
        head.append(nn.Linear(4096, 512))
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
class Generator(nn.Module):
    """Three-layer MLP generator: in_dim -> hid -> hid -> out_dim.

    Each hidden Linear is followed (in order) by optional BatchNorm,
    LeakyReLU(0.2), optional Gaussian noise, and optional dropout.
    """

    def __init__(self, in_dim=512, out_dim=300, noise=True, use_batchnorm=True, use_dropout=False):
        super(Generator, self).__init__()
        hid_dim = int((in_dim + out_dim) / 2)
        layers = [nn.Linear(in_dim, hid_dim)]
        layers += self._hidden_tail(hid_dim, noise, use_batchnorm, use_dropout)
        layers.append(nn.Linear(hid_dim, hid_dim))
        layers += self._hidden_tail(hid_dim, noise, use_batchnorm, use_dropout)
        layers.append(nn.Linear(hid_dim, out_dim))
        self.gen = nn.Sequential(*layers)

    @staticmethod
    def _hidden_tail(width, noise, use_batchnorm, use_dropout):
        # Post-linear stack; order matches the original layer layout so
        # Sequential indices (and thus state_dict keys) are unchanged.
        tail = []
        if use_batchnorm:
            tail.append(nn.BatchNorm1d(width))
        tail.append(nn.LeakyReLU(0.2, inplace=True))
        if noise:
            tail.append(GaussianNoiseLayer(mean=0.0, std=0.2))
        if use_dropout:
            tail.append(nn.Dropout(p=0.5))
        return tail

    def forward(self, x):
        return self.gen(x)
class GaussianNoiseLayer(nn.Module):
    """Adds N(mean, std) noise during training; identity in eval mode."""

    def __init__(self, mean=0.0, std=0.2):
        super(GaussianNoiseLayer, self).__init__()
        self.mean = mean
        self.std = std

    def forward(self, x):
        if self.training:
            # Same dtype/device as x, so no explicit .cuda() transfer is needed.
            jitter = torch.empty_like(x).normal_(self.mean, self.std)
            x = x + jitter
        return x
class Discriminator(nn.Module):
    """MLP discriminator: in_dim -> hid -> hid -> out_dim (default 1 logit).

    Optional input Gaussian noise, per-layer BatchNorm/LeakyReLU/dropout,
    and an optional final sigmoid.

    Fix: the original ``forward`` unconditionally called ``x.cuda()`` when the
    input was on CPU, crashing on CPU-only machines. The input is now moved
    to the module's own device, which preserves the old behavior on CUDA
    models and additionally works on CPU.
    """

    def __init__(self, in_dim=300, out_dim=1, noise=True, use_batchnorm=True, use_dropout=False,
                 use_sigmoid=False):
        super(Discriminator, self).__init__()
        hid_dim = int(in_dim / 2)
        modules = list()
        if noise:
            modules.append(GaussianNoiseLayer(mean=0.0, std=0.3))
        modules.append(nn.Linear(in_dim, hid_dim))
        if use_batchnorm:
            modules.append(nn.BatchNorm1d(hid_dim))
        modules.append(nn.LeakyReLU(0.2, inplace=True))
        if use_dropout:
            modules.append(nn.Dropout(p=0.5))
        modules.append(nn.Linear(hid_dim, hid_dim))
        if use_batchnorm:
            modules.append(nn.BatchNorm1d(hid_dim))
        modules.append(nn.LeakyReLU(0.2, inplace=True))
        if use_dropout:
            modules.append(nn.Dropout(p=0.5))
        modules.append(nn.Linear(hid_dim, out_dim))
        if use_sigmoid:
            modules.append(nn.Sigmoid())
        self.disc = nn.Sequential(*modules)

    def forward(self, x):
        # Move the input to wherever this module's parameters live (no-op if
        # already there) instead of forcing CUDA.
        try:
            device = next(self.parameters()).device
        except StopIteration:
            device = x.device
        return self.disc(x.to(device))
class AutoEncoder(nn.Module):
    """Symmetric MLP autoencoder: dim -> ... -> hid_dim -> ... -> dim.

    Layer widths are linearly interpolated between ``dim`` and ``hid_dim``
    over ``nlayer`` Linear+ReLU stages in each direction.

    Fix: ``dtype=np.int`` was removed in NumPy 1.24 (deprecated since 1.20)
    and raised ``AttributeError`` at construction; the builtin ``int`` is the
    documented replacement and yields identical widths.
    """

    def __init__(self, dim=300, hid_dim=300, nlayer=1):
        super(AutoEncoder, self).__init__()
        steps_down = np.linspace(dim, hid_dim, num=nlayer + 1, dtype=int).tolist()
        modules = []
        for i in range(nlayer):
            modules.append(nn.Linear(steps_down[i], steps_down[i + 1]))
            modules.append(nn.ReLU(inplace=True))
        self.enc = nn.Sequential(*modules)
        steps_up = np.linspace(hid_dim, dim, num=nlayer + 1, dtype=int).tolist()
        modules = []
        for i in range(nlayer):
            modules.append(nn.Linear(steps_up[i], steps_up[i + 1]))
            modules.append(nn.ReLU(inplace=True))
        self.dec = nn.Sequential(*modules)

    def forward(self, x):
        """Return (encoding, reconstruction) for input ``x``."""
        xenc = self.enc(x)
        xrec = self.dec(xenc)
        return xenc, xrec
class PCYC(nn.Module):
    """Paired-cycle GAN for zero-shot sketch-based image retrieval.

    Maps sketch and image VGG features into a shared semantic space via
    per-modality generators, with cycle-consistency back to each modality,
    adversarial discriminators, frozen linear classifiers, and internal
    optimizers/schedulers (this module optimizes itself via
    :meth:`optimize_params`).
    """
    def __init__(self, num_clss, args):
        super(PCYC, self).__init__()
        # Dimension of embedding
        self.dim_enc = args.dim_enc
        # Number of classes
        self.num_clss = num_clss
        # Sketch model: pre-trained on ImageNet (frozen: finetune=False)
        self.sketch_model = VGGNetFeats(pretrained=True, finetune=False)
        # self.load_weight(self.sketch_model, args.path_sketch_model, 'sketch')
        # Image model: pre-trained on ImageNet
        self.image_model = VGGNetFeats(pretrained=True, finetune=False)
        # Generators
        # Sketch to semantic generator
        self.gen_sk2se = Generator(in_dim=512, out_dim=self.dim_enc, noise=False, use_dropout=True)
        # Image to semantic generator
        self.gen_im2se = Generator(in_dim=512, out_dim=self.dim_enc, noise=False, use_dropout=True)
        # Semantic to sketch generator
        self.gen_se2sk = Generator(in_dim=self.dim_enc, out_dim=512, noise=False, use_dropout=True)
        # Semantic to image generator
        self.gen_se2im = Generator(in_dim=self.dim_enc, out_dim=512, noise=False, use_dropout=True)
        # Discriminators
        # Common semantic discriminator
        self.disc_se = Discriminator(in_dim=self.dim_enc, noise=True, use_batchnorm=True)
        # Sketch discriminator
        self.disc_sk = Discriminator(in_dim=512, noise=True, use_batchnorm=True)
        # Image discriminator
        self.disc_im = Discriminator(in_dim=512, noise=True, use_batchnorm=True)
        # Classifiers: frozen (requires_grad=False) linear probes on each space.
        self.classifier_sk = nn.Linear(512, self.num_clss, bias=False)
        self.classifier_im = nn.Linear(512, self.num_clss, bias=False)
        self.classifier_se = nn.Linear(self.dim_enc, self.num_clss, bias=False)
        for param in self.classifier_sk.parameters():
            param.requires_grad = False
        for param in self.classifier_im.parameters():
            param.requires_grad = False
        for param in self.classifier_se.parameters():
            param.requires_grad = False
        # print('Done')
        # Optimizers
        # print('Defining optimizers...', end='')
        self.lr = args.lr
        self.gamma = 0.1
        self.momentum = 0.9
        self.optimizer_gen = optim.Adam(list(self.gen_sk2se.parameters()) + list(self.gen_im2se.parameters()) +
                                        list(self.gen_se2sk.parameters()) + list(self.gen_se2im.parameters()),
                                        lr=self.lr)
        self.optimizer_disc = optim.SGD(list(self.disc_se.parameters()) + list(self.disc_sk.parameters()) +
                                        list(self.disc_im.parameters()), lr=self.lr, momentum=self.momentum)
        # NOTE(review): milestones=[] means these schedulers never actually
        # decay the learning rate -- confirm whether milestones are set elsewhere.
        self.scheduler_gen = optim.lr_scheduler.MultiStepLR(self.optimizer_gen, milestones=[],
                                                            gamma=self.gamma)
        self.scheduler_disc = optim.lr_scheduler.MultiStepLR(self.optimizer_disc, milestones=[],
                                                             gamma=self.gamma)
        # print('Done')
        # Loss function
        # print('Defining losses...', end='')
        self.lambda_se = args.lambda_se
        self.lambda_im = args.lambda_im
        self.lambda_sk = args.lambda_sk
        self.lambda_gen_cyc = args.lambda_gen_cyc
        self.lambda_gen_adv = args.lambda_gen_adv
        self.lambda_gen_cls = args.lambda_gen_cls
        self.lambda_gen_reg = args.lambda_gen_reg
        self.lambda_disc_se = args.lambda_disc_se
        self.lambda_disc_sk = args.lambda_disc_sk
        self.lambda_disc_im = args.lambda_disc_im
        self.lambda_regular = args.lambda_regular
        self.criterion_gan = _GAN_Loss(use_lsgan=True)
        self.criterion_cyc = nn.L1Loss()
        self.criterion_cls = nn.CrossEntropyLoss()
        self.criterion_reg = nn.MSELoss()

    def forward(self, sk, im):
        """Encode sketch and/or image batches into the semantic space.

        Stores intermediate features on ``self`` for :meth:`backward`.
        If exactly one of ``sk``/``im`` is None, returns the other's semantic
        embedding; if both are given, also computes the cycle reconstructions
        and returns None.
        """
        # vgg feats
        if sk is not None:
            self.sk_fe = self.sketch_model(sk)
            self.sk2se_em = self.gen_sk2se(self.sk_fe)
        if im is not None:
            self.im_fe = self.image_model(im)
            self.im2se_em = self.gen_im2se(self.im_fe) # Y -> S
        if sk is None:
            return self.im2se_em
        if im is None:
            return self.sk2se_em
        # Reconstruct original examples for cycle consistency
        self.im_em_hat = self.gen_se2im(self.im2se_em) # Y -> S -> Y'
        self.sk_em_hat = self.gen_se2sk(self.sk2se_em)

    def backward(self, cl):
        """Compute losses from the features cached by :meth:`forward`, step
        both optimizers, and return (adv, gen, disc) loss tensors.

        ``cl`` holds the class labels for the classification loss.
        """
        # ranking loss
        loss_gen_adv = self.criterion_gan(self.disc_se(self.im2se_em), True) + \
                       self.criterion_gan(self.disc_se(self.sk2se_em), True)
        loss_gen_adv = self.lambda_gen_adv * loss_gen_adv
        # Cycle consistency loss
        loss_gen_cyc = self.lambda_im * self.criterion_cyc(self.im_em_hat, self.im_fe) + \
                       self.lambda_sk * self.criterion_cyc(self.sk_em_hat, self.sk_fe)
        loss_gen_cyc = self.lambda_gen_cyc * loss_gen_cyc
        # Classification loss
        loss_gen_cls = self.lambda_se * (self.criterion_cls(self.classifier_se(self.im2se_em), cl) +
                                         self.criterion_cls(self.classifier_se(self.sk2se_em), cl))
        loss_gen_cls = self.lambda_gen_cls * loss_gen_cls
        # Regression loss
        # self.criterion_reg = nn.MSELoss()
        loss_gen_reg = self.lambda_se * (self.criterion_reg(self.im2se_em, self.sk2se_em) +
                                         self.criterion_reg(self.sk2se_em, self.im2se_em))
        loss_gen_reg = self.lambda_gen_reg * loss_gen_reg
        # Sum the above generator losses for back propagation and displaying
        # NOTE(review): loss_gen_adv is computed and returned but deliberately
        # (or accidentally) NOT included in the backpropagated sum below.
        loss_gen = loss_gen_cyc + loss_gen_cls + loss_gen_reg
        self.optimizer_gen.zero_grad()
        loss_gen.backward(retain_graph=True)
        self.optimizer_gen.step()
        # initialize optimizer
        self.optimizer_disc.zero_grad()
        # Semantic discriminator loss
        # Sketch discriminator loss
        # NOTE(review): the discriminators are trained only on real features
        # with target True here -- there is no fake-sample term.
        loss_disc_sk = self.criterion_gan(self.disc_sk(self.sk_fe), True)
        loss_disc_sk = self.lambda_disc_sk * loss_disc_sk
        loss_disc_sk.backward(retain_graph=True)
        # Image discriminator loss
        loss_disc_im = self.criterion_gan(self.disc_im(self.im_fe), True)
        loss_disc_im = self.lambda_disc_im * loss_disc_im
        loss_disc_im.backward()
        # Optimizer step
        self.optimizer_disc.step()
        # Sum the above discriminator losses for displaying
        loss_disc = loss_disc_sk + loss_disc_im
        # NOTE(review): this `loss` dict is built but never returned -- the
        # method returns the three tensors below instead.
        loss = {'gen_adv': loss_gen_adv, 'gen_cyc': loss_gen_cyc, 'gen_cls': loss_gen_cls,
                'gen_reg': loss_gen_reg, 'gen': loss_gen, 'disc_sk': loss_disc_sk, 'disc_im':
                loss_disc_im, 'disc': loss_disc}
        return loss_gen_adv, loss_gen, loss_disc

    def optimize_params(self, sk, im, cl):
        """Run one training step: forward both modalities, then backward."""
        # Forward pass
        self.forward(sk, im)
        # Backward pass
        loss = self.backward(cl)
        return loss

    def get_sketch_embeddings(self, sk):
        # sketch embedding: VGG features -> semantic space
        sk_em = self.gen_sk2se(self.sketch_model(sk))
        return sk_em

    def get_image_embeddings(self, im):
        # image embedding: VGG features -> semantic space
        im_em = self.gen_im2se(self.image_model(im))
        return im_em
``` |
{
"source": "JianGuanTHU/cotk",
"score": 2
} |
#### File: cotk/metric/recorder.py
```python
r"""
Containing some recorders.
"""
import numpy as np
from .metric import MetricBase
class SingleTurnDialogRecorder(MetricBase):
    '''A metric-like class for recording generated sentences and references.

    Arguments:
        {MetricBase.DATALOADER_ARGUMENTS}
        post_allvocabs_key (str): The key of dialog posts with :ref:`allvocabs <vocab_ref>`.
            Default: ``post_allvocabs``.
        resp_allvocabs_key (str): The key of dialog responses with :ref:`allvocabs <vocab_ref>`.
            Default: ``resp_allvocabs``.
        {MetricBase.GEN_KEY_ARGUMENTS}
    '''
    def __init__(self, dataloader, post_allvocabs_key="post_allvocabs", \
                 resp_allvocabs_key="resp_allvocabs", gen_key="gen"):
        super().__init__()
        self.dataloader = dataloader
        self.post_allvocabs_key = post_allvocabs_key
        self.resp_allvocabs_key = resp_allvocabs_key
        self.gen_key = gen_key
        # Accumulated token lists, one entry per recorded sentence.
        self.post_list, self.resp_list, self.gen_list = [], [], []

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys:

                {MetricBase.FORWARD_POST_ALLVOCABS_ARGUMENTS}
                {MetricBase.FORWARD_RESP_ALLVOCABS_ARGUMENTS}
                {MetricBase.FORWARD_GEN_ARGUMENTS}
        '''
        super().forward(data)
        posts = data[self.post_allvocabs_key]
        resps = data[self.resp_allvocabs_key]
        gens = data[self.gen_key]
        if not isinstance(posts, (np.ndarray, list)):
            raise TypeError("Unknown type for post_allvocabs.")
        if not isinstance(resps, (np.ndarray, list)):
            raise TypeError("Unknown type for resp_allvocabs")
        if not isinstance(gens, (np.ndarray, list)):
            raise TypeError("Unknown type for gen")
        if not len(posts) == len(resps) == len(gens):
            raise ValueError("Batch num is not matched.")
        convert = self.dataloader.convert_ids_to_tokens
        for post, resp, gen in zip(posts, resps, gens):
            # Drop the leading <go> token of posts/responses; gen has none.
            self.post_list.append(convert(post[1:]))
            self.resp_list.append(convert(resp[1:]))
            self.gen_list.append(convert(gen))

    def close(self):
        '''
        Returns:
            (dict): Return a dict which contains

            * **post**: recorded post sentences. A jagged 2-d array of str (tokens).
              Size: ``[batch_size, ~sent_length]``.
            * **resp**: recorded response sentences. Same shape convention.
            * **gen**: recorded generated sentences. Same shape convention.
        '''
        res = super().close()
        res["post"] = self.post_list
        res["resp"] = self.resp_list
        res["gen"] = self.gen_list
        return res
class MultiTurnDialogRecorder(MetricBase):
    '''A metric-like class for recording generated sentences and references.

    Arguments:
        {MetricBase.DATALOADER_ARGUMENTS}
        multi_turn_reference_allvocabs_key (str): The key of dialog references with
            :ref:`allvocabs <vocab_ref>`. Default: ``multi_turn_ref_allvocabs``.
        {MetricBase.MULTI_TURN_GEN_KEY_ARGUMENTS}
        {MetricBase.MULTI_TURN_LENGTH_KEY_ARGUMENTS}
    '''
    def __init__(self, dataloader,
                 multi_turn_reference_allvocabs_key="multi_turn_ref_allvocabs", \
                 multi_turn_gen_key="multi_turn_gen", \
                 turn_len_key="turn_length"):
        super().__init__()
        self.dataloader = dataloader
        self.multi_turn_reference_allvocabs_key = multi_turn_reference_allvocabs_key
        self.multi_turn_gen_key = multi_turn_gen_key
        self.turn_len_key = turn_len_key
        # Accumulated token lists, one entry per recorded dialog.
        self.context_list, self.reference_list, self.gen_list = [], [], []

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys:

                {MetricBase.FORWARD_MULTI_TURN_REFERENCE_ALLVOCABS_ARGUMENTS}
                {MetricBase.FORWARD_MULTI_TURN_GEN_ARGUMENTS}
                {MetricBase.FORWARD_MULTI_TURN_LENGTH_ARGUMENTS}
        '''
        super().forward(data)
        refs = data[self.multi_turn_reference_allvocabs_key]
        gens = data[self.multi_turn_gen_key]
        lengths = data[self.turn_len_key]
        if not isinstance(refs, (np.ndarray, list)):
            raise TypeError("Unknown type for reference_allvocabs")
        if not isinstance(gens, (np.ndarray, list)):
            raise TypeError("Unknown type for gen")
        if not isinstance(lengths, (np.ndarray, list)):
            raise TypeError("Unknown type for turn_length")
        if len(lengths) != len(refs) or len(lengths) != len(gens):
            raise ValueError("Batch num is not matched.")
        for ref, gen, length in zip(refs, gens, lengths):
            # References carry a leading <go> token per turn; gen does not.
            self.reference_list.append(self.dataloader.convert_multi_turn_ids_to_tokens( \
                np.array(ref), turn_length=length, ignore_first_token=True))
            self.gen_list.append(self.dataloader.convert_multi_turn_ids_to_tokens( \
                np.array(gen), turn_length=length))
            if len(self.reference_list[-1]) != len(self.gen_list[-1]):
                raise ValueError("Reference turn num %d != gen turn num %d." % \
                    (len(self.reference_list[-1]), len(self.gen_list[-1])))

    def close(self):
        '''
        Returns:
            (dict): Return a dict which contains

            * **reference**: recorded reference sentences. A jagged 3-d array
              of str (tokens). Size: ``[batch_size, ~turn_length, ~sent_length]``.
            * **gen**: recorded generated sentences. Same shape convention.
        '''
        res = super().close()
        res["reference"] = self.reference_list
        res["gen"] = self.gen_list
        return res
class LanguageGenerationRecorder(MetricBase):
    '''A metric-like class for recording generated sentences.

    Arguments:
        {MetricBase.DATALOADER_ARGUMENTS}
        {MetricBase.GEN_KEY_ARGUMENTS}
    '''
    def __init__(self, dataloader, gen_key="gen"):
        super().__init__()
        self.dataloader = dataloader
        self.gen_key = gen_key
        self.gen_list = []

    def forward(self, data):
        '''Processing a batch of data.

        Arguments:
            data (dict): A dict at least contains the following keys:

                {MetricBase.FORWARD_GEN_ARGUMENTS}
        '''
        super().forward(data)
        gen = data[self.gen_key]
        if not isinstance(gen, (np.ndarray, list)):
            raise TypeError("Unknown type for gen")
        self.gen_list.extend(self.dataloader.convert_ids_to_tokens(sent) for sent in gen)

    def close(self):
        '''
        Returns:
            (dict): Return a dict which contains

            * **gen**: recorded generated sentences. A jagged 2-d array of
              str (tokens). Size: ``[batch_size, ~sent_length]``.
        '''
        res = super().close()
        res["gen"] = self.gen_list
        return res
```
#### File: cotk/_utils/hooks.py
```python
from inspect import signature
import pkg_resources
#pylint: disable=global-statement
# Registry of active hook listeners; appended to by client code.
hooks_listener = []

def invoke_listener(method, *argv):
    r'''Dispatch *method* with *argv* on every registered hooks listener.'''
    for listener in hooks_listener:
        getattr(listener, method)(*argv)
def hook_dataloader(fn):
    r'''Decorator for ``Dataloader.__init__``.

    Wraps ``fn`` so that each call is reported to the registered hook
    listeners (via ``invoke_listener``) with the fully-bound,
    default-applied arguments, before delegating to the original function.
    '''
    import functools
    sign = signature(fn)

    @functools.wraps(fn)  # preserve __name__/__doc__ of the wrapped __init__
    def wrapped(*args, **kwargs):
        # Bind positionals/keywords and apply defaults so listeners always
        # see a complete, named argument mapping.
        bound = sign.bind(*args, **kwargs)
        bound.apply_defaults()
        arguments = dict(bound.arguments)
        self = arguments.pop('self')
        # fn.__qualname__ is "ClassName.__init__"; report the class name.
        invoke_listener("add_dataloader", self, fn.__qualname__.split(".")[0], arguments)
        return fn(*args, **kwargs)
    return wrapped
def hook_standard_metric(metric_name=""):
    r'''Decorator factory for ``Dataloader.get_metric``.

    Returns a decorator that reports each call (class name, ``metric_name``
    and the bound, default-applied arguments) to the registered hook
    listeners before delegating to the original function.
    '''
    def decorator(fn):
        import functools
        sign = signature(fn)

        @functools.wraps(fn)  # keep the wrapped function's metadata
        def wrapped(*args, **kwargs):
            bound = sign.bind(*args, **kwargs)
            bound.apply_defaults()
            arguments = dict(bound.arguments)
            self = arguments.pop('self')
            invoke_listener("add_standard_metric", self,
                            fn.__qualname__.split(".")[0], metric_name, arguments)
            # BUGFIX: removed a leftover debug ``print(kwargs)`` that spammed
            # stdout on every metric request.
            return fn(*args, **kwargs)
        return wrapped
    return decorator
def hook_wordvec(fn):
    r'''Decorator for ``WordVector.__init__``.

    Wraps ``fn`` so that each call is reported to the registered hook
    listeners with the fully-bound, default-applied arguments before
    delegating to the original function.
    '''
    import functools
    sign = signature(fn)

    @functools.wraps(fn)  # preserve __name__/__doc__ of the wrapped __init__
    def wrapped(*args, **kwargs):
        bound = sign.bind(*args, **kwargs)
        bound.apply_defaults()
        arguments = dict(bound.arguments)
        self = arguments.pop('self')
        invoke_listener("add_wordvec", self, fn.__qualname__.split(".")[0], arguments)
        return fn(*args, **kwargs)
    return wrapped
class BaseHooksListener:
    r'''An abstract class implementing the basic hook listener.

    Subclasses receive notifications from the ``hook_*`` decorators through
    :func:`invoke_listener`.  FIX: the method signatures now mirror the
    arguments actually passed by the decorators (and already implemented by
    ``SimpleHooksListener``); the previous signatures were missing the
    leading ``obj``/``clsname`` parameters and would raise ``TypeError``
    if a subclass relied on them.
    '''
    def add_dataloader(self, obj, dataloader, args):
        '''Called when a dataloader instance is constructed.'''
        pass

    def add_standard_metric(self, obj, clsname, metric_type, args):
        '''Called when a standard metric is requested on a dataloader.'''
        pass

    def add_wordvec(self, obj, wordvec, args):
        '''Called when a word-vector loader is constructed.'''
        pass
class SimpleHooksListener(BaseHooksListener):
    r'''A simple recorder that stores every hook notification in a dict.'''

    def __init__(self):
        # `record` is the final report returned by close().
        self.record = {
            "cotk_version": pkg_resources.require("cotk")[0].version,
            "dataloader": {},
            "standard_metric": {},
            "wordvec": {}
        }
        # Maps each dataloader instance to its assigned sequential id.
        self.dataloader_id = {}

    def close(self):
        '''Return the accumulated record.'''
        return self.record

    def add_dataloader(self, obj, dataloader, args):
        '''Record a dataloader construction and assign it a sequential id.'''
        # BUGFIX/cleanup: removed leftover debug print() calls and renamed
        # the local `id`, which shadowed the builtin.
        dl_id = len(self.dataloader_id)
        args["id"] = dl_id
        self.record["dataloader"][dataloader] = args
        self.dataloader_id[obj] = dl_id

    def add_standard_metric(self, obj, clsname, metric_type, args):
        '''Record a metric request, linking it to its dataloader's id.'''
        args['dataloader_id'] = self.dataloader_id[obj]
        self.record["standard_metric"][clsname + "_" + metric_type] = args

    def add_wordvec(self, obj, wordvec, args):
        '''Record a word-vector loader construction.'''
        self.record["wordvec"][wordvec] = args
def start_recorder():
    r'''Start recorder'''
    global hooks_listener
    # Replace any previously registered listeners with one fresh recorder
    # (mutates the shared list in place, as clear()+append() did).
    hooks_listener[:] = [SimpleHooksListener()]
def close_recorder():
    r'''Close recorder and return the recorded information.'''
    global hooks_listener
    listener_count = len(hooks_listener)
    # Exactly one recorder must have been started via start_recorder().
    assert listener_count == 1
    return hooks_listener[0].close()
``` |
{
"source": "Jianguo188/LeetCode-Py",
"score": 4
} |
#### File: Templates/04.Queue/Queue-PriorityQueue.py
```python
class Heapq:
    """A hand-rolled binary max-heap offering push/pop/heapify/heapsort.

    All operations work in place on a plain Python list, with the largest
    element kept at index 0 (i.e. a "max" priority queue).
    """

    def heapAdjust(self, nums: [int], index: int, end: int):
        """Sift nums[index] down until nums[index..end] satisfies the max-heap property."""
        child = index * 2 + 1  # left child of `index`
        while child <= end:
            largest = index
            if nums[child] > nums[largest]:
                largest = child
            right = child + 1
            if right <= end and nums[right] > nums[largest]:
                largest = right
            if largest == index:
                # Parent already dominates both children: done.
                break
            nums[index], nums[largest] = nums[largest], nums[index]
            # Continue adjusting the affected subtree.
            index = largest
            child = index * 2 + 1

    def heapify(self, nums: [int]):
        """Rearrange `nums` in place into a max-heap."""
        # Leaves are trivially heaps, so start at the last internal node.
        for root in range((len(nums) - 2) // 2, -1, -1):
            self.heapAdjust(nums, root, len(nums) - 1)

    def heappush(self, nums: list, value):
        """Insert `value` into the max-heap `nums`."""
        nums.append(value)
        pos = len(nums) - 1
        # Sift up: shift smaller ancestors down until value's slot is found.
        while pos > 0:
            parent = (pos - 1) // 2
            if nums[parent] > value:
                break
            nums[pos] = nums[parent]
            pos = parent
        nums[pos] = value

    def heappop(self, nums: list) -> int:
        """Remove and return the largest element (the heap top)."""
        nums[0], nums[-1] = nums[-1], nums[0]
        top = nums.pop()
        if nums:
            # Restore the heap property among the remaining elements.
            self.heapAdjust(nums, 0, len(nums) - 1)
        return top

    def heapSort(self, nums: [int]):
        """Sort `nums` ascending in place via heapsort and return it."""
        self.heapify(nums)
        # Repeatedly move the current maximum to the end of the active range.
        for boundary in range(len(nums) - 1, 0, -1):
            nums[0], nums[boundary] = nums[boundary], nums[0]
            self.heapAdjust(nums, 0, boundary - 1)
        return nums
# --- Demo -------------------------------------------------------------------
nums = [49, 38, 65, 97, 76, 13, 27, 49]
heap = Heapq()

# 1. Heap-sort the list, then rebuild the max-heap on the sorted data.
heap.heapSort(nums)
heap.heapify(nums)

# 2. Exercise heappop(): drain six elements, largest first.
for _ in range(6):
    print(heap.heappop(nums))

# 3. Exercise heappush(): rebuild a heap one element at a time.
nums = [49, 38, 65, 97, 76, 13, 27, 49]
heapList = []
for num in nums:
    heap.heappush(heapList, num)
print(heapList)

# 4. Heap-sort the freshly built heap.
rst = heap.heapSort(heapList)
print(heapList)
``` |
{
"source": "jianguoz/DNNC-few-shot-intent",
"score": 2
} |
#### File: DNNC-few-shot-intent/models/emb_knn.py
```python
from sentence_transformers import SentenceTransformer, losses, SentencesDataset
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import scipy
import math
import os
import torch
from torch.utils.data import DataLoader
from .utils import DisableLogger, get_logger
# Similarity labels used for NLI-style sentence-pair supervision.
ENTAILMENT = 1.0
NON_ENTAILMENT = 0.0

logger = get_logger(__name__)
class EmbKnn:
    # Sentence-embedding nearest-neighbour scorer: encode a fixed set of
    # example sentences once (cache) and score queries by cosine similarity
    # against that cache.
    def __init__(self,
                 path: str,
                 args):
        # `path`: directory of a previously saved SentenceTransformer, or
        # None/missing to start from a pretrained NLI/STS-B checkpoint
        # matching args.bert_model.
        self.args = args
        self.device = torch.device("cuda:0" if torch.cuda.is_available() and not self.args.no_cuda else "cpu")
        with DisableLogger():
            if path is not None and os.path.exists(path):
                self.model = SentenceTransformer(path)
            elif 'roberta' in self.args.bert_model:
                self.model = SentenceTransformer('roberta-base-nli-stsb-mean-tokens')
            else:
                self.model = SentenceTransformer('bert-base-nli-mean-tokens')
        self.model.to(self.device)
        # Embeddings of the cached example sentences; populated by cache().
        self.cached_embeddings = None

    def save(self, dir_path):
        # Persist the underlying SentenceTransformer to `dir_path`.
        self.model.save(dir_path)

    def cache(self, example_sentences):
        # Pre-compute and store embeddings for the support/example set.
        self.model.eval()
        self.cached_embeddings = self.model.encode(example_sentences, show_progress_bar = False)

    def encode(self,
               text):
        # Embed `text` and return the embeddings as a float tensor.
        self.model.eval()
        query_embeddings = self.model.encode(text, show_progress_bar = False)
        return torch.FloatTensor(query_embeddings)

    def predict(self,
                text):
        # Cosine similarity (1 - cosine distance) of each query against
        # every cached example; cache() must have been called first.
        assert self.cached_embeddings is not None
        self.model.eval()
        query_embeddings = self.model.encode(text, show_progress_bar = False)
        # NOTE(review): relies on `scipy.spatial` being reachable from the
        # bare `import scipy` at module top — confirm with the pinned scipy
        # version (older scipy requires `import scipy.spatial` explicitly).
        distances = scipy.spatial.distance.cdist(query_embeddings, self.cached_embeddings, "cosine")
        distances = 1.0-distances
        return torch.FloatTensor(distances)

    def train(self, train_examples, dev_examples, dir_path = None):
        # Fine-tune the sentence encoder with cosine-similarity regression
        # against the ENTAILMENT / NON_ENTAILMENT labels.
        train_examples = SentencesDataset(train_examples, self.model)
        dev_examples = SentencesDataset(dev_examples, self.model)
        train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=self.args.train_batch_size)
        dev_dataloader = DataLoader(dev_examples, shuffle=False, batch_size=self.args.eval_batch_size)
        train_loss = losses.CosineSimilarityLoss(model=self.model)
        evaluator = EmbeddingSimilarityEvaluator(dev_dataloader)
        # Linear warmup over the configured fraction of total optimizer steps.
        warmup_steps = math.ceil(len(train_examples)*self.args.num_train_epochs/self.args.train_batch_size*self.args.warmup_proportion)
        self.model.zero_grad()
        self.model.train()
        self.model.fit(train_objectives=[(train_dataloader, train_loss)],
                       evaluator=evaluator,
                       epochs=self.args.num_train_epochs,
                       evaluation_steps=10000,
                       warmup_steps=warmup_steps,
                       output_path=None,
                       optimizer_params = {'lr': self.args.learning_rate, 'eps': 1e-6, 'correct_bias': False})
```
#### File: jianguoz/DNNC-few-shot-intent/train_classifier.py
```python
import argparse
from tqdm import tqdm
import random
import os
import json
from collections import defaultdict
from models.classifier import Classifier
from models.utils import InputExample
from models.utils import load_intent_datasets, load_intent_examples, sample, print_results
from models.utils import calc_oos_precision, calc_in_acc, calc_oos_recall, calc_oos_f1
from models.utils import THRESHOLDS
import time
def main():
    """Train/evaluate few-shot intent classifiers over several random trials.

    For each of ``--num_trials`` trials: sample ``--few_shot_num`` examples
    per intent, train (or load) a ``Classifier``, evaluate on the in-domain
    and out-of-scope dev sets across all ``THRESHOLDS``, and append the
    per-threshold accuracy/recall/precision/F1 numbers to a results file
    under ``--output_dir`` (when given).
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("--seed",
                        default=42,
                        type=int,
                        help="Random seed")
    parser.add_argument("--bert_model",
                        default='roberta-base',
                        type=str,
                        help="BERT model")
    parser.add_argument("--train_batch_size",
                        default=15,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=25.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--max_grad_norm', help='gradient clipping for Max gradient norm.', required=False, default=1.0,
                        type=float)
    parser.add_argument('--label_smoothing',
                        type=float,
                        default=0.1,
                        help='Coefficient for label smoothing (default: 0.1, if 0.0, no label smoothing)')
    parser.add_argument('--max_seq_length',
                        type=int,
                        default=128,
                        help='Maximum number of paraphrases for each sentence')
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Whether to lowercase input string")

    # Special params
    parser.add_argument('--train_file_path',
                        type=str,
                        default=None,
                        help='Training data path')
    parser.add_argument('--dev_file_path',
                        type=str,
                        default=None,
                        help='Validation data path')
    parser.add_argument('--oos_dev_file_path',
                        type=str,
                        default=None,
                        help='Out-of-Scope validation data path')
    parser.add_argument('--output_dir',
                        type=str,
                        default=None,
                        help='Output file path')
    parser.add_argument('--save_model_path',
                        type=str,
                        default='',
                        help='path to save the model checkpoints')
    parser.add_argument('--few_shot_num',
                        type=int,
                        default=5,
                        help='Number of training examples for each class')
    parser.add_argument('--num_trials',
                        type=int,
                        default=10,
                        help='Number of trials to see robustness')
    parser.add_argument("--do_predict",
                        action='store_true',
                        help="do_predict the model")
    parser.add_argument("--do_final_test",
                        action='store_true',
                        help="do_predict the model")

    args = parser.parse_args()

    random.seed(args.seed)

    N = args.few_shot_num  # examples per intent
    T = args.num_trials    # independent trials

    train_file_path = args.train_file_path
    dev_file_path = args.dev_file_path
    train_examples, dev_examples = load_intent_datasets(train_file_path, dev_file_path, args.do_lower_case)
    # One independent N-shot sample of the training data per trial.
    sampled_tasks = [sample(N, train_examples) for i in range(T)]

    if args.oos_dev_file_path is not None:
        oos_dev_examples = load_intent_examples(args.oos_dev_file_path, args.do_lower_case)
    else:
        oos_dev_examples = []

    label_lists = []
    intent_train_examples = []
    intent_dev_examples = []
    intent_oos_dev_examples = []

    for i in range(T):
        tasks = sampled_tasks[i]
        label_lists.append([])
        intent_train_examples.append([])
        intent_dev_examples.append([InputExample(e.text, None, e.label) for e in dev_examples])
        intent_oos_dev_examples.append([InputExample(e.text, None, None) for e in oos_dev_examples])

        for task in tasks:
            label = task['task']
            examples = task['examples']
            label_lists[-1].append(label)

            for j in range(len(examples)):
                intent_train_examples[-1].append(InputExample(examples[j], None, label))

    if args.output_dir is not None:
        folder_name = '{}/{}-shot-{}/'.format(args.output_dir, N, args.bert_model)

        if not os.path.exists(folder_name):
            os.makedirs(folder_name)

        file_name = 'batch_{}---epoch_{}---lr_{}'.format(args.train_batch_size, args.num_train_epochs, args.learning_rate)
        file_name = '{}__oos-threshold'.format(file_name)

        if args.do_final_test:
            file_name = file_name + '_TEST.txt'
        else:
            file_name = file_name + '.txt'

        f = open(folder_name + file_name, 'w')
    else:
        f = None
        # BUGFIX: `folder_name` was previously undefined on this branch but
        # is used below to build `save_model_path`, raising NameError
        # whenever --output_dir was omitted.  Fall back to the current
        # directory.
        folder_name = ''

    for j in range(T):
        save_model_path = '{}_{}'.format(folder_name + args.save_model_path, j + 1)

        # A pre-existing checkpoint directory implies prediction mode.
        if os.path.exists(save_model_path):
            assert args.do_predict
        else:
            assert not args.do_predict

        if args.save_model_path and os.path.exists(save_model_path):
            # Reuse a previously trained checkpoint.
            model = Classifier(path=save_model_path,
                               label_list=label_lists[j],
                               args=args)
        else:
            # Train a fresh model on this trial's few-shot sample.
            model = Classifier(path=None,
                               label_list=label_lists[j],
                               args=args)
            model.train(intent_train_examples[j])

            if args.save_model_path:
                if not os.path.exists(save_model_path):
                    os.mkdir(save_model_path)
                model.save(save_model_path)

        in_domain_preds = model.evaluate(intent_dev_examples[j])
        oos_preds = model.evaluate(intent_oos_dev_examples[j])

        in_acc = calc_in_acc(dev_examples, in_domain_preds, THRESHOLDS)
        oos_recall = calc_oos_recall(oos_preds, THRESHOLDS)
        oos_prec = calc_oos_precision(in_domain_preds, oos_preds, THRESHOLDS)
        oos_f1 = calc_oos_f1(oos_recall, oos_prec)

        print_results(THRESHOLDS, in_acc, oos_recall, oos_prec, oos_f1)

        if f is not None:
            # One space-separated quadruple per threshold, one line per trial.
            for i in range(len(in_acc)):
                f.write('{},{},{},{} '.format(in_acc[i], oos_recall[i], oos_prec[i], oos_f1[i]))
            f.write('\n')

    if f is not None:
        f.close()
if __name__ == '__main__':
main()
``` |
{
"source": "jianguozhouzunyimedicaluniversity/loon",
"score": 2
} |
#### File: src/loon/skeleton.py
```python
import os
import sys
import argparse
import logging
if __package__ == '' or __package__ is None: # Use for test
from __init__ import __version__, __author__, __license__
from classes import Host, PBS
from tool import batch
else:
from loon import __version__, __author__, __license__
from loon.classes import Host, PBS
from loon.tool import batch
_logger = logging.getLogger(__name__)
def parse_args(args):
    """Parse command line parameters

    Args:
      args ([str]): command line parameters as list of strings

    Returns:
      (tuple): the parsed :obj:`argparse.Namespace` and the configured
      :obj:`argparse.ArgumentParser` itself (returned so the caller can
      print help when no subcommand was given)
    """
    parser = argparse.ArgumentParser(description="Be an efficient loon.")
    # Show version info
    parser.add_argument(
        "-V",
        "--version",
        action="version",
        version="loon {ver} released under {license} license.".format(
            ver=__version__, license=__license__.upper()))
    parser.add_argument(
        "--author",
        action="version",
        help="show info of program's author",
        version="Author: 王诗翔 Email: <EMAIL> GitHub: @{author}".
        format(author=__author__))
    # Common arguments for host (shared by add/delete/switch via `parents=`)
    host_parent_parser = argparse.ArgumentParser(add_help=False)
    host_parent_parser.add_argument(
        '-N',
        '--name',
        dest="name",
        help='Host alias, default is value from -U',
        type=str)
    host_parent_parser.add_argument('-U',
                                    '--username',
                                    dest='username',
                                    help='Username for remote host',
                                    type=str)
    host_parent_parser.add_argument(
        '-H',
        '--host',
        dest='host',
        help='IP address for remote host (e.g. 192.168.0.1)',
        type=str)
    host_parent_parser.add_argument('-P',
                                    '--port',
                                    dest="port",
                                    help='Port for remote host, default is 22',
                                    type=int,
                                    default=22)
    # Common arguments for all commands
    verbose_parser = argparse.ArgumentParser(add_help=False)
    verbose_parser.add_argument("-v",
                                "--verbose",
                                dest="loglevel",
                                help="set loglevel to INFO",
                                action="store_const",
                                const=logging.INFO)
    verbose_parser.add_argument('--dry',
                                help="Dry run the commands",
                                action='store_true')
    # Subcommands
    subparsers = parser.add_subparsers(
        title='subcommands',
        #description='valid subcommands',
        help="description",
        dest="subparsers_name")
    # Create the parser for the "add" command
    parser_add = subparsers.add_parser(
        'add',
        help="Add a remote host",
        parents=[host_parent_parser, verbose_parser])
    parser_add.add_argument('-A',
                            '--active',
                            dest='switch_active',
                            help='Set new host as active host',
                            action='store_true')
    # Create the parser for the "delete" command
    parser_del = subparsers.add_parser(
        'delete',
        help="Delete a remote host",
        parents=[host_parent_parser, verbose_parser])
    # Create the parser for the "switch" command
    parser_switch = subparsers.add_parser(
        'switch',
        help="Switch active remote host",
        parents=[host_parent_parser, verbose_parser])
    # Create the parser for the "list" command
    parser_list = subparsers.add_parser('list',
                                        help="List all remote hosts",
                                        parents=[verbose_parser])
    # Create the parser for the "rename" command
    parser_rename = subparsers.add_parser('rename',
                                          help="Rename host alias",
                                          parents=[verbose_parser])
    parser_rename.add_argument('old', help="Old host alias", type=str)
    parser_rename.add_argument('new', help="New host alias", type=str)
    # Create the parser for the "run" command
    parser_run = subparsers.add_parser(
        'run',
        help='Run commands or scripts on remote',
        parents=[verbose_parser])
    parser_run.add_argument(
        nargs='+',
        dest='commands',
        help=
        "Commands/scripts to run, special symbol or option should be quoted, e.g. 'ls -l ~', 'ls -l'"
    )
    parser_run.add_argument('-f',
                            '--file',
                            dest='run_file',
                            help='Run scripts instead of commands',
                            action='store_true')
    parser_run.add_argument(
        '--data',
        help=('Include a data directory when run local scripts'),
        type=str,
        required=False)
    parser_run.add_argument(
        '--remote',
        dest='remote_file',
        help='Scripts are directly from the active remote host',
        action='store_true')
    parser_run.add_argument(
        '--dir',
        help=
        'Remote directory for storing local scripts. Only used when flag --file sets and --remote does not set. Default is /tmp',
        default='/tmp')
    parser_run.add_argument(
        '--prog',
        help=
        'Specified program to run scripts, if not set, scripts will be executed directly assuming shbang exist',
        required=False)
    # Create the parser for the "upload" command
    parser_upload = subparsers.add_parser(
        'upload',
        help='Upload files to active remote host',
        parents=[verbose_parser])
    parser_upload.add_argument('source',
                               nargs='+',
                               help='Source files to upload')
    parser_upload.add_argument('destination',
                               help="Remote destination directory",
                               type=str)
    parser_upload.add_argument('--rsync',
                               help="Use rsync instead of scp",
                               action='store_true')
    # Create the parser for the "download" command
    parser_download = subparsers.add_parser(
        'download',
        help='Download files from active remote host',
        parents=[verbose_parser])
    parser_download.add_argument('source',
                                 nargs='+',
                                 help='Source files to download')
    parser_download.add_argument(
        'destination',
        help=
        "Local destination directory, note '~' should be quoted in some cases",
        type=str)
    parser_download.add_argument('--rsync',
                                 help="Use rsync instead of scp",
                                 action='store_true')
    # Create the parser for the "gen" command
    parser_gen = subparsers.add_parser(
        'gen',
        help='Generate a batch of (script) files',
        parents=[verbose_parser])
    parser_gen.add_argument('-t',
                            '--template',
                            help="A template file containing placeholders")
    parser_gen.add_argument(
        '-s',
        '--samplefile',
        help=
        "A csv file containing unique filenames (the first column) and replacing labels"
    )
    parser_gen.add_argument(
        '-m',
        '--mapfile',
        help=
        "A csv file containing placeholders and column index (0-based) indicating replacing labels in samplefile"
    )
    parser_gen.add_argument('-o', '--output', help="Output directory")
    # Create the parser for the "batch" command
    parser_batch = subparsers.add_parser(
        'batch',
        help="Batch process commands with placeholders",
        parents=[verbose_parser])
    parser_batch.add_argument(
        '-f',
        '--file',
        help=
        r'A structed file/stdin like CSV, TSV etc. Each column is placeholder target, i.e. {0} targets the first column, {1} targets the second column, etc.',
        type=argparse.FileType('r'),
        default=sys.stdin,
        nargs='?')
    # parser_batch.add_argument(
    #     '-o',
    #     '--output',
    #     help=
    #     r'default is stdout, can be a file',
    #     type=argparse.FileType('w'),
    #     default=sys.stdout,
    #     nargs='?')
    parser_batch.add_argument(
        '-s',
        '--sep',
        help=r"File separator, ',' for CSV (default) and '\t' for TSV",
        default=',',
        required=False)
    parser_batch.add_argument('-T',
                              '--thread',
                              help="Thread number, default is 1",
                              required=False,
                              default=1,
                              type=int)
    parser_batch.add_argument('--header',
                              help="Set it if input file contains header",
                              action='store_true')
    parser_batch.add_argument('cmds',
                              type=str,
                              help="A sample command with placeholders")
    # Create the parser for the "pbstemp" command
    parser_pbstemp = subparsers.add_parser('pbstemp',
                                           help='Generate a PBS template file',
                                           parents=[verbose_parser])
    parser_pbstemp.add_argument(
        '-i',
        '--input',
        help='A template file, if not set, a default template is used',
        type=str,
        required=False)
    parser_pbstemp.add_argument('-o',
                                '--output',
                                help="Output file, default is work.pbs",
                                type=str,
                                required=False)
    # Create the parser for the "pbsgen" command
    parser_pbsgen = subparsers.add_parser(
        'pbsgen',
        help='Generate a batch of PBS files (with .pbs extension)',
        parents=[verbose_parser])
    parser_pbsgen.add_argument(
        '-t', '--template', help="A PBS template file containing placeholders")
    parser_pbsgen.add_argument(
        '-s',
        '--samplefile',
        help=
        "A csv file containing unique filenames (the first column) and replacing labels"
    )
    parser_pbsgen.add_argument(
        '-m',
        '--mapfile',
        help=
        "A csv file containing placeholders and column index (0-based) indicating replacing labels in samplefile"
    )
    parser_pbsgen.add_argument('-o', '--output', help="Output directory")
    # Create the parser for the "pbsgen_example" command
    parser_genexample = subparsers.add_parser(
        'pbsgen_example',
        help='Generate example files for pbsgen command',
        parents=[verbose_parser])
    parser_genexample.add_argument('output', help='Output directory')
    # Create the parser for the "pbssub" command
    parser_pbssub = subparsers.add_parser('pbssub',
                                          help='Submit PBS tasks',
                                          parents=[verbose_parser])
    parser_pbssub.add_argument(
        '--remote',
        dest='remote_file',
        help='PBS task files are located at the active remote host',
        action='store_true')
    parser_pbssub.add_argument(
        '--workdir',
        help=
        'Working directory, default is /tmp for remote host and otherwise the command executed path',
        required=False)
    parser_pbssub.add_argument(
        nargs='+',
        dest='tasks',
        help="Tasks to submit, can be a directory containing only PBS files")
    # Create the parser for the "pbsdeploy" command
    parser_deploy = subparsers.add_parser(
        'pbsdeploy',
        help='Deploy target destination to remote host',
        parents=[verbose_parser])
    parser_deploy.add_argument(
        'target', help='Target directory containing PBS files and more')
    parser_deploy.add_argument(
        'destination',
        help=
        "Local destination directory, note '~' should be quoted in some cases",
        nargs='?')
    parser_deploy.add_argument('--rsync',
                               help="Use rsync instead of scp",
                               action='store_true')
    # Create the parser for the "pbscheck" command
    parser_pbscheck = subparsers.add_parser(
        'pbscheck',
        help='Check status of PBS job on remote host',
        parents=[verbose_parser])
    parser_pbscheck.add_argument(
        'job_id',
        help="ID of job, if not set, all running jobs will be returned",
        type=str,
        nargs='?')
    return parser.parse_args(args), parser
def setup_logging(loglevel):
    """Setup basic logging

    Args:
      loglevel (int): minimum loglevel for emitting messages
    """
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
def main(args):
    """Main entry point allowing external calls

    Args:
      args ([str]): command line parameter list
    """
    args_bk = args  # keep the raw argv so an empty invocation can show help
    args, parser = parse_args(args)
    if len(args_bk) == 0:
        parser.print_help()
        parser.exit()
    setup_logging(args.loglevel)
    _logger.info("Starting loon...")
    host = Host()
    pbs = PBS()
    # Only some subcommands expose --rsync; default to scp otherwise.
    if hasattr(args, 'rsync') and args.rsync:
        use_rsync = True
    else:
        use_rsync = False

    # Deparse arguments: dispatch on the chosen subcommand.
    if args.subparsers_name == 'add':
        _logger.info("Add command is detected.")
        if args.username is None or args.host is None:
            print("Error: username and host are both required in add command.")
            sys.exit(1)
        if args.name is None:
            args.name = args.username
        host.add(name=args.name,
                 username=args.username,
                 host=args.host,
                 port=args.port,
                 dry_run=args.dry)
        if args.switch_active:
            host.switch(name=args.name,
                        username=args.username,
                        host=args.host,
                        port=args.port,
                        dry_run=args.dry)
    elif args.subparsers_name == 'delete':
        _logger.info("Delete command is detected.")
        if args.username is None or args.host is None:
            if args.name is None:
                print("Error: either specify name or both username and host")
                sys.exit(1)
        host.delete(name=args.name,
                    username=args.username,
                    host=args.host,
                    port=args.port,
                    dry_run=args.dry)
    elif args.subparsers_name == 'switch':
        _logger.info("Switch command is detected.")
        if args.username is None or args.host is None:
            if args.name is None:
                print("Error: either specify name or both username and host")
                sys.exit(1)
        host.switch(name=args.name,
                    username=args.username,
                    host=args.host,
                    port=args.port,
                    dry_run=args.dry)
    elif args.subparsers_name == 'list':
        _logger.info("List command is detected.")
        host.list()
    elif args.subparsers_name == 'rename':
        _logger.info("Rename command is detected.")
        host.rename(args.old, args.new, dry_run=args.dry)
    elif args.subparsers_name == 'run':
        _logger.info("Run command is detected.")
        # Script mode passes the file list through; command mode joins the
        # tokens back into a single shell command string.
        if args.run_file:
            commands = args.commands
        else:
            commands = " ".join(args.commands)
        host.cmd(commands,
                 _logger=_logger,
                 run_file=args.run_file,
                 data_dir=args.data,
                 remote_file=args.remote_file,
                 dir=args.dir,
                 prog=args.prog,
                 dry_run=args.dry)
    elif args.subparsers_name == 'upload':
        _logger.info("Upload command is detected.")
        #host.connect(open_channel=False)
        host.upload(args.source,
                    args.destination,
                    _logger=_logger,
                    use_rsync=use_rsync,
                    dry_run=args.dry)
    elif args.subparsers_name == 'download':
        _logger.info("Download command is detected.")
        #host.connect(open_channel=False)
        host.download(args.source,
                      args.destination,
                      _logger=_logger,
                      use_rsync=use_rsync,
                      dry_run=args.dry)
    elif args.subparsers_name == 'batch':
        _logger.info("Batch command is detected.")
        batch(args.file,
              args.cmds,
              sep=args.sep,
              thread=args.thread,
              header=args.header,
              dry_run=args.dry,
              _logger=_logger)
    elif args.subparsers_name == 'pbstemp':
        _logger.info("pbstemp command is detected.")
        pbs.gen_template(args.input, args.output, dry_run=args.dry)
    elif args.subparsers_name == 'gen':
        _logger.info("gen command is detected.")
        pbs.gen_pbs(args.template,
                    args.samplefile,
                    args.mapfile,
                    args.output,
                    _logger=_logger,
                    pbs_mode=False,
                    dry_run=args.dry)
    elif args.subparsers_name == 'pbsgen':
        _logger.info("pbsgen command is detected.")
        pbs.gen_pbs(args.template,
                    args.samplefile,
                    args.mapfile,
                    args.output,
                    _logger=_logger,
                    dry_run=args.dry)
    elif args.subparsers_name == 'pbsgen_example':
        pbs.gen_pbs_example(args.output, _logger=_logger, dry_run=args.dry)
    elif args.subparsers_name == 'pbssub':
        _logger.info("pbssub command is detected.")
        pbs.sub(host,
                args.tasks,
                args.remote_file,
                args.workdir,
                _logger=_logger,
                dry_run=args.dry)
    elif args.subparsers_name == 'pbsdeploy':
        _logger.info("pbsdeploy command is detected.")
        pbs.deploy(host,
                   args.target,
                   args.destination,
                   _logger=_logger,
                   use_rsync=use_rsync,
                   dry_run=args.dry)
    elif args.subparsers_name == 'pbscheck':
        _logger.info("pbscheck command is detected.")
        pbs.check(host, args.job_id, dry_run=args.dry)
    _logger.info("loon ends here")
def run():
    """Entry point for console_scripts"""
    # Drop the program name and hand the remaining argv to main().
    argv = sys.argv[1:]
    main(argv)
if __name__ == "__main__":
run()
``` |
{
"source": "jiangwei1995910/cnocr",
"score": 2
} |
#### File: cnocr/fit/fit.py
```python
import logging
import os
import mxnet as mx
def _load_model(args, rank=0):
if 'load_epoch' not in args or args.load_epoch is None:
return (None, None, None)
assert args.prefix is not None
model_prefix = args.prefix
if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
model_prefix += "-%d" % (rank)
sym, arg_params, aux_params = mx.model.load_checkpoint(
model_prefix, args.load_epoch)
logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
return (sym, arg_params, aux_params)
def fit(network, data_train, data_val, metrics, args, hp, data_names=None):
    # Train `network` with mx.mod.Module, optionally resuming from the
    # checkpoint selected by args.load_epoch (see _load_model).
    #   metrics: module providing accuracy / accuracy_lcs functions.
    #   hp:      hyper-parameters (num_epoch, learning_rate, batch_size).
    # Pick device contexts: all requested GPUs, otherwise the requested CPUs.
    if args.gpu:
        contexts = [mx.context.gpu(i) for i in range(args.gpu)]
    else:
        contexts = [mx.context.cpu(i) for i in range(args.cpu)]
    sym, arg_params, aux_params = _load_model(args)
    if sym is not None:
        # A resumed graph must be identical to the freshly built one.
        assert sym.tojson() == network.tojson()
    # Make sure the checkpoint directory exists before training starts.
    if not os.path.exists(os.path.dirname(args.prefix)):
        os.makedirs(os.path.dirname(args.prefix))
    module = mx.mod.Module(
        symbol = network,
        data_names= ["data"] if data_names is None else data_names,
        label_names=['label'],
        context=contexts)
    module.fit(train_data=data_train,
               eval_data=data_val,
               begin_epoch=args.load_epoch if args.load_epoch else 0,
               num_epoch=hp.num_epoch,
               # use metrics.accuracy or metrics.accuracy_lcs
               eval_metric=mx.metric.np(metrics.accuracy, allow_extra_outputs=True),
               optimizer='AdaDelta',
               optimizer_params={'learning_rate': hp.learning_rate,
                                 # 'momentum': hp.momentum,
                                 'wd': 0.00001,
                                 },
               initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
               arg_params=arg_params,
               aux_params=aux_params,
               batch_end_callback=mx.callback.Speedometer(hp.batch_size, 50),
               epoch_end_callback=mx.callback.do_checkpoint(args.prefix),
               )
``` |
{
"source": "jiangwei221/COTR-1",
"score": 2
} |
#### File: COTR/datasets/megadepth_dataset.py
```python
import os
import json
import random
from collections import namedtuple
import numpy as np
from COTR.datasets import colmap_helper
from COTR.global_configs import dataset_config
from COTR.sfm_scenes import knn_search
from COTR.utils import debug_utils, utils, constants
# (scene_index, capture_index): addresses one capture inside the scene list.
SceneCapIndex = namedtuple('SceneCapIndex', ['scene_index', 'capture_index'])
def prefix_of_img_path_for_magedepth(img_path):
    '''
    get the prefix for image of megadepth dataset
    '''
    # An image sits four directory levels below the dataset prefix
    # (<prefix>/<scene>/<dense>/<imgs>/<name>); climb four levels up.
    four_levels_up = os.path.join(img_path, '..', '..', '..', '..')
    return os.path.abspath(four_levels_up) + '/'
class MegadepthSceneDataBase():
    '''Process-wide cache of loaded MegaDepth scenes and their KNN engines.

    Scenes are keyed by their directory so that multiple datasets sharing a
    scene only pay the (expensive) COLMAP read once.
    '''
    scenes = {}
    knn_engine_dict = {}

    @classmethod
    def _load_scene(cls, opt, scene_dir_dict):
        '''Load one scene (and its KNN engine) into the class-level cache.

        Raises:
            NotImplementedError: if ``opt.info_level`` is ``'rgb'``.
            ValueError: for any other unrecognized ``opt.info_level``.
        '''
        if scene_dir_dict['scene_dir'] in cls.scenes:
            return  # already cached
        if opt.info_level == 'rgb':
            # FIX: was `assert 0`, which is stripped under -O and carries
            # no message; raise an explicit, descriptive error instead.
            raise NotImplementedError("info_level 'rgb' is not supported")
        elif opt.info_level == 'rgbd':
            scene_dir = scene_dir_dict['scene_dir']
            images_dir = scene_dir_dict['image_dir']
            depth_dir = scene_dir_dict['depth_dir']
            scene = colmap_helper.ColmapWithDepthAsciiReader.read_sfm_scene_given_valid_list_path(scene_dir, images_dir, depth_dir, dataset_config[opt.dataset_name]['valid_list_json'], opt.crop_cam)
            if opt.use_ram:
                scene.read_data_to_ram(['image', 'depth'])
        else:
            # FIX: bare `raise ValueError()` gave no hint about the cause.
            raise ValueError("unknown info_level: {}".format(opt.info_level))
        knn_engine = knn_search.ReprojRatioKnnSearch(scene)
        cls.scenes[scene_dir_dict['scene_dir']] = scene
        cls.knn_engine_dict[scene_dir_dict['scene_dir']] = knn_engine
class MegadepthDataset():
    # Query/database view over one or more cached MegaDepth scenes.
    # Queries come from the split-specific json list; the retrieval database
    # is always built from the training split.  Per-scene KNN engines return
    # covisible neighbour captures for each query.
    def __init__(self, opt, dataset_type):
        assert dataset_type in ['train', 'val', 'test']
        assert len(opt.scenes_name_list) > 0
        self.opt = opt
        self.dataset_type = dataset_type
        self.use_ram = opt.use_ram
        self.scenes_name_list = opt.scenes_name_list
        self.scenes = None
        self.knn_engine_list = None
        self.total_caps_set = None
        self.query_caps_set = None
        self.db_caps_set = None
        # image path -> SceneCapIndex(scene index, capture index in scene)
        self.img_path_to_scene_cap_index_dict = {}
        # scene index -> sorted np.array of capture ids usable as database
        self.scene_index_to_db_caps_mask_dict = {}
        self._load_scenes()

    @property
    def num_scenes(self):
        return len(self.scenes)

    @property
    def num_queries(self):
        return len(self.query_caps_set)

    @property
    def num_db(self):
        return len(self.db_caps_set)

    def get_scene_cap_index_by_index(self, index):
        # Map a flat query index to its (scene, capture) location.
        # Sorting the query set makes the mapping deterministic across runs.
        assert index < len(self.query_caps_set)
        img_path = sorted(list(self.query_caps_set))[index]
        scene_cap_index = self.img_path_to_scene_cap_index_dict[img_path]
        return scene_cap_index

    def _get_common_subset_caps_from_json(self, json_path, total_caps):
        # Intersect the json's (prefix-relative) capture list with the
        # captures actually loaded from disk.
        prefix = prefix_of_img_path_for_magedepth(list(total_caps)[0])
        with open(json_path, 'r') as f:
            common_caps = [prefix + cap for cap in json.load(f)]
        common_caps = set(total_caps) & set(common_caps)
        return common_caps

    def _extend_img_path_to_scene_cap_index_dict(self, img_path_to_cap_index_dict, scene_id):
        # Register all captures of one scene in the global path->index map.
        for key in img_path_to_cap_index_dict.keys():
            self.img_path_to_scene_cap_index_dict[key] = SceneCapIndex(scene_id, img_path_to_cap_index_dict[key])

    def _create_scene_index_to_db_caps_mask_dict(self, db_caps_set):
        # Group database capture ids by scene, as sorted numpy arrays.
        scene_index_to_db_caps_mask_dict = {}
        for cap in db_caps_set:
            scene_id, cap_id = self.img_path_to_scene_cap_index_dict[cap]
            if scene_id not in scene_index_to_db_caps_mask_dict:
                scene_index_to_db_caps_mask_dict[scene_id] = []
            scene_index_to_db_caps_mask_dict[scene_id].append(cap_id)
        for _k, _v in scene_index_to_db_caps_mask_dict.items():
            scene_index_to_db_caps_mask_dict[_k] = np.array(sorted(_v))
        return scene_index_to_db_caps_mask_dict

    def _load_scenes(self):
        # Load every scene through the class-level cache and derive the
        # query/database splits from the dataset's json lists.
        scenes = []
        knn_engine_list = []
        total_caps_set = set()
        for scene_id, scene_dir_dict in enumerate(self.scenes_name_list):
            MegadepthSceneDataBase._load_scene(self.opt, scene_dir_dict)
            scene = MegadepthSceneDataBase.scenes[scene_dir_dict['scene_dir']]
            knn_engine = MegadepthSceneDataBase.knn_engine_dict[scene_dir_dict['scene_dir']]
            total_caps_set = total_caps_set | set(scene.img_path_to_index_dict.keys())
            self._extend_img_path_to_scene_cap_index_dict(scene.img_path_to_index_dict, scene_id)
            scenes.append(scene)
            knn_engine_list.append(knn_engine)
        self.scenes = scenes
        self.knn_engine_list = knn_engine_list
        self.total_caps_set = total_caps_set
        self.query_caps_set = self._get_common_subset_caps_from_json(dataset_config[self.opt.dataset_name][f'{self.dataset_type}_json'], total_caps_set)
        # The retrieval database always comes from the training split.
        self.db_caps_set = self._get_common_subset_caps_from_json(dataset_config[self.opt.dataset_name]['train_json'], total_caps_set)
        self.scene_index_to_db_caps_mask_dict = self._create_scene_index_to_db_caps_mask_dict(self.db_caps_set)

    def get_query_with_knn(self, index):
        # Return (query capture, up to k_size neighbours sampled at random
        # from the pool_size nearest same-scene database captures).
        scene_index, cap_index = self.get_scene_cap_index_by_index(index)
        query_cap = self.scenes[scene_index].captures[cap_index]
        knn_engine = self.knn_engine_list[scene_index]
        if scene_index in self.scene_index_to_db_caps_mask_dict:
            db_mask = self.scene_index_to_db_caps_mask_dict[scene_index]
        else:
            db_mask = None
        pool = knn_engine.get_knn(query_cap, self.opt.pool_size, db_mask=db_mask)
        nn_caps = random.sample(pool, min(len(pool), self.opt.k_size))
        return query_cap, nn_caps
```
#### File: COTR/models/cotr_model.py
```python
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from COTR.utils import debug_utils, constants, utils
from .misc import (NestedTensor, nested_tensor_from_tensor_list)
from .backbone import build_backbone, build_fast_backbone
from .transformer import build_transformer, Transformer
from .position_encoding import NerfPositionalEncoding, MLP
from .light_transformer import *
class COTR(nn.Module):
def __init__(self, backbone, transformer, sine_type='lin_sine'):
super().__init__()
self.transformer = transformer
hidden_dim = transformer.d_model
self.corr_embed = MLP(hidden_dim, hidden_dim, 2, 3)
self.query_proj = NerfPositionalEncoding(hidden_dim // 4, sine_type)
self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
def forward(self, samples: NestedTensor, queries):
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.backbone(samples)
src, mask = features[-1].decompose()
assert mask is not None
_b, _q, _ = queries.shape
queries = queries.reshape(-1, 2)
queries = self.query_proj(queries).reshape(_b, _q, -1)
queries = queries.permute(1, 0, 2)
hs = self.transformer(self.input_proj(src), mask, queries, pos[-1])[0]
outputs_corr = self.corr_embed(hs)
out = {'pred_corrs': outputs_corr[-1]}
return out
class FastCOTR(nn.Module):
def __init__(self, backbone, transformer, sine_type='lin_sine'):
super().__init__()
self.transformer = transformer
hidden_dim = transformer.d_model
self.corr_embed = MLP(hidden_dim, hidden_dim, 2, 3)
self.query_proj = NerfPositionalEncoding(hidden_dim // 4, sine_type)
self.low_res_proj = nn.Conv2d(backbone.num_low_res_channels, hidden_dim, kernel_size=1)
self.high_res_proj = nn.Conv2d(backbone.num_high_res_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
def forward(self, samples: NestedTensor, queries):
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.backbone(samples)
orig_queries = queries.clone()
# low res
src, mask = features[0].decompose()
assert src.shape[-1] % 2 == 0
assert mask is not None and mask.any() == False
low_res = src.shape[-1] // 2
src = self.low_res_proj(src)
src = src + pos[0]
left_feat, right_feat = src[..., :low_res], src[..., low_res:]
left_feat, right_feat = left_feat.flatten(2).permute(0, 2, 1), right_feat.flatten(2).permute(0, 2, 1)
_b, _q, _ = queries.shape
queries = queries.reshape(-1, 2)
queries = self.query_proj(queries).reshape(_b, _q, -1)
# queries = queries.permute(1, 0, 2)
hs = self.transformer(left_feat, right_feat, queries)
outputs_corr = self.corr_embed(hs)
out = {'pred_corrs': outputs_corr}
# high res
# src, mask = features[1].decompose()
# assert src.shape[1] == 128
# assert mask is not None
# feat_fom = utils.sample_feature_map(
# src[:, :64, ...],
# )
# exec(debug_utils.embed_breakpoint('check resolution'))
return out
def build(args):
use_new = args.arch_version == 'v2'
if not use_new:
backbone = build_backbone(args)
transformer = build_transformer(args)
model = COTR(
backbone,
transformer,
sine_type=args.position_embedding,
)
else:
backbone = build_fast_backbone(args)
transformer = FastCorrespondenceTransformer(
d_model=args.hidden_dim,
nhead=args.nheads,
layer_names = ['self', 'cross', 'self', 'cross', 'self', 'cross']
)
model = FastCOTR(
backbone,
transformer,
sine_type=args.position_embedding,
)
return model
```
#### File: COTR/projector/pcd_projector.py
```python
import numpy as np
from COTR.utils import debug_utils, utils
def render_point_cloud_at_capture(point_cloud, capture, render_type='rgb', return_pcd=False):
assert render_type in ['rgb', 'bw', 'depth']
if render_type == 'rgb':
assert point_cloud.shape[1] == 6
else:
point_cloud = point_cloud[:, :3]
assert point_cloud.shape[1] == 3
if render_type in ['bw', 'rgb']:
keep_z = False
else:
keep_z = True
pcd_2d = PointCloudProjector.pcd_3d_to_pcd_2d_np(point_cloud,
capture.intrinsic_mat,
capture.extrinsic_mat,
capture.size,
keep_z=True,
crop=True,
filter_neg=True,
norm_coord=False,
return_index=False)
reproj = PointCloudProjector.pcd_2d_to_img_2d_np(pcd_2d,
capture.size,
has_z=True,
keep_z=keep_z)
if return_pcd:
return reproj, pcd_2d
else:
return reproj
def optical_flow_from_a_to_b(cap_a, cap_b):
cap_a_intrinsic = cap_a.pinhole_cam.intrinsic_mat
cap_a_img_size = cap_a.pinhole_cam.shape[:2]
_h, _w = cap_b.pinhole_cam.shape[:2]
x, y = np.meshgrid(
np.linspace(0, _w - 1, num=_w),
np.linspace(0, _h - 1, num=_h),
)
coord_map = np.concatenate([np.expand_dims(x, 2), np.expand_dims(y, 2)], axis=2)
pcd_from_cap_b = cap_b.get_point_cloud_world_from_depth(coord_map)
# pcd_from_cap_b = cap_b.point_cloud_world_w_feat(['pos', 'coord'])
optical_flow = PointCloudProjector.pcd_2d_to_img_2d_np(PointCloudProjector.pcd_3d_to_pcd_2d_np(pcd_from_cap_b, cap_a_intrinsic, cap_a.cam_pose.world_to_camera[0:3, :], cap_a_img_size, keep_z=True, crop=True, filter_neg=True, norm_coord=False), cap_a_img_size, has_z=True, keep_z=False)
return optical_flow
class PointCloudProjector():
def __init__(self):
pass
@staticmethod
def pcd_2d_to_pcd_3d_np(pcd, depth, intrinsic, motion=None, return_index=False):
assert isinstance(pcd, np.ndarray), 'cannot process data type: {0}'.format(type(pcd))
assert isinstance(intrinsic, np.ndarray), 'cannot process data type: {0}'.format(type(intrinsic))
assert len(pcd.shape) == 2 and pcd.shape[1] >= 2
assert len(depth.shape) == 2 and depth.shape[1] == 1
assert intrinsic.shape == (3, 3)
if motion is not None:
assert isinstance(motion, np.ndarray), 'cannot process data type: {0}'.format(type(motion))
assert motion.shape == (4, 4)
# exec(debug_utils.embed_breakpoint())
x, y, z = pcd[:, 0], pcd[:, 1], depth[:, 0]
append_ones = np.ones_like(x)
xyz = np.stack([x, y, append_ones], axis=1) # shape: [num_points, 3]
inv_intrinsic_mat = np.linalg.inv(intrinsic)
xyz = np.matmul(inv_intrinsic_mat, xyz.T).T * z[..., None]
valid_mask_1 = np.where(xyz[:, 2] > 0)
xyz = xyz[valid_mask_1]
if motion is not None:
append_ones = np.ones_like(xyz[:, 0:1])
xyzw = np.concatenate([xyz, append_ones], axis=1)
xyzw = np.matmul(motion, xyzw.T).T
valid_mask_2 = np.where(xyzw[:, 3] != 0)
xyzw = xyzw[valid_mask_2]
xyzw /= xyzw[:, 3:4]
xyz = xyzw[:, 0:3]
if pcd.shape[1] > 2:
features = pcd[:, 2:]
try:
features = features[valid_mask_1][valid_mask_2]
except UnboundLocalError:
features = features[valid_mask_1]
assert xyz.shape[0] == features.shape[0]
xyz = np.concatenate([xyz, features], axis=1)
if return_index:
points_index = np.arange(pcd.shape[0])[valid_mask_1][valid_mask_2]
return xyz, points_index
return xyz
@staticmethod
def img_2d_to_pcd_3d_np(depth, intrinsic, img=None, motion=None):
'''
the function signature is not fully correct, because img is an optional
if motion is None, the output pcd is in camera space
if motion is camera_to_world, the out pcd is in world space.
here the output is pure np array
'''
assert isinstance(depth, np.ndarray), 'cannot process data type: {0}'.format(type(depth))
assert isinstance(intrinsic, np.ndarray), 'cannot process data type: {0}'.format(type(intrinsic))
assert len(depth.shape) == 2
assert intrinsic.shape == (3, 3)
if img is not None:
assert isinstance(img, np.ndarray), 'cannot process data type: {0}'.format(type(img))
assert len(img.shape) == 3
assert img.shape[:2] == depth.shape[:2], 'feature should have the same resolution as the depth'
if motion is not None:
assert isinstance(motion, np.ndarray), 'cannot process data type: {0}'.format(type(motion))
assert motion.shape == (4, 4)
pcd_image_space = PointCloudProjector.img_2d_to_pcd_2d_np(depth[..., None], norm_coord=False)
valid_mask_1 = np.where(pcd_image_space[:, 2] > 0)
pcd_image_space = pcd_image_space[valid_mask_1]
xy = pcd_image_space[:, :2]
z = pcd_image_space[:, 2:3]
if img is not None:
_c = img.shape[-1]
feat = img.reshape(-1, _c)
feat = feat[valid_mask_1]
xy = np.concatenate([xy, feat], axis=1)
pcd_3d = PointCloudProjector.pcd_2d_to_pcd_3d_np(xy, z, intrinsic, motion=motion)
return pcd_3d
@staticmethod
def pcd_3d_to_pcd_2d_np(pcd, intrinsic, extrinsic, size, keep_z: bool, crop: bool = True, filter_neg: bool = True, norm_coord: bool = True, return_index: bool = False):
assert isinstance(pcd, np.ndarray), 'cannot process data type: {0}'.format(type(pcd))
assert isinstance(intrinsic, np.ndarray), 'cannot process data type: {0}'.format(type(intrinsic))
assert isinstance(extrinsic, np.ndarray), 'cannot process data type: {0}'.format(type(extrinsic))
assert len(pcd.shape) == 2 and pcd.shape[1] >= 3, 'seems the input pcd is not a valid 3d point cloud: {0}'.format(pcd.shape)
xyzw = np.concatenate([pcd[:, 0:3], np.ones_like(pcd[:, 0:1])], axis=1)
mvp_mat = np.matmul(intrinsic, extrinsic)
camera_points = np.matmul(mvp_mat, xyzw.T).T
if filter_neg:
valid_mask_1 = camera_points[:, 2] > 0.0
else:
valid_mask_1 = np.ones_like(camera_points[:, 2], dtype=bool)
camera_points = camera_points[valid_mask_1]
image_points = camera_points / camera_points[:, 2:3]
image_points = image_points[:, :2]
if crop:
valid_mask_2 = (image_points[:, 0] >= 0) * (image_points[:, 0] < size[1] - 1) * (image_points[:, 1] >= 0) * (image_points[:, 1] < size[0] - 1)
else:
valid_mask_2 = np.ones_like(image_points[:, 0], dtype=bool)
if norm_coord:
image_points = ((image_points / size[::-1]) * 2) - 1
if keep_z:
image_points = np.concatenate([image_points[valid_mask_2], camera_points[valid_mask_2][:, 2:3], pcd[valid_mask_1][:, 3:][valid_mask_2]], axis=1)
else:
image_points = np.concatenate([image_points[valid_mask_2], pcd[valid_mask_1][:, 3:][valid_mask_2]], axis=1)
# if filter_neg and crop:
# exec(debug_utils.embed_breakpoint('pcd_3d_to_pcd_2d_np'))
if return_index:
points_index = np.arange(pcd.shape[0])[valid_mask_1][valid_mask_2]
return image_points, points_index
return image_points
@staticmethod
def pcd_2d_to_img_2d_np(pcd, size, has_z=False, keep_z=False):
assert len(pcd.shape) == 2 and pcd.shape[-1] >= 2, 'seems the input pcd is not a valid point cloud: {0}'.format(pcd.shape)
# assert 0, 'pass Z values in'
if has_z:
pcd = pcd[pcd[:, 2].argsort()[::-1]]
if not keep_z:
pcd = np.delete(pcd, [2], axis=1)
index_list = np.round(pcd[:, 0:2]).astype(np.int32)
index_list[:, 0] = np.clip(index_list[:, 0], 0, size[1] - 1)
index_list[:, 1] = np.clip(index_list[:, 1], 0, size[0] - 1)
_h, _w, _c = *size, pcd.shape[-1] - 2
if _c == 0:
canvas = np.zeros((_h, _w, 1))
canvas[index_list[:, 1], index_list[:, 0]] = 1.0
else:
canvas = np.zeros((_h, _w, _c))
canvas[index_list[:, 1], index_list[:, 0]] = pcd[:, 2:]
return canvas
@staticmethod
def img_2d_to_pcd_2d_np(img, norm_coord=True):
assert isinstance(img, np.ndarray), 'cannot process data type: {0}'.format(type(img))
assert len(img.shape) == 3
_h, _w, _c = img.shape
if norm_coord:
x, y = np.meshgrid(
np.linspace(-1, 1, num=_w),
np.linspace(-1, 1, num=_h),
)
else:
x, y = np.meshgrid(
np.linspace(0, _w - 1, num=_w),
np.linspace(0, _h - 1, num=_h),
)
x, y = x.reshape(-1, 1), y.reshape(-1, 1)
feat = img.reshape(-1, _c)
pcd_2d = np.concatenate([x, y, feat], axis=1)
return pcd_2d
```
#### File: COTR/sfm_scenes/knn_search.py
```python
import os
import numpy as np
from COTR.utils import debug_utils
from COTR.utils.constants import VALID_NN_OVERLAPPING_THRESH
class ReprojRatioKnnSearch():
def __init__(self, scene):
self.scene = scene
self.distance_mat = None
self.nn_index = None
self._read_dist_mat()
self._build_nn_index()
def _read_dist_mat(self):
dist_mat_path = os.path.join(os.path.dirname(os.path.dirname(self.scene.captures[0].depth_path)), 'dist_mat/dist_mat.npy')
self.distance_mat = np.load(dist_mat_path)
def _build_nn_index(self):
# argsort is in ascending order, so we take negative
self.nn_index = (-1 * self.distance_mat).argsort(axis=1)
def get_knn(self, query, k, db_mask=None):
query_index = self.scene.img_path_to_index_dict[query.img_path]
if db_mask is not None:
query_mask = np.setdiff1d(np.arange(self.distance_mat[query_index].shape[0]), db_mask)
num_pos = (self.distance_mat[query_index] > VALID_NN_OVERLAPPING_THRESH).sum() if db_mask is None else (self.distance_mat[query_index][db_mask] > VALID_NN_OVERLAPPING_THRESH).sum()
# we have enough valid NN or not
if num_pos > k:
if db_mask is None:
ind = self.nn_index[query_index][:k + 1]
else:
temp_dist = self.distance_mat[query_index].copy()
temp_dist[query_mask] = -1
ind = (-1 * temp_dist).argsort(axis=0)[:k + 1]
# remove self
if query_index in ind:
ind = np.delete(ind, np.argwhere(ind == query_index))
else:
ind = ind[:k]
assert ind.shape[0] <= k, ind.shape[0] > 0
else:
k = num_pos
if db_mask is None:
ind = self.nn_index[query_index][:max(k, 1)]
else:
temp_dist = self.distance_mat[query_index].copy()
temp_dist[query_mask] = -1
ind = (-1 * temp_dist).argsort(axis=0)[:max(k, 1)]
return self.scene.get_captures_given_index_list(ind)
```
#### File: COTR/trainers/base_trainer.py
```python
import os
import math
import abc
import time
import tqdm
import torch.nn as nn
import tensorboardX
from COTR.trainers import tensorboard_helper
from COTR.utils import utils
from COTR.options import options_utils
class BaseTrainer(abc.ABC):
'''base trainer class.
contains methods for training, validation, and writing output.
'''
def __init__(self, opt, model, optimizer, criterion,
train_loader, val_loader):
self.opt = opt
self.use_cuda = opt.use_cuda
self.model = model
self.optim = optimizer
self.criterion = criterion
self.train_loader = train_loader
self.val_loader = val_loader
self.out = opt.out
if not os.path.exists(opt.out):
os.makedirs(opt.out)
self.epoch = 0
self.iteration = 0
self.max_iter = opt.max_iter
self.valid_iter = opt.valid_iter
self.tb_pusher = tensorboard_helper.TensorboardPusher(opt)
self.push_opt_to_tb()
self.need_resume = opt.resume
if self.need_resume:
self.resume()
if self.opt.load_weights:
self.load_pretrained_weights()
def push_opt_to_tb(self):
opt_str = options_utils.opt_to_string(self.opt)
tb_datapack = tensorboard_helper.TensorboardDatapack()
tb_datapack.set_training(False)
tb_datapack.set_iteration(self.iteration)
tb_datapack.add_text({'options': opt_str})
self.tb_pusher.push_to_tensorboard(tb_datapack)
@abc.abstractmethod
def validate_batch(self, data_pack):
pass
@abc.abstractmethod
def validate(self):
pass
@abc.abstractmethod
def train_batch(self, data_pack):
'''train for one batch of data
'''
pass
def train_epoch(self):
'''train for one epoch
one epoch is iterating the whole training dataset once
'''
self.model.train()
for batch_idx, data_pack in tqdm.tqdm(enumerate(self.train_loader),
initial=self.iteration % len(
self.train_loader),
total=len(self.train_loader),
desc='Train epoch={0}'.format(
self.epoch),
ncols=80,
leave=True,
):
# iteration = batch_idx + self.epoch * len(self.train_loader)
# if self.iteration != 0 and (iteration - 1) != self.iteration:
# continue # for resuming
# self.iteration = iteration
# self.iteration += 1
if self.iteration % self.valid_iter == 0:
time.sleep(2) # Prevent possible deadlock during epoch transition
self.validate()
self.train_batch(data_pack)
if self.iteration >= self.max_iter:
break
self.iteration += 1
def train(self):
'''entrance of the whole training process
'''
max_epoch = int(math.ceil(1. * self.max_iter / len(self.train_loader)))
for epoch in tqdm.trange(self.epoch,
max_epoch,
desc='Train',
ncols=80):
self.epoch = epoch
time.sleep(2) # Prevent possible deadlock during epoch transition
self.train_epoch()
if self.iteration >= self.max_iter:
break
@abc.abstractmethod
def resume(self):
pass
```
#### File: jiangwei221/COTR-1/demo_face.py
```python
import argparse
import os
import time
import cv2
import numpy as np
import torch
import imageio
import matplotlib.pyplot as plt
from COTR.utils import utils, debug_utils
from COTR.models import build_model
from COTR.options.options import *
from COTR.options.options_utils import *
from COTR.inference.inference_helper import triangulate_corr
from COTR.inference.sparse_engine import SparseEngine
utils.fix_randomness(0)
torch.set_grad_enabled(False)
def main(opt):
model = build_model(opt)
model = model.cuda()
weights = torch.load(opt.load_weights_path, map_location='cpu')['model_state_dict']
utils.safe_load_weights(model, weights)
model = model.eval()
img_a = imageio.imread('./sample_data/imgs/face_1.png', pilmode='RGB')
img_b = imageio.imread('./sample_data/imgs/face_2.png', pilmode='RGB')
queries = np.load('./sample_data/face_landmarks.npy')[0]
engine = SparseEngine(model, 32, mode='stretching')
corrs = engine.cotr_corr_multiscale(img_a, img_b, np.linspace(0.5, 0.0625, 4), 1, queries_a=queries, force=False)
f, axarr = plt.subplots(1, 2)
axarr[0].imshow(img_a)
axarr[0].scatter(*queries.T, s=1)
axarr[0].title.set_text('Reference Face')
axarr[0].axis('off')
axarr[1].imshow(img_b)
axarr[1].scatter(*corrs[:, 2:].T, s=1)
axarr[1].title.set_text('Target Face')
axarr[1].axis('off')
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
set_COTR_arguments(parser)
parser.add_argument('--out_dir', type=str, default=general_config['out'], help='out directory')
parser.add_argument('--load_weights', type=str, default=None, help='load a pretrained set of weights, you need to provide the model id')
opt = parser.parse_args()
opt.command = ' '.join(sys.argv)
layer_2_channels = {'layer1': 256,
'layer2': 512,
'layer3': 1024,
'layer4': 2048, }
opt.dim_feedforward = layer_2_channels[opt.layer]
if opt.load_weights:
opt.load_weights_path = os.path.join(opt.out_dir, opt.load_weights, 'checkpoint.pth.tar')
print_opt(opt)
main(opt)
```
#### File: COTR-1/scripts/prepare_megadepth_valid_list.py
```python
import os
import json
import tables
from tqdm import tqdm
import numpy as np
def read_all_imgs(base_dir):
all_imgs = []
for cur, dirs, files in os.walk(base_dir):
if 'imgs' in cur:
all_imgs += [os.path.join(cur, f) for f in files]
all_imgs.sort()
return all_imgs
def filter_semantic_depth(imgs):
valid_imgs = []
for item in tqdm(imgs):
f_name = os.path.splitext(os.path.basename(item))[0] + '.h5'
depth_dir = os.path.abspath(os.path.join(os.path.dirname(item), '../depths'))
depth_path = os.path.join(depth_dir, f_name)
depth_h5 = tables.open_file(depth_path, mode='r')
_depth = np.array(depth_h5.root.depth)
if _depth.min() >= 0:
prefix = os.path.abspath(os.path.join(item, '../../../../')) + '/'
rel_image_path = item.replace(prefix, '')
valid_imgs.append(rel_image_path)
depth_h5.close()
valid_imgs.sort()
return valid_imgs
if __name__ == "__main__":
MegaDepth_v1 = '/media/jiangwei/data_ssd/MegaDepth_v1/'
assert os.path.isdir(MegaDepth_v1), 'Change to your local path'
all_imgs = read_all_imgs(MegaDepth_v1)
valid_imgs = filter_semantic_depth(all_imgs)
with open('megadepth_valid_list.json', 'w') as outfile:
json.dump(valid_imgs, outfile, indent=4)
``` |
{
"source": "jiangwei221/image-matching-benchmark",
"score": 3
} |
#### File: third_party/utils/eval_helper.py
```python
import math
import cv2
import numpy as np
def align_model(model, rot, trans, scale):
return (np.matmul(rot, model) + trans) * scale
def align(model, data):
'''
Source: https://vision.in.tum.de/data/datasets/rgbd-dataset/tools
#absolute_trajectory_error_ate
Align two trajectories using the method of Horn (closed-form).
Input:
model -- first trajectory (3xn)
data -- second trajectory (3xn)
Output:
rot -- rotation matrix (3x3)
trans -- translation vector (3x1)
trans_error -- translational error per point (1xn)
'''
if model.shape[1] < 3:
print('Need at least 3 points for ATE: {}'.format(model))
return np.identity(3), np.zeros((3, 1)), 1
# Get zero centered point cloud
model_zerocentered = model - model.mean(1, keepdims=True)
data_zerocentered = data - data.mean(1, keepdims=True)
# constructed covariance matrix
W = np.zeros((3, 3))
for column in range(model.shape[1]):
W += np.outer(model_zerocentered[:, column],
data_zerocentered[:, column])
# SVD
U, d, Vh = np.linalg.linalg.svd(W.transpose())
S = np.identity(3)
if (np.linalg.det(U) * np.linalg.det(Vh) < 0):
S[2, 2] = -1
rot = np.matmul(np.matmul(U, S), Vh)
trans = data.mean(1, keepdims=True) - np.matmul(
rot, model.mean(1, keepdims=True))
# apply rot and trans to point cloud
model_aligned = align_model(model, rot, trans, 1.0)
model_aligned_zerocentered = model_aligned - model_aligned.mean(
1, keepdims=True)
# calc scale based on distance to point cloud center
data_dist = np.sqrt((data_zerocentered * data_zerocentered).sum(axis=0))
model_aligned_dist = np.sqrt(
(model_aligned_zerocentered * model_aligned_zerocentered).sum(axis=0))
scale_array = data_dist / model_aligned_dist
scale = np.median(scale_array)
return rot, trans, scale
def quaternion_matrix(quaternion):
'''Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
'''
q = np.array(quaternion, dtype=np.float64, copy=True)
n = np.dot(q, q)
if n < _EPS:
return np.identity(4)
q *= math.sqrt(2.0 / n)
q = np.outer(q, q)
return np.array(
[[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0.0],
[q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0.0],
[q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0],
[0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
'''Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
>>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
>>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
'''
M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
if isprecise:
q = np.empty((4, ))
t = np.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 1, 2, 3
if M[1, 1] > M[0, 0]:
i, j, k = 2, 3, 1
if M[2, 2] > M[i, i]:
i, j, k = 3, 1, 2
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0],
[m01 + m10, m11 - m00 - m22, 0.0, 0.0],
[m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
[m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = np.linalg.eigh(K)
q = V[[3, 0, 1, 2], np.argmax(w)]
if q[0] < 0.0:
np.negative(q, q)
return q
```
#### File: image-matching-benchmark/utils/feature_helper.py
```python
import os
import cv2
import numpy as np
from utils.path_helper import get_desc_file, get_kp_file
# ----------------------------------------------------------------------
# Global constants
# Keypoint List Structure Index Info
IDX_X, IDX_Y, IDX_SIZE, IDX_ANGLE, IDX_RESPONSE, IDX_OCTAVE = (
0, 1, 2, 3, 4, 5) # , IDX_CLASSID not used
IDX_a, IDX_b, IDX_c = (6, 7, 8)
# NOTE the row-major colon-major adaptation here
IDX_A0, IDX_A2, IDX_A1, IDX_A3 = (9, 10, 11, 12)
# # IDX_CLASSID for KAZE
# IDX_CLASSID = 13
KP_LIST_LEN = 13
# ----------------------------------------------------------------------
def l_clahe(img):
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
lab = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
lab[:, :, 0] = clahe.apply(lab[:, :, 0])
return cv2.cvtColor(lab, cv2.COLOR_Lab2RGB)
def update_affine(kp):
'''Returns an updated version of the keypoint.
Note
----
This function should be applied only to individual keypoints, not a list.
'''
# Compute A0, A1, A2, A3
S = np.asarray([[kp[IDX_a], kp[IDX_b]], [kp[IDX_b], kp[IDX_c]]])
invS = np.linalg.inv(S)
a = np.sqrt(invS[0, 0])
b = invS[0, 1] / max(a, 1e-18)
A = np.asarray([[a, 0], [b, np.sqrt(max(invS[1, 1] - b**2, 0))]])
# We need to rotate first!
cos_val = np.cos(np.deg2rad(kp[IDX_ANGLE]))
sin_val = np.sin(np.deg2rad(kp[IDX_ANGLE]))
R = np.asarray([[cos_val, -sin_val], [sin_val, cos_val]])
A = np.dot(A, R)
kp[IDX_A0] = A[0, 0]
kp[IDX_A1] = A[0, 1]
kp[IDX_A2] = A[1, 0]
kp[IDX_A3] = A[1, 1]
return kp
def kp_list_2_opencv_kp_list(kp_list):
'''Converts our kp list structure into opencv keypoints.
Note that the size is now diameter.
'''
opencv_kp_list = []
for kp in kp_list:
opencv_kp = cv2.KeyPoint(
x=kp[IDX_X],
y=kp[IDX_Y],
_size=kp[IDX_SIZE] * 2.0,
_angle=kp[IDX_ANGLE],
_response=kp[IDX_RESPONSE],
_octave=np.int32(kp[IDX_OCTAVE]),
# _class_id=np.int32(kp[IDX_CLASSID])
)
opencv_kp_list += [opencv_kp]
return opencv_kp_list
def opencv_kp_list_2_kp_list(opencv_kp_list):
'''Converts opencv keypoints into the kp list structure.
Note that the size is now radius.
'''
kp_list = []
for opencv_kp in opencv_kp_list:
kp = np.zeros((KP_LIST_LEN, ))
kp[IDX_X] = opencv_kp.pt[0]
kp[IDX_Y] = opencv_kp.pt[1]
kp[IDX_SIZE] = opencv_kp.size * 0.5
kp[IDX_ANGLE] = opencv_kp.angle
kp[IDX_RESPONSE] = opencv_kp.response
kp[IDX_OCTAVE] = opencv_kp.octave
# Compute a,b,c for vgg affine
kp[IDX_a] = 1. / (kp[IDX_SIZE]**2)
kp[IDX_b] = 0.
kp[IDX_c] = 1. / (kp[IDX_SIZE]**2)
# Compute A0, A1, A2, A3 and update
kp = update_affine(kp)
# kp[IDX_CLASSID] = opencv_kp.class_id
kp_list += [kp]
return kp_list
def convert_opencv_kp_desc(kp, desc, num_kp):
'''Converts opencv keypoints and descriptors to benchmark format.
Parameters
----------
kp: list
List of keypoints in opencv format
desc: list
List of descriptors in opencv format
num_kp: int
Number of keypoints to extract per image
'''
# Convert OpenCV keypoints to list data structure used for the benchmark.
kp = opencv_kp_list_2_kp_list(kp)
# Sort keypoints and descriptors by keypoints response
kp_desc = [(_kp, _desc)
for _kp, _desc in sorted(zip(kp, desc),
key=lambda x: x[0][IDX_RESPONSE])]
kp_sorted = [kp for kp, desc in kp_desc]
desc_sorted = [desc for kp, desc in kp_desc]
# Reverse for descending order
keypoints = kp_sorted[::-1]
descriptors = desc_sorted[::-1]
# Remove redundant points
cur_num_kp = len(keypoints)
keypoints = keypoints[:min(cur_num_kp, num_kp)]
descriptors = descriptors[:min(cur_num_kp, num_kp)]
return keypoints, descriptors
def is_feature_complete(cfg):
"""Checks if feature extraction is complete."""
# is_complete = os.path.exists(get_kp_file(cfg)) and os.path.exists(
# get_desc_file(cfg))
is_complete = os.path.exists(get_kp_file(cfg))
return is_complete
```
#### File: image-matching-benchmark/utils/filter_helper.py
```python
import os
from utils.path_helper import get_filter_match_file
def is_filter_complete(cfg):
'''Checks if stereo evaluation is complete.'''
# We should have the colmap pose file and no colmap temp path
is_complete = os.path.exists(get_filter_match_file(cfg))
return is_complete
```
#### File: image-matching-benchmark/utils/match_helper.py
```python
import itertools
import os
import cv2
import numpy as np
from utils.load_helper import load_vis
from utils.path_helper import get_match_file
def is_match_complete(cfg):
'''Checks if match computation is complete.'''
is_complete = os.path.exists(get_match_file(cfg))
return is_complete
def get_matching_dist_type(cfg):
method_match = cfg.method_dict['config_{}_{}'.format(
cfg.dataset, cfg.task)]['matcher']
if 'distance' in method_match:
dist_name = method_match['distance']
if dist_name.lower() == 'l2':
dist = cv2.NORM_L2
elif dist_name.lower() == 'l1':
dist = cv2.NORM_L1
elif dist_name.lower() == 'hamming':
dist = cv2.NORM_HAMMING
else:
raise ValueError('Unknown distance', dist_name)
return dist
else:
raise ValueError('Distance type is not set')
def compute_image_pairs(vis_list, num_images, vis_th, subset_index=None):
if subset_index is None:
vis = load_vis(vis_list)
else:
vis = load_vis(vis_list, subset_index)
image_pairs = []
for ii, jj in itertools.product(range(num_images), range(num_images)):
if ii != jj:
if vis[ii][jj] > vis_th:
image_pairs.append((ii, jj))
return image_pairs
def remove_duplicate_matches(matches, kp1, kp2):
''' Conveniency function to remove duplicate matches in multiple geometry
models. This is due to methods such as SIFT that have multiple scale or
orientation values.
Parameters
----------
matches: [2 x N] list of indices to the list of keypoints
kp1, kp1: [(M1, M2) x 2] lists of keypoints
Output
-------
unique_matches: subset of matches with duplicates removed
'''
if matches.size > 0:
if matches.ndim == 1:
matches = np.expand_dims(matches, axis=1)
_, unique_indices = np.unique([
np.concatenate((p1, p2))
for p1, p2 in zip(kp1[matches[0], :2], kp2[matches[1], :2])
],
axis=0,
return_index=True)
unique_matches = matches[:, unique_indices]
return unique_matches
else:
return matches
``` |
{
"source": "jiangwei221/kornia",
"score": 3
} |
#### File: kornia/utils/image.py
```python
from typing import Optional
import numpy as np
import torch
def image_to_tensor(image: np.array) -> torch.Tensor:
"""Converts a numpy image to a PyTorch tensor image.
Args:
image (numpy.ndarray): image of the form :math:`(H, W, C)`.
Returns:
torch.Tensor: tensor of the form :math:`(C, H, W)`.
"""
if not type(image) == np.ndarray:
raise TypeError("Input type is not a numpy.ndarray. Got {}".format(
type(image)))
if len(image.shape) > 3 or len(image.shape) < 2:
raise ValueError("Input size must be a two or three dimensional array")
tensor: torch.Tensor = torch.from_numpy(image)
if len(tensor.shape) == 2:
tensor = torch.unsqueeze(tensor, dim=-1)
return tensor.permute(2, 0, 1).squeeze_() # CxHxW
def tensor_to_image(tensor: torch.Tensor) -> np.ndarray:
    """Converts a PyTorch tensor image to a numpy image. In case the tensor is
    in the GPU, it will be copied back to CPU.

    Args:
        tensor (torch.Tensor): image of the form :math:`(C, H, W)`, or
            :math:`(H, W)` for single-channel images.

    Returns:
        numpy.ndarray: image of the form :math:`(H, W, C)`, or
        :math:`(H, W)` when the input was two-dimensional.

    Raises:
        TypeError: if ``tensor`` is not a torch tensor.
        ValueError: if ``tensor`` is not two- or three-dimensional.
    """
    if not torch.is_tensor(tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(tensor)))
    if tensor.dim() > 3 or tensor.dim() < 2:
        raise ValueError(
            "Input size must be a two or three dimensional tensor")
    input_shape = tensor.shape
    if len(input_shape) == 2:
        # promote (H, W) to (1, H, W) so the permute below is valid
        tensor = torch.unsqueeze(tensor, dim=0)
    tensor = tensor.permute(1, 2, 0)
    if len(input_shape) == 2:
        # drop the channel axis that was added above
        tensor = torch.squeeze(tensor, dim=-1)
    return tensor.cpu().detach().numpy()
```
#### File: test/filters/test_blur.py
```python
import pytest
from typing import Tuple
import torch
import kornia as kornia
from torch.testing import assert_allclose
from torch.autograd import gradcheck
import utils # test utils
class TestBoxBlur:
    """Unit tests for the box-blur filter (module and functional APIs)."""

    def test_shape(self):
        # a single-sample batch keeps its shape through the blur
        sample = torch.zeros(1, 3, 4, 4)
        assert kornia.filters.BoxBlur((3, 3))(sample).shape == (1, 3, 4, 4)

    def test_shape_batch(self):
        sample = torch.zeros(2, 6, 4, 4)
        assert kornia.filters.BoxBlur((3, 3))(sample).shape == (2, 6, 4, 4)

    def test_kernel_3x3(self):
        sample = torch.tensor([[[
            [1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1.],
            [2., 2., 2., 2., 2.],
            [2., 2., 2., 2., 2.]
        ]]])
        blurred = kornia.filters.box_blur(sample, (3, 3))
        # interior pixels of the constant top region stay exactly 1
        assert_allclose(blurred[0, 0, 1, 1:4], torch.tensor(1.))

    def test_kernel_5x5(self):
        sample = torch.tensor([[[
            [1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1.],
            [2., 2., 2., 2., 2.],
            [2., 2., 2., 2., 2.]
        ]]])
        blurred = kornia.filters.box_blur(sample, (5, 5))
        assert_allclose(blurred[0, 0, 1, 2], torch.tensor(1.))

    def test_kernel_5x5_batch(self):
        sample = torch.tensor([[[
            [1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1.],
            [1., 1., 1., 1., 1.],
            [2., 2., 2., 2., 2.],
            [2., 2., 2., 2., 2.]
        ]]]).repeat(3, 1, 1, 1)
        blurred = kornia.filters.box_blur(sample, (5, 5))
        assert_allclose(blurred[0, 0, 1, 2], torch.tensor(1.))

    def test_gradcheck(self):
        # gradcheck requires double precision variables; utils handles that
        image = utils.tensor_to_gradcheck_var(torch.rand(1, 2, 5, 4))
        assert gradcheck(kornia.filters.box_blur, (image, (3, 3),),
                         raise_exception=True)

    def test_jit(self):
        @torch.jit.script
        def op_script(input: torch.Tensor,
                      kernel_size: Tuple[int, int]) -> torch.Tensor:
            return kornia.filters.box_blur(input, kernel_size)

        image = torch.rand(2, 3, 4, 5)
        assert_allclose(op_script(image, (3, 3)),
                        kornia.filters.box_blur(image, (3, 3)))
```
#### File: geometry/transform/test_pyramid.py
```python
import pytest
import torch
import kornia as kornia
from torch.testing import assert_allclose
from torch.autograd import gradcheck
import utils # test utils
class TestPyrUp:
    """Tests for the pyramid upsampling op (PyrUp / pyrup)."""

    def test_shape(self):
        sample = torch.zeros(1, 2, 4, 4)
        assert kornia.geometry.PyrUp()(sample).shape == (1, 2, 8, 8)

    def test_shape_batch(self):
        sample = torch.zeros(2, 2, 4, 4)
        assert kornia.geometry.PyrUp()(sample).shape == (2, 2, 8, 8)

    def test_gradcheck(self):
        sample = utils.tensor_to_gradcheck_var(torch.rand(1, 2, 5, 4))
        assert gradcheck(kornia.geometry.pyrup, (sample,),
                         raise_exception=True)

    def test_jit(self):
        @torch.jit.script
        def op_script(input):
            return kornia.geometry.pyrup(input)

        sample = torch.rand(2, 3, 4, 5)
        assert_allclose(op_script(sample), kornia.geometry.pyrup(sample))
class TestPyrDown:
    """Tests for the pyramid downsampling op (PyrDown / pyrdown)."""

    def test_shape(self):
        sample = torch.zeros(1, 2, 4, 4)
        assert kornia.geometry.PyrDown()(sample).shape == (1, 2, 2, 2)

    def test_shape_batch(self):
        sample = torch.zeros(2, 2, 4, 4)
        assert kornia.geometry.PyrDown()(sample).shape == (2, 2, 2, 2)

    def test_gradcheck(self):
        sample = utils.tensor_to_gradcheck_var(torch.rand(1, 2, 5, 4))
        assert gradcheck(kornia.geometry.pyrdown, (sample,),
                         raise_exception=True)

    def test_jit(self):
        @torch.jit.script
        def op_script(input):
            return kornia.geometry.pyrdown(input)

        sample = torch.rand(2, 3, 4, 5)
        assert_allclose(op_script(sample), kornia.geometry.pyrdown(sample))
```
#### File: test/utils/test_image.py
```python
import pytest
import numpy as np
import torch
import kornia as kornia
import utils # test utils
@pytest.mark.parametrize("batch_shape",
                         [(4, 4), (1, 4, 4), (3, 4, 4), ])
def test_tensor_to_image(batch_shape):
    """Conversion yields a numpy array whose leading dims are the spatial ones."""
    converted = kornia.utils.tensor_to_image(torch.ones(batch_shape))
    assert isinstance(converted, np.ndarray)
    assert converted.shape[:2] == batch_shape[-2:]
@pytest.mark.parametrize("batch_shape",
                         [(4, 4), (4, 4, 1), (4, 4, 3), ])
def test_image_to_tensor(batch_shape):
    """Conversion yields a torch tensor whose trailing dims are the spatial ones."""
    converted = kornia.utils.image_to_tensor(np.ones(batch_shape))
    assert isinstance(converted, torch.Tensor)
    assert converted.shape[-2:] == batch_shape[:2]
``` |
{
"source": "jiangwei221/vispy",
"score": 3
} |
#### File: tutorial/gl/fireworks.py
```python
import numpy as np
from vispy import app
from vispy.gloo import gl
vertex_code = """
#version 120
uniform float time;
uniform vec3 center;
attribute float lifetime;
attribute vec3 start;
attribute vec3 end;
varying float v_lifetime;
void main () {
if (time < lifetime) {
gl_Position.xyz = start + (time * end) + center;
gl_Position.w = 1.0;
gl_Position.y -= 1.5 * time * time;
} else {
gl_Position = vec4(-1000, -1000, 0, 0);
}
v_lifetime = clamp(1.0 - (time / lifetime), 0.0, 1.0);
gl_PointSize = (v_lifetime * v_lifetime) * 40.0;
}
"""
fragment_code = """
#version 120
uniform vec4 color;
varying float v_lifetime;
void main()
{
float d = 1 - length(gl_PointCoord - vec2(.5,.5)) / (sqrt(2)/2);
gl_FragColor = d*color;
gl_FragColor.a = d;
gl_FragColor.a *= v_lifetime;
}
"""
class Canvas(app.Canvas):
    """Fireworks demo driven through the raw vispy.gloo.gl API.

    A single GL program renders one 'explosion' of point sprites; a timer
    advances a shared ``time`` uniform and re-seeds the particle buffer with
    a fresh explosion once the current one has played out.
    """

    def __init__(self):
        app.Canvas.__init__(self, size=(800, 600), title='GL Fireworks',
                            keys='interactive')

    def on_initialize(self, event):
        # Build & activate program
        self.program = gl.glCreateProgram()
        vertex = gl.glCreateShader(gl.GL_VERTEX_SHADER)
        fragment = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
        gl.glShaderSource(vertex, vertex_code)
        gl.glShaderSource(fragment, fragment_code)
        gl.glCompileShader(vertex)
        gl.glCompileShader(fragment)
        gl.glAttachShader(self.program, vertex)
        gl.glAttachShader(self.program, fragment)
        gl.glLinkProgram(self.program)
        # Shader objects can be detached once the program is linked.
        gl.glDetachShader(self.program, vertex)
        gl.glDetachShader(self.program, fragment)
        gl.glUseProgram(self.program)
        # Build vertex buffer: one packed record (lifetime, start, end)
        # per particle.
        n = 10000
        self.data = np.zeros(n, dtype=[('lifetime', np.float32, 1),
                                       ('start', np.float32, 3),
                                       ('end', np.float32, 3)])
        vbuffer = gl.glCreateBuffer()
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, vbuffer)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, self.data, gl.GL_DYNAMIC_DRAW)
        # Bind buffer attributes; offsets walk the packed record layout.
        stride = self.data.strides[0]
        offset = 0
        loc = gl.glGetAttribLocation(self.program, "lifetime")
        gl.glEnableVertexAttribArray(loc)
        gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, False, stride, offset)
        offset = self.data.dtype["lifetime"].itemsize
        loc = gl.glGetAttribLocation(self.program, "start")
        gl.glEnableVertexAttribArray(loc)
        gl.glVertexAttribPointer(loc, 3, gl.GL_FLOAT, False, stride, offset)
        offset = self.data.dtype["start"].itemsize
        loc = gl.glGetAttribLocation(self.program, "end")
        gl.glEnableVertexAttribArray(loc)
        gl.glVertexAttribPointer(loc, 3, gl.GL_FLOAT, False, stride, offset)
        # OpenGL initalization: additive blending, shader-controlled point
        # size, no depth test.
        self.elapsed_time = 0
        gl.glClearColor(0, 0, 0, 1)
        gl.glDisable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE)
        gl.glEnable(34370)  # gl.GL_VERTEX_PROGRAM_POINT_SIZE
        gl.glEnable(34913)  # gl.GL_POINT_SPRITE
        gl.glViewport(0, 0, *self.physical_size)
        self.new_explosion()
        self.timer = app.Timer('auto', self.on_timer, start=True)

    def on_draw(self, event):
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        gl.glDrawArrays(gl.GL_POINTS, 0, len(self.data))

    def on_resize(self, event):
        gl.glViewport(0, 0, *event.physical_size)

    def on_timer(self, event):
        # Advance the animation clock (assumes ~60 Hz timer ticks) and
        # restart the explosion once it has played for 1.5 s.
        self.elapsed_time += 1. / 60.
        if self.elapsed_time > 1.5:
            self.new_explosion()
            self.elapsed_time = 0.0
        loc = gl.glGetUniformLocation(self.program, "time")
        gl.glUniform1f(loc, self.elapsed_time)
        self.update()

    def new_explosion(self):
        """Upload a fresh random explosion (color, center, particle data)."""
        n = len(self.data)
        # Random explosion color; alpha shrinks slowly with particle count.
        color = np.random.uniform(0.1, 0.9, 4).astype(np.float32)
        color[3] = 1.0 / n ** 0.08
        loc = gl.glGetUniformLocation(self.program, "color")
        gl.glUniform4f(loc, *color)
        # Random explosion center.
        center = np.random.uniform(-0.5, 0.5, 3)
        loc = gl.glGetUniformLocation(self.program, "center")
        gl.glUniform3f(loc, *center)
        # Randomize lifetimes and trajectories, then re-upload the buffer.
        self.data['lifetime'] = np.random.normal(2.0, 0.5, (n,))
        self.data['start'] = np.random.normal(0.0, 0.2, (n, 3))
        self.data['end'] = np.random.normal(0.0, 1.2, (n, 3))
        gl.glBufferData(gl.GL_ARRAY_BUFFER, self.data, gl.GL_DYNAMIC_DRAW)
if __name__ == '__main__':
    # Show the canvas and enter the vispy event loop.
    c = Canvas()
    c.show()
    app.run()
```
#### File: tutorial/gloo/texture_precision.py
```python
import numpy as np
from vispy import gloo
from vispy import app
# Render target / texture size.
W, H = 1024, 1024

# prepare a gradient field with high dynamic range: value grows as the
# squared product of the pixel coordinates, then normalized to [0, 1] so
# precision loss shows up in the low-order bits.
data = np.zeros((H, W, 3), np.float32)
for i in range(W):
    data[:, i, :] = i**2
for i in range(H):
    data[i, :, :] *= i**2
data *= 1./data.max()

# prepare a simple quad (triangle strip) to cover the viewport
quad = np.zeros(4, dtype=[
    ('a_position', np.float32, 2),
    ('a_texcoord', np.float32, 2)
])
quad['a_position'] = np.array([[-1, -1], [+1, -1], [-1, +1], [+1, +1]])
quad['a_texcoord'] = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])

# Pass-through vertex shader.
vert_shader = """
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main()
{
    v_texcoord = a_texcoord;
    gl_Position = vec4(a_position, 0.0, 1.0);
}
"""

# Fragment shader visualizing texture precision; the Python %-format below
# bakes the texture width W into the texel step.
frag_shader = """
uniform sampler2D u_texture;
varying vec2 v_texcoord;
void main()
{
    float ndiff;
    // an adjacent texel is 1/W further over in normalized texture coordinates
    vec2 v_texcoord2 = vec2(clamp(v_texcoord.x + 1.0/%(W)d, 0.0, 1.0),
                            v_texcoord.y);
    vec4 texel1 = texture2D(u_texture, v_texcoord);
    vec4 texel2 = texture2D(u_texture, v_texcoord2);
    // test for quantized binning of adjacent texels
    if (texel1.r == texel2.r && v_texcoord2.x < 1.0 && v_texcoord.y > 0.0)
        ndiff = 1.0;
    else
        ndiff = 0.0;
    gl_FragColor = vec4(
        fract(texel1.r * 255.0), // render low-significance bits as red
        texel1.r,                // render high-significance bits as green
        ndiff,                   // flag quantized bands as blue
        1);
}
""" % dict(W=W)
class Canvas(app.Canvas):
    """Full-screen quad visualizing texture quantization.

    Pressing 'F' cycles through the internal formats listed in
    ``self._internalformats`` so their effective precision can be compared.
    """

    def __init__(self):
        app.Canvas.__init__(self, size=(W, H), keys='interactive')
        # Candidate texture internal formats, in cycling order.
        self._internalformats = [
            'rgb8',
            'rgb16',
            'rgb16f',
            'rgb32f'
        ]
        self.program = gloo.Program(vert_shader, frag_shader)
        self.program.bind(gloo.VertexBuffer(quad))
        # Start at -1 so the first toggle below selects index 0.
        self._internalformat = -1
        self.texture = gloo.Texture2D(
            shape=(H, W, 3),
            interpolation='nearest'
        )
        gloo.set_viewport(0, 0, *self.physical_size)
        self.toggle_internalformat()
        self.show()

    def on_key_press(self, event):
        # 'F' switches to the next internal format.
        if event.key == 'F':
            self.toggle_internalformat()

    def toggle_internalformat(self):
        """Advance to the next internal format and re-upload the texture."""
        self._internalformat = (
            (self._internalformat + 1)
            % len(self._internalformats)
        )
        internalformat = self._internalformats[self._internalformat]
        print("Requesting texture internalformat %s" % internalformat)
        # Resizing with a new internalformat reallocates the GPU texture.
        self.texture.resize(
            data.shape,
            format='rgb',
            internalformat=internalformat
        )
        self.texture.set_data(data)
        self.program['u_texture'] = self.texture
        self.update()

    def on_resize(self, event):
        gloo.set_viewport(0, 0, *event.physical_size)

    def on_draw(self, event):
        gloo.clear(color=True, depth=True)
        self.program.draw('triangle_strip')
if __name__ == '__main__':
    # Create the canvas and enter the vispy event loop.
    c = Canvas()
    app.run()
``` |
{
"source": "jiangwei221/voronoi_depth_completion",
"score": 2
} |
#### File: voronoi_depth_completion/options/options.py
```python
import sys
import argparse
import json
import os
import re
import torch
from utils import utils, keys
from options.options_utils import str2bool, print_opt, confirm_opt
def set_general_arguments(parser):
    """Register options shared by every run (user confirmation prompt)."""
    group = parser.add_argument_group('General')
    group.add_argument('--confirm', type=str2bool, default=True,
                       help='promote confirmation for user')
def set_data_arguments(parser):
    """Register dataset options (loading, normalization, subsetting)."""
    group = parser.add_argument_group('Data')
    group.add_argument('--shuffle_data', type=str2bool, default=True,
                       help='use sequence dataset or shuffled dataset')
    group.add_argument('--load_rgb', type=str2bool, default=True,
                       help='rgb guided')
    group.add_argument('--rgb2gray', type=str2bool, default=False,
                       help='convert rgb to grayscale')
    group.add_argument('--invert_depth', type=str2bool, default=False,
                       help='convert depth to disparity')
    group.add_argument('--norm_factor', type=float, default=256.0,
                       help='normalize the depth image')
    group.add_argument('--workers', type=int, default=0,
                       help='num of workers to fetch data')
    group.add_argument('--data_cut', choices=[100, 10, 1], type=int, default=1,
                       help='1/100 data, 1/10 data or all data')
def set_voronoi_arguments(parser):
    """Register Voronoi rendering options."""
    group = parser.add_argument_group('Voronoi')
    group.add_argument('--vor_radius', type=float, default=10.0,
                       help='radius of the circle/cone to render voronoi diagram')
def set_options(training: bool):
    '''
    This function will return an option object that
    contains all the training/testing options.

    Reads machine-specific paths from ``global_config.json`` next to this
    module, registers all argument groups, then parses sys.argv.

    Arguments:
        training {bool} -- [indicating training]
            NOTE(review): currently unused inside this function.
    '''
    # Load machine-specific path config; abort early if it is missing.
    try:
        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        with open(os.path.join(__location__, 'global_config.json'), 'r') as f:
            global_config = json.load(f)
    except FileNotFoundError as fnf_error:
        print(fnf_error)
        exit(1)
    parser = argparse.ArgumentParser()
    set_general_arguments(parser)
    set_data_arguments(parser)
    set_voronoi_arguments(parser)
    parser.add_argument('--out_dir', type=str, default=global_config['out'], help='out directory')
    parser.add_argument('--tb_dir', type=str, default=global_config['runs'], help='tensorboard runs directory')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size for dataloader')
    parser.add_argument('--learning_rate', type=float, default=1e-4, help='learning rate')
    parser.add_argument('--learning_type', choices=[keys.UNGUIDED_COMPLETION, keys.GUIDED_COMPLETION], type=str, default='guided', help='un/guided depth completion')
    parser.add_argument('--residual_learning', type=str2bool, default=True, help='using NN filling and then learning the residual')
    parser.add_argument('--max_iter', type=int, default=10000, help='total training iterations')
    parser.add_argument('--valid_iter', type=int, default=300, help='iterval of validation')
    parser.add_argument('--resume', type=str2bool, default=False, help='resume training with same model name')
    parser.add_argument('--load_weights', type=str, default=None, help='load weights from existing models')
    opt = parser.parse_args()
    # Record the exact launch command for reproducibility.
    opt.command = ' '.join(sys.argv)
    # CUDA is mandatory: fail fast when no GPU is available.
    opt.use_cuda = True
    if opt.use_cuda:
        assert torch.cuda.is_available()
    # Propagate configured dataset paths and derive per-run output folders.
    opt.data_path = global_config['data_path']
    opt.gt_path = global_config['gt_path']
    opt.rgb_path = global_config['rgb_path']
    # NOTE(review): run name is hard-coded here — confirm whether it should
    # be a command-line option.
    opt.name = 'test_train_sym'
    opt.out = os.path.join(opt.out_dir, opt.name)
    opt.tb_out = os.path.join(opt.tb_dir, opt.name)
    # Either interactively confirm the options or just print them.
    if opt.confirm:
        confirm_opt(opt)
    else:
        print_opt(opt)
    return opt
```
#### File: voronoi_depth_completion/options/options_utils.py
```python
import os
import sys
import json
from utils import utils
def str2bool(v):
    """Interpret common truthy spellings; anything else is False."""
    truthy = ('true', '1', 'yes', 'y', 't')
    return v.lower() in truthy
def print_opt(opt):
    """Pretty-print every option through utils.print_notification."""
    lines = []
    for name in sorted(vars(opt)):
        lines.append(name.rjust(25, ' ') + ' ' + str(getattr(opt, name)))
    utils.print_notification(lines, 'OPTIONS')
def confirm_opt(opt):
    """Display all options, then abort with exit(1) unless the user confirms."""
    print_opt(opt)
    confirmed = utils.confirm()
    if not confirmed:
        exit(1)
def opt_to_string(opt) -> str:
    """Render the launch command plus every option as blank-line-separated text."""
    pieces = ['', 'python ' + ' '.join(sys.argv)]
    pieces.extend(name.rjust(25, ' ') + ' ' + str(getattr(opt, name))
                  for name in sorted(vars(opt)))
    pieces.append('')
    # joining with '\n\n' reproduces the leading, separating, and trailing
    # blank lines of the original concatenation loop
    return '\n\n'.join(pieces)
def save_opt(opt):
    '''Persist the option namespace as <opt.out>/params.json.'''
    target = os.path.join(opt.out, 'params.json')
    with open(target, 'w') as fp:
        json.dump(vars(opt), fp, indent=0, sort_keys=True)
```
#### File: voronoi_depth_completion/renderer/voronoi_renderer.py
```python
import numpy as np
from vispy import gloo
from vispy import app
from vispy.util.ptime import time
from vispy.gloo.util import _screenshot
from vispy.gloo import gl
try:
from utils import utils
except:
import sys
sys.path.append("..")
from utils import utils
# WARNING: doesn't work with Qt4 (update() does not call on_draw()??)
app.use_app('glfw')
# this part of shader copy from
# http://www.labri.fr/perso/nrougier/python-opengl/#gpu-voronoi
vertex = """
uniform vec2 resolution;
attribute vec2 center;
attribute vec3 color;
attribute float radius;
varying vec2 v_center;
varying vec3 v_color;
varying float v_radius;
void main()
{
v_radius = radius;
v_center = center;
v_color = color;
gl_PointSize = 2.0 + ceil(2.0*radius);
gl_Position = vec4(2.0*center/resolution-1.0, 0.0, 1.0);
} """
fragment = """
varying vec2 v_center;
varying vec3 v_color;
varying float v_radius;
void main()
{
vec2 p = (gl_FragCoord.xy - v_center.xy)/v_radius;
float z = 1.0 - length(p);
if (z < 0.0) discard;
gl_FragDepth = (1.0 - z);
//gl_FragColor = vec4(v_color, 1);
// use 'z' as confidence
gl_FragColor = vec4(z, z, z, 1);
gl_FragColor = vec4(v_color.x, v_color.y, z, 1);
} """
class Canvas(app.Canvas):
    '''one time renderer, maybe can be extended to a service

    Renders the seed points once into an offscreen FBO, captures the pixels
    via a screenshot into ``self.im``, and then quits the vispy app.
    '''

    def __init__(self, size, num_points:int, center, color, radius):
        '''[summary]

        Arguments:
            size {[type]} -- size of the rendered image
            num_points {int} -- number of valid depth values
            center {[type]} -- the (x, y) coordinate of each valid depth value
            color {[type]} -- encode the 16bit depth value to (R, G) channels, corresponding to 'center'
            radius {[type]} -- per-point radius bounding each seed's influence
        '''
        # We hide the canvas upon creation.
        app.Canvas.__init__(self, show=False, size=size)
        self._t0 = time()
        # Texture where we render the scene (note HxW, i.e. size reversed).
        self._rendertex = gloo.Texture2D(shape=size[::-1] + (4,))
        # FBO.
        self._fbo = gloo.FrameBuffer(self._rendertex,
                                     gloo.RenderBuffer(size[::-1]))
        # Regular program that will be rendered to the FBO.
        V = np.zeros(num_points, [("center", np.float32, 2),
                                  ("color", np.float32, 3),
                                  ("radius", np.float32, 1)])
        V["center"] = center
        V["color"] = color
        V["radius"] = radius
        self.program = gloo.Program(vertex, fragment)
        self.program.bind(gloo.VertexBuffer(V))
        self.program['resolution'] = self.size
        # We manually draw the hidden canvas.
        self.update()

    def on_draw(self, event):
        # Render in the FBO with depth testing so the nearest seed wins.
        with self._fbo:
            gl.glEnable(gl.GL_DEPTH_TEST)
            gloo.clear('black')
            gloo.set_viewport(0, 0, *self.size)
            self.program.draw(gl.GL_POINTS)
            # Retrieve the contents of the FBO texture.
            self.im = _screenshot((0, 0, self.size[0], self.size[1]))
        self._time = time() - self._t0
        # Immediately exit the application.
        app.quit()
if __name__ == '__main__':
    # Smoke test: render a handful of random seeds and display the result.
    size = (1242, 375)
    num_points = 20
    center = np.random.uniform(0, 1, (num_points, 2))
    # center = np.ones((num_points, 2)) * 0
    center[:, 0] = center[:, 0] * size[0]
    center[:, 1] = center[:, 1] * size[1]
    # exec(utils.TEST_EMBEDDING)
    color = np.random.uniform(0.0, 1.0, (num_points, 3))
    color[:, 2] = 1.0
    # NOTE(review): Canvas.__init__ requires a 'radius' argument that is not
    # passed here, so this call raises TypeError as written — confirm the
    # intended radius value.
    c = Canvas(size=size, num_points=num_points, center=center, color=color)
    size = c.size
    app.run()
    # The rendering is done, we get the rendering output (4D NumPy array)
    render = c.im
    print('Finished in %.1fms.' % (c._time*1e3))
    # Now, we display this image with matplotlib to check.
    import matplotlib.pyplot as plt
    # plt.figure(figsize=(size[0]/100., size[1]/100.), dpi=100)
    plt.imshow(render, interpolation='none')
    plt.show()
    # NOTE(review): interactive debug embedding left enabled at script exit.
    exec(utils.TEST_EMBEDDING)
``` |
{
"source": "JiangWeibeta/Checkerboard-Context-Model-for-Efficient-Learned-Image-Compression",
"score": 3
} |
#### File: Checkerboard-Context-Model-for-Efficient-Learned-Image-Compression/version1/MaskedConv.py
```python
import torch.nn as nn
class MaskedConv2d(nn.Conv2d):
    """Conv2d whose kernel is restricted to a checkerboard pattern.

    Kernel positions (i, j) with odd i + j are kept; every other weight is
    zeroed before each forward pass, so the convolution only ever sees the
    checkerboard context.
    """

    def __init__(self, *args, **kwargs):
        super(MaskedConv2d, self).__init__(*args, **kwargs)
        # Build the checkerboard mask with the same shape/device as weight.
        mask = self.weight.data.clone()
        mask.fill_(0)
        _, _, kernel_h, kernel_w = mask.size()
        for row in range(kernel_h):
            for col in range(kernel_w):
                if (row + col) % 2 == 1:
                    mask[:, :, row, col] = 1
        self.register_buffer('mask', mask)

    def forward(self, x):
        # Zero the masked weights in place, then run the standard conv.
        self.weight.data *= self.mask
        return super(MaskedConv2d, self).forward(x)
if __name__ == "__main__":
    # Quick visual check: print the checkerboard mask of a 5x5 masked conv.
    M = 128
    maskedconv = MaskedConv2d(M, M * 2, 5, stride=1, padding=2)
    print(maskedconv.mask)
    # print(maskedconv.weight.data * maskedconv.mask)
``` |
{
"source": "jiangweiyao/ImageClassifier_PyTorch_Captum_Dash",
"score": 3
} |
#### File: jiangweiyao/ImageClassifier_PyTorch_Captum_Dash/dash_classifier.py
```python
import datetime
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import PIL.Image as Image
from io import BytesIO
import base64
import torch
import torchvision
from torchvision import datasets, models, transforms
import numpy as np
import pandas as pd
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

# Model and labels are loaded once at import time; inference is CPU-only.
device = torch.device('cpu')
model = torch.load('model_conv.pth', map_location=device)
# One label per line in class.txt, index-aligned with the model outputs.
# NOTE(review): the file handle opened here is never closed.
labels = np.array(open("class.txt").read().splitlines())

# Resize/crop/normalize pipeline applied to every uploaded image.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Inverse of the Normalize step above, used to display the processed input.
inv_normalize = transforms.Normalize(
    mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
    std=[1/0.229, 1/0.224, 1/0.225]
)

# Page layout: header text, a drag-and-drop upload box, and a results area
# that the update_output callback fills in.
app.layout = html.Div([
    html.Div([
        html.H2('The Flower Classifier'),
        html.Strong('This application will attempt to classify your picture into 6 different species of flowers. Drag your image file into the below box to classify. This app (and repo) is intended to demonstrate how to load a saved tensorflow model for image classification and use the model in an interactive Dash application.'),
    ]),
    dcc.Upload(
        id='upload-image',
        children=html.Div([
            'Drag and Drop or ',
            html.A('Select Files')
        ]),
        style={
            'width': '100%',
            'height': '100px',
            'lineHeight': '100px',
            'borderWidth': '1px',
            'borderStyle': 'dashed',
            'borderRadius': '5px',
            'textAlign': 'center',
            'margin': '10px'
        },
        # Allow multiple files to be uploaded
        multiple=True
    ),
    html.Div(id='output-image-upload'),
])
def parse_contents(contents, filename, date):
    """Classify one uploaded image and return an HTML fragment showing the
    de-normalized input image plus a sorted probability table."""
    # decode the uploaded data-URL payload into a PIL RGB image
    b64_payload = contents.split(",")[1]
    raw_bytes = base64.b64decode(b64_payload)
    image = Image.open(BytesIO(raw_bytes)).convert('RGB')

    tensor = preprocess(image)
    # undo normalization so the preview shows natural colors
    preview = inv_normalize(tensor)
    preview_arr = np.transpose(preview.squeeze().detach().numpy(), (1, 2, 0))
    htmlimg = Image.fromarray((preview_arr * 255).astype(np.uint8))

    batch = tensor.unsqueeze(0)
    pred = model(batch)
    print(pred.detach().numpy())
    df = pd.DataFrame({'class': labels,
                       'probability': pred[0].detach().numpy()})
    return html.Div([
        # HTML images accept base64 encoded strings in the same format
        # that is supplied by the upload
        html.Img(src=htmlimg),
        html.Hr(),
        generate_table(df.sort_values(['probability'], ascending=[False]))
    ])
def generate_table(dataframe, max_rows=10):
    """Render up to *max_rows* rows of *dataframe* as a Dash HTML table."""
    columns = dataframe.columns
    header = html.Thead(html.Tr([html.Th(name) for name in columns]))
    n_rows = min(len(dataframe), max_rows)
    body = html.Tbody([
        html.Tr([html.Td(dataframe.iloc[row][name]) for name in columns])
        for row in range(n_rows)
    ])
    return html.Table([header, body])
@app.callback(Output('output-image-upload', 'children'),
              Input('upload-image', 'contents'),
              State('upload-image', 'filename'),
              State('upload-image', 'last_modified'))
def update_output(list_of_contents, list_of_names, list_of_dates):
    """Build one parsed block per uploaded file; no-op before any upload."""
    if list_of_contents is None:
        return None
    return [parse_contents(c, n, d)
            for c, n, d in zip(list_of_contents, list_of_names, list_of_dates)]
if __name__ == '__main__':
    # Launch the Dash development server.
    app.run_server(debug=True)
``` |
{
"source": "jiangweiyao/RabiesRefNAAP",
"score": 3
} |
#### File: jiangweiyao/RabiesRefNAAP/Fastq_Merger.py
```python
import sys
import os
import glob
import re
from datetime import date
from gooey import Gooey, GooeyParser
import subprocess
from pathlib import Path
@Gooey(program_name='Fastq Merger',
       default_size=(720, 900))
def main():
    """Merge the .fastq files found in each immediate subfolder of
    InputFolder into one <subfolder>.fastq file inside OutputFolder.

    Returns None; writes one merged fastq file per input subfolder.
    """
    now = date.today()
    home = str(Path.home())

    cli = GooeyParser(description="Fastq File Merger")
    required_args = cli.add_argument_group("Input Output Location", gooey_options={'columns': 1, 'show_border': True})
    required_args.add_argument('--InputFolder', help="Folder containing subfolders of fastq files", required=True, widget='DirChooser')
    required_args.add_argument('--OutputFolder', help="Output Folder", required=False, default=f"{home}/rabiesrefnaap_results/fastq_{now}", widget='DirChooser')
    args = cli.parse_args()

    # Expand '~' once and use the expanded path for every write.  (Previously
    # args.OutputFolder and the expanded path were mixed, so a '~' in the
    # argument created the directory in one place and wrote files elsewhere.)
    output_folder = os.path.expanduser(args.OutputFolder)
    os.makedirs(output_folder, exist_ok=True)

    # Immediate subdirectories of the input folder; each becomes one merge.
    subfolders = next(os.walk(args.InputFolder))[1]
    print(subfolders)
    for name in subfolders:
        read_files = glob.glob(os.path.join(args.InputFolder, name, "*.fastq"))
        print(read_files)
        with open(os.path.join(output_folder, name + ".fastq"), "wb") as outfile:
            for path in read_files:
                with open(path, "rb") as infile:
                    for line in infile:
                        outfile.write(line)
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
```
#### File: jiangweiyao/RabiesRefNAAP/RabiesRefNAAP_CLI.py
```python
import sys
import os
import glob
import re
from datetime import date
import argparse
import subprocess
from pathlib import Path
def main():
    """Command-line pipeline driver: QC (fastqc/multiqc), read filtering,
    coverage estimation, and reference-assisted assembly for every
    fastq/fq(.gz) file found under --InputFolder.

    All steps are delegated to helper scripts next to this file via
    subprocess; results are collected under --OutputFolder.
    """
    local_path = os.path.dirname(os.path.realpath(__file__))
    #print(local_path)
    # Paths to bundled helpers.  NOTE(review): data_path and the two R
    # helper paths are computed but never used in this function.
    data_path = f"{local_path}"
    scaffold_helper = f"{local_path}/scaffold_cutter.R"
    gapfixer_helper = f"{local_path}/gapfixer.R"
    now = date.today()
    home = str(Path.home())
    cli = argparse.ArgumentParser()
    cli.add_argument('-i', '--InputFolder', help="Folder containing barcoded fastq", required=True)
    cli.add_argument('-o', '--OutputFolder', help=f"Output Folder. Default is {home}/rabiesrefnaap_results/output_{now}", required=False, default=f"{home}/rabiesrefnaap_results/output_{now}")
    cli.add_argument('--TopN', help="The top N reference sequences with the most depth are analyzed. Default is 1.", type=int, required=False, default=1)
    cli.add_argument('--MinCov', help="Amplicon regions need a minimum of this average coverage number. Default is 5.", type=int, required=False, default=5)
    cli.add_argument('--threads', help="Number of threads. More is faster if your computer supports it. Default is 4.", type=int, required=False, default=4)
    # NOTE(review): '--verbose' is declared with default=4 (truthy), so the
    # cleanup branch below ("if not args.verbose") never runs by default,
    # contradicting the "Default is false" help text — confirm intent.
    cli.add_argument('--verbose', help = "Keep Intermediate Files. Default is false.", required=False, default=4)
    # NOTE(review): '--model' is parsed but never used below.
    cli.add_argument('--model', help="Basecall Model", required=False, type=str, default='r10_min_high_g303')
    args = cli.parse_args()
    #Run fastqc and multiqc on all the fastq/fastq.gz files in the folder
    subprocess.check_output(['python', local_path+'/fastqc_multiqc.py', '-i', args.InputFolder, '-o', args.OutputFolder+'/multiqc'])
    subprocess.check_output(['cp', args.OutputFolder+'/multiqc/multiqc_report.html', args.OutputFolder+'/multiqc_report.html'])
    #Interate over all the fastq/fastq.gz files
    files = sorted([f for f in glob.glob(args.InputFolder+"/**", recursive = True) if re.search(r'(.*)\.((fastq|fq)(|\.gz))$', f)])
    print(files)
    # NOTE(review): the expanded path below is computed but the subsequent
    # writes use args.OutputFolder directly — '~' arguments would diverge.
    OutputFolder = os.path.expanduser(args.OutputFolder)
    # Running coverage summary, one row per processed file.
    f = open(f"{args.OutputFolder}/coverage_summary.txt", "w")
    f.writelines(["filename", "\t", "reads", "\t", "mapped", "\t", "ncov", "\t", "gcov", "\t", "avelength"])
    f.flush()
    for i in range(0, len(files)):
        filec = files[i]
        # Strip up to two extensions (handles both .fastq and .fastq.gz).
        base = os.path.splitext(os.path.basename(filec))[0]
        base = os.path.splitext(base)[0]
        print(base)
        filec2 = args.OutputFolder+'/'+"filtered/"+base+"_filtered.fastq"
        #Trim and filter the reads
        subprocess.check_output(['python', local_path+'/seqtk_sizefilter_trim.py', '-i', filec, '-o', filec2])
        #Get coverage
        subprocess.check_output(['python', local_path+'/rabiescoverage.py', '-i', filec2, '-o', args.OutputFolder+'/coverage/'+base+"_coverage/"+base+"_coverage.txt", '-t', str(args.threads)])
        #subprocess.check_output(['cp', args.OutputFolder+'/coverage/'+base+"_coverage/"+base+"_coverage.txt", args.OutputFolder+'/'+base+"_coverage.txt"])
        # Append the last line of the per-file coverage report to the summary.
        subprocess.Popen(['tail', '-n 1', args.OutputFolder+'/coverage/'+base+"_coverage/"+base+"_coverage.txt"], stdout=f)
        f.write("\n")
        f.flush()
        #Get assembly
        subprocess.check_output(['python', local_path+'/refnaap_cli.py', '-i', filec2, '-o', args.OutputFolder+'/assembly/'+base+"_assembly/", '-t', str(args.threads), '--TopN', str(args.TopN), '--MinCov', str(args.MinCov)])
        subprocess.check_output(['cp', args.OutputFolder+'/assembly/'+base+"_assembly/final_scaffold.fasta", args.OutputFolder+"/"+base+"_final_scaffold.fasta"])
        # Progress line consumed by the GUI wrapper's progress regex.
        print("progress: {}/{}".format(i+1, len(files)))
    f.close()
    # Remove intermediate directories unless the user asked to keep them.
    if not args.verbose:
        subprocess.check_output(['rm', '-rf', args.OutputFolder+'/coverage'])
        subprocess.check_output(['rm', '-rf', args.OutputFolder+'/assembly'])
        subprocess.check_output(['rm', '-rf', args.OutputFolder+'/filtered'])
        subprocess.check_output(['rm', '-rf', args.OutputFolder+'/multiqc'])
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
``` |
{
"source": "jiangweiyao/SanitizeMe",
"score": 2
} |
#### File: jiangweiyao/SanitizeMe/SanitizeMe_GUI.py
```python
import sys
import os
import glob
import re
from datetime import date
from gooey import Gooey, GooeyParser
@Gooey(default_size=(750, 820),
       progress_regex=r"^progress: (?P<current>\d+)/(?P<total>\d+)$",
       progress_expr="current / total * 100")
def main():
    """GUI pipeline: map single-end long reads against a host reference with
    minimap2 and keep only the unmapped (non-host) reads via samtools.

    Requires minimap2 and samtools on PATH.  Every command is logged to
    <OutputFolder>/cmd.log before execution.

    NOTE(review): shell commands are built with f-strings and run through
    os.system, so paths containing spaces or shell metacharacters will break
    or be interpreted by the shell — consider subprocess.run([...]) instead.
    """
    now = date.today()
    cli = GooeyParser(description="Remove Host Reads from Long Read, Single End, Fastq Files")
    required_args = cli.add_argument_group("Input Output", gooey_options={'show_border': True, 'columns': 1})
    required_args.add_argument('--InputFolder', help="Folder containing fastq files. Only files ending in .fq, .fg.gz, .fastq, and .fastq.gz will be processed", required=True, widget='DirChooser')
    required_args.add_argument('--Reference', help="Host Reference fasta or fasta.gz file", required=True, widget='FileChooser')
    required_args.add_argument('--LargeReference', help = "Use this option if your reference file is greater than 4 Gigabases", required=False, widget='BlockCheckbox', action='store_true', gooey_options={ 'checkbox_label': "Yes" })
    required_args.add_argument('--OutputFolder', help="Output Folder", required=False, default=f"~/dehost_output/dehost_{now}")
    parser = cli.add_argument_group("Options", gooey_options={'show_border': True,'columns': 1})
    parser.add_argument('--threads', help="Number of threads. More is faster if your computer supports it", type=int, required=False, default=4)
    method = parser.add_mutually_exclusive_group("Sequencing Method", gooey_options={'show_border': True})
    method.add_argument('--Nanopore', help = 'Data from Nanopore Sequencing', action='store_true')
    method.add_argument('--PacBio', help = 'Data from PacBio Sequencing', action='store_true')
    method.add_argument('--PacBioCCS', help = 'Data from PacBio CCS', action='store_true')
    method.add_argument('--ShortRead', help = 'Single end short reads (Legacy support for Illumina)', action='store_true')
    args = cli.parse_args()
    # Translate the selected platform into a minimap2 preset name.
    if(args.Nanopore):
        seq_method="map-ont"
    elif(args.PacBio):
        seq_method="map-pb"
    elif(args.PacBioCCS):
        seq_method="asm20"
    elif(args.ShortRead):
        seq_method="sr"
    # Collect all fastq/fq(.gz) files directly under the input folder.
    files = sorted([f for f in glob.glob(args.InputFolder+"/*") if re.search(r'(.*)\.((fastq|fq)(|\.gz))$', f)])
    OutputFolder = os.path.expanduser(args.OutputFolder)
    os.system(f"mkdir -p {OutputFolder}")
    # Log every shell command issued for this run.
    f=open(f"{OutputFolder}/cmd.log", 'w+')
    for j in range(0, len(files)):
        i = files[j]
        # Strip up to two extensions (handles both .fastq and .fastq.gz).
        base = os.path.splitext(os.path.basename(i))[0]
        base = os.path.splitext(base)[0]
        #print(base)
        os.system(f"mkdir -p {OutputFolder}")
        # Very large references need minimap2's split-index mode.
        if args.LargeReference:
            minimap2_cmd = f"minimap2 --split-prefix index_name -ax {seq_method} {args.Reference} {i} -t {args.threads} > {OutputFolder}/{base}.sam"
        else:
            minimap2_cmd = f"minimap2 -ax {seq_method} {args.Reference} {i} -t {args.threads} > {OutputFolder}/{base}.sam"
        f.write(minimap2_cmd+'\n')
        os.system(minimap2_cmd)
        # samtools flag 4 keeps only reads that did NOT map to the host.
        samtools_cmd1 = f"samtools view -u -f 4 {OutputFolder}/{base}.sam > {OutputFolder}/{base}_filtered.sam"
        f.write(samtools_cmd1+'\n')
        os.system(samtools_cmd1)
        # Convert the surviving alignments back to fastq.
        samtools_cmd2 = f"samtools bam2fq {OutputFolder}/{base}_filtered.sam > {OutputFolder}/{base}_filtered.fastq"
        f.write(samtools_cmd2+'\n')
        os.system(samtools_cmd2)
        # Drop the intermediate SAM files.
        delete_cmd1 = f"rm {OutputFolder}/{base}.sam"
        os.system(delete_cmd1)
        f.write(delete_cmd1+'\n')
        delete_cmd2 = f"rm {OutputFolder}/{base}_filtered.sam"
        os.system(delete_cmd2)
        f.write(delete_cmd2+'\n')
        # Progress line consumed by the Gooey progress regex above.
        print("progress: {}/{}".format(j+1, len(files)))
    f.close()
# Script entry point: the process exit status is main()'s return value.
if __name__ == "__main__":
    sys.exit(main())
```
#### File: jiangweiyao/SanitizeMe/SanitizeMePaired_GUI.py
```python
import sys
import os
import glob
import re
from colored import stylize, attr, fg
from datetime import date
from gooey import Gooey, GooeyParser
@Gooey(default_size=(750, 820), richtext_controls=True,
       progress_regex=r"^progress: (?P<current>\d+)/(?P<total>\d+)$",
       progress_expr="current / total * 100")
def main():
    """Gooey GUI: remove host reads from paired-end short-read fastq files.

    For each forward/reverse pair found recursively under --InputFolder,
    map the reads against --Reference with minimap2 (sr preset), keep only
    the unmapped reads (samtools -f 4), convert back to fastq and split the
    interleaved output into _r1/_r2 files.  Every shell command is echoed
    to <OutputFolder>/cmd.log.
    """
    now = date.today()
    cli = GooeyParser(description="Remove Host Reads from Paired End Short Reads")
    required_args = cli.add_argument_group("Input Output", gooey_options={'show_border': True, 'columns': 1})
    required_args.add_argument('--InputFolder', help="Folder containing paired fq, fq.gz, fastq, and fastq.gz files. Program will recursively find paired reads", required=True, widget='DirChooser')
    required_args.add_argument('--Reference', help="Host Reference fasta or fasta.gz file", required=True, widget='FileChooser')
    required_args.add_argument('--LargeReference', help = "Use this option if your reference file is greater than 4 Gigabases", required=False, widget='BlockCheckbox', action='store_true', gooey_options={ 'checkbox_label': "Yes" })
    required_args.add_argument('--OutputFolder', help="Output Folder", required=False, default=f"~/dehost_output/dehost_{now}")
    parser = cli.add_argument_group("Options", gooey_options={'show_border': True,'columns': 1})
    parser.add_argument('--threads', help="Number of threads. More is faster if your computer supports it", type=int, required=False, default=4)
    args = cli.parse_args()

    # R1/R2 mates are paired purely by sorted order of the _1/_2 name lists.
    for_files = sorted([f for f in glob.glob(args.InputFolder+"/**", recursive = True) if re.search(r'(.*)_(R|)1(.*)\.((fastq|fq)(|\.gz))$', f)])
    rev_files = sorted([f for f in glob.glob(args.InputFolder+"/**", recursive = True) if re.search(r'(.*)_(R|)2(.*)\.((fastq|fq)(|\.gz))$', f)])

    # Fail fast on unpaired input *before* creating the log file; the
    # original opened cmd.log first and leaked the handle on raise.
    if (len(for_files) != len(rev_files)):
        print(stylize(f"You have unequal numbers of forward and reverse files!", fg("red") + attr("bold")))
        raise Exception(stylize(f"You have {len(for_files)} forward files and {len(rev_files)} reverse files!", fg("red") + attr("bold")))

    OutputFolder = os.path.expanduser(args.OutputFolder)
    os.system(f"mkdir -p {OutputFolder}")
    with open(f"{OutputFolder}/cmd.log", 'w+') as log:
        def run(cmd):
            # Log each shell command, then execute it.
            log.write(cmd + '\n')
            os.system(cmd)

        for i in range(0, len(for_files)):
            # Sample name = forward file name minus up to two extensions.
            base = os.path.splitext(os.path.basename(for_files[i]))[0]
            base = os.path.splitext(base)[0]
            if args.LargeReference:
                # --split-prefix is required for references > 4 Gbases
                run(f"minimap2 -ax sr {args.Reference} {for_files[i]} {rev_files[i]} -t {args.threads} --split-prefix index_name > {OutputFolder}/{base}.sam")
            else:
                run(f"minimap2 -ax sr {args.Reference} {for_files[i]} {rev_files[i]} -t {args.threads} > {OutputFolder}/{base}.sam")
            run(f"samtools view -u -f 4 {OutputFolder}/{base}.sam > {OutputFolder}/{base}_filtered.sam")  # -f 4: unmapped reads only
            run(f"samtools bam2fq {OutputFolder}/{base}_filtered.sam > {OutputFolder}/{base}_filtered.fastq")
            # Split interleaved fastq back into R1/R2 by the /1 and /2 read-name suffixes.
            run(f"cat {OutputFolder}/{base}_filtered.fastq | grep '^@.*/1$' -A 3 --no-group-separator >{OutputFolder}/{base}_filtered_r1.fastq")
            run(f"cat {OutputFolder}/{base}_filtered.fastq | grep '^@.*/2$' -A 3 --no-group-separator >{OutputFolder}/{base}_filtered_r2.fastq")
            # Remove intermediates.
            run(f"rm {OutputFolder}/{base}.sam")
            run(f"rm {OutputFolder}/{base}_filtered.sam")
            run(f"rm {OutputFolder}/{base}_filtered.fastq")
            print("progress: {}/{}".format(i+1, len(for_files)))
# Script entry point: the process exit status is main()'s return value.
if __name__ == "__main__":
    sys.exit(main())
``` |
{
"source": "jiangwenfan/petPhil_server",
"score": 2
} |
#### File: petPhil/authenticate/views.py
```python
from django.shortcuts import render,HttpResponse
# Create your views here.
def login(request):
    """Serve the login form; acknowledge credential submissions."""
    if request.method != 'POST':
        return render(request, "authenticate/login2.html")
    print("login...")
    return HttpResponse("登录成功!")
def registered(request):
    """Serve the registration form; acknowledge submissions."""
    if request.method != "POST":
        return render(request, "authenticate/registered2.html")
    print("registered...")
    return HttpResponse("注册成功!")
def passwdReset(request):
    """Serve the password-reset form; acknowledge submissions."""
    if request.method != "POST":
        return render(request, "authenticate/passwdReset2.html")
    print("reset ...")
    return HttpResponse("重置成功!")
```
#### File: publicApi/functionModule/createQueueHandle.py
```python
import json
from publicApi.functionModule import rabbitMQHandle
from django.shortcuts import render,HttpResponse
def judgeMethod(func):
    """Decorator: let only POST requests through to *func*."""
    def wrapper(request):
        method = request.method
        if method == "POST":
            return func(request)
        if method == "GET":
            return HttpResponse("only get")
        return HttpResponse("only post")
    return wrapper
# Canned JSON payloads returned by handle(): queue creation success / failure.
OK = {"status": "ok", "messg": "队列创建成功"}
ERROR = {"status":"error","messg":"队列创建失败"}
@judgeMethod
def handle(request):
    """Create every RabbitMQ queue named in the POSTed queueNameInfo list.

    Returns a JSON status payload; stops at the first queue that fails.
    """
    userid = request.POST.get("userid")  # requesting user's id (logged only)
    print(userid)
    names = json.loads(request.POST.get("queueNameInfo"))
    for name in names:
        print(name)
        if not rabbitMQHandle.RabbitMQHandle(name).create():
            return HttpResponse(json.dumps(ERROR))
    return HttpResponse(json.dumps(OK))
``` |
{
"source": "jiangwenfan/pythonScripts",
"score": 3
} |
#### File: pythonScripts/autoGetShowIp/getIpServer.py
```python
import socket
import time
webData = []
def main():
    """TCP server on port 5656 that collects IP reports and serves them over HTTP.

    Crude dual-protocol loop: payloads longer than 120 bytes are assumed to
    be HTTP requests from a browser and receive an HTML page listing the
    collected IPs; shorter payloads are treated as IP reports from clients
    and are appended to the module-level webData list.
    """
    tcpServerSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    tcpServerSocket.bind(("",5656))
    tcpServerSocket.listen(128)
    global webData
    while True:
        print("\n\nwait a new client:")
        newClientSocket,clientAddr = tcpServerSocket.accept()
        print("client info: %s"%str(clientAddr))
        while True:
            recvData = newClientSocket.recv(1024)
            recvLength = len(recvData.decode("utf-8"))
            # Heuristic: payload size decides where the request came from.
            if recvLength > 120:
                # Browser request: answer with a minimal HTTP response.
                response_headers = "HTTP/1.1 200 OK\r\n" # 200: resource found
                response_headers += "\r\n" # blank line separates headers from body
                # Build the body: one <h2> per collected IP.
                response_body = "<h1>receive data :<h1>\r\n"
                #sumInfo = ""
                for i in range(len(webData)):
                    #sumInfo = sumInfo+"\r\n"+i
                    ip = webData[i]
                    response_body += "<h2> "+ip+" <h2>\r\n"
                #response_body += "<h3>binlang!!!<h3>\r\n"
                # Assemble the full response.
                response = response_headers + response_body
                # Send it back to the browser.
                newClientSocket.send(response.encode("utf-8")) # encode before sending
                print("web is over!")
            else:
                # Reporting client sent an IP string.
                ipInfo = recvData.decode("utf-8")
                if len(webData) > 10:
                    # Cap the buffer: drop everything once it exceeds 10 entries.
                    webData.clear()
                    webData.append(ipInfo)
                else:
                    webData.append(ipInfo)
                print("receive data: \n%s"%ipInfo)
                break
        newClientSocket.close()
        print("it is over!")
    tcpServerSocket.close()
# Script entry point.
if __name__ == "__main__":
    main()
```
#### File: pythonScripts/functionModule/des.py
```python
import binascii
from pyDes import des, CBC, PAD_PKCS5
def des_encrypt(s):
    """DES-encrypt *s* (CBC mode, PKCS5 padding); return hex-encoded bytes."""
    key = '20171117'  # hard-coded key doubles as the IV
    cipher = des(key, CBC, key, pad=None, padmode=PAD_PKCS5)
    return binascii.b2a_hex(cipher.encrypt(s, padmode=PAD_PKCS5))
def des_descrypt(s):
    """Decrypt hex-encoded DES ciphertext *s*; return the plaintext bytes."""
    key = '20171117'  # must match the key/IV used by des_encrypt
    cipher = des(key, CBC, key, pad=None, padmode=PAD_PKCS5)
    return cipher.decrypt(binascii.a2b_hex(s), padmode=PAD_PKCS5)
# Module-level smoke test: round-trip the string 'zx'.
str_en = des_encrypt('zx')
print(str_en)
str_de = des_descrypt(str_en)
print(str_de)
```
#### File: pythonScripts/functionModule/getWeather.py
```python
class Weather(object):
    # NOTE(review): unfinished stub — __init__ evaluates `self` as a no-op
    # and never stores *city*; presumably meant to fetch weather for the
    # given city.  Confirm intent before use.
    def __init__(self,city):
        self
```
#### File: items_shelve_waste/baiduNews_shelve/main.py
```python
import os
#问题描述:在天朝上传一些文件或文件夹到云盘(比如,天翼云盘)会检测文件名,如果是文件名包含敏感词,上传进度怎会卡住。
# 获取当前目录下的所有文件和文件名,修改文件名字为:123-->1_my_23
# 进入目录,接着重复上面的步骤。
# 直到全部都是文件,则停止。
# 拓展功能: 生成日志
original_path = 'C:\\Users\\Administrator\\Documents\\Downloads\\'
def get_new_file_name(old_file_name):
    """Return *old_file_name* with "_my_" spliced in after its first character,
    e.g. "123" -> "1_my_23" (defeats naive filename keyword filters)."""
    head, *rest = old_file_name
    return head + "_my_" + "".join(rest)
# NOTE(review): ``path`` is never defined in this module (only
# ``original_path`` is) — this top-level scan raises NameError as written.
nameList = os.listdir(path) # names of files and folders under the target path
for name in nameList:
    name_path = path +name
    if os.path.isdir(name_path): # descend only into directories
        namelist = os.listdir(name_path) # second-level file/folder names
        if len(namelist) == 0:
            print("该目录是空的"+name_path)
        else:
            print(namelist)
        #print("this is dir:"+name_path)
        #print(name_path)
def show_dir(path):
    """Rename every file directly inside *path* via get_new_file_name().

    NOTE(review): the original body did not parse (an ``if`` whose only
    body was a comment, followed by ``else:``) and referenced several
    undefined names (``name``, ``namepath``, ``new_name``).  This version
    implements the intended per-file rename; recursion into
    subdirectories is left as a TODO.
    """
    nameList = os.listdir(path)  # files and folders directly under *path*
    for old_file_name in nameList:
        old_file_namePath = path + old_file_name  # *path* is expected to end with a separator
        if os.path.isdir(old_file_namePath):
            # TODO: recurse with show_dir(old_file_namePath + os.sep) once
            # directory-renaming rules are decided.
            continue
        new_file_name = get_new_file_name(old_file_name)
        new_file_namePath = path + new_file_name
        os.rename(old_file_namePath, new_file_namePath)
# NOTE(review): a stray, broken module-level loop that referenced the
# undefined names ``new_name``/``filepath`` was removed here; show_dir()
# above covers the intended renaming.
```
#### File: items_shelve_waste/baiduNews_shelve/progress_bar.py
```python
import os
import sys,time,threading
def get_size(file,file2,interval_time=1.5):
    """Crude console progress bar comparing destination size to source size.

    file: source file path; file2: destination file path.
    NOTE(review): several latent bugs are preserved below —
      * ``num_one`` is a *string* but is compared to the int 1, so the
        completion branch can never be taken;
      * ``num1, num2, num3, *null = num_two`` raises ValueError whenever
        the ratio has fewer than three decimal digits;
      * ``interval_time`` is accepted but never used.
    """
    status = [0]  # loop flag; 1 would mean "done" (unreachable, see above)
    while status[0] == 0:
        origin_file_size = int(os.path.getsize(file))/1024/1024 # source size, MB
        current_file_size = int(os.path.getsize(file2))/1024/1024 # destination size, MB
        print(type(origin_file_size))
        number_list = str(current_file_size / origin_file_size).split(".") # integer / fractional parts of the ratio
        num_one = number_list[0] # integer part (a string!)
        num_two = number_list[1] # fractional part
        num1, num2, num3, *null = num_two # first three decimal digits; *null drops the rest
        if num_one != 1:
            print("\n" + "---------当前进度-------------------")
            for one in range(int(num1)):
                "打印次数最慢的"
                sys.stdout.write("#")
                time.sleep(0.15)
            #status[0] =0
            print("\r")
            for two in range(int(num2)):
                sys.stdout.write("#")
                time.sleep(0.15)
            print("\r")
            for three in range(int(num3)):
                sys.stdout.write("#")
                time.sleep(0.15)
        else:
            print("操作完成!")
            status[0]=1
get_size("F:\\cell.csv","F:\\doc\\git\\docker.pdf")
```
#### File: keys/handleFunction/addHandle.py
```python
from django.shortcuts import render
from functionMoudle.savePasswd import savePasswd
"""
1.根据不同响应方式返回不同的页面。
2.获取表单的提交数据,提交给addPasswd函数
3.根据返回值返回不同的响应页面
"""
def addHandle(request):
    """GET: show the add-password form; POST: persist the submitted entry."""
    if request.method == 'GET':
        return render(request, 'keys/add.html')
    # Collect the form fields in the same order the template submits them.
    field_names = ('siteId', 'sitePasswdEncry', 'siteName', 'siteDomain',
                   'siteLoginUrl', 'siteLoginArea', 'algor')
    data = {name: request.POST.get(name) for name in field_names}
    print(data)
    status = '添加Ok' if savePasswd(data) else '添加error'
    return render(request, 'keys/addRes.html', {'status': status})
```
#### File: language/functionModules/addData.py
```python
from spokenLanguage.models import SpokenLanguage
"""
add data to table
"""
class AddData(object):
    """Insert one spoken-language record into the SpokenLanguage table.

    Fixes vs. the original: __init__ accessed the non-existent
    ``self.args`` (AttributeError) and overwrote ``self.zh`` with the
    remark; add() instantiated the undefined name ``Language`` and copied
    bare (undefined) locals instead of the instance attributes.
    """
    def __init__(self, **args):
        # Required keys: enType, en, zh, remark.
        self.enType = args['enType']
        self.en = args['en']
        self.zh = args['zh']
        self.remark = args['remark']
    def add(self):
        """Create and save the database row; status "1" presumably marks it active."""
        record = SpokenLanguage()
        record.enType = self.enType
        record.en = self.en
        record.zh = self.zh
        record.remark = self.remark
        record.status = "1"
        record.save()
```
#### File: spiderData/functionHandle/oulu.py
```python
import requests
import os
from lxml import etree
import pymysql
rootPath=r'E:\#siteDATA\english-word' #保存的根据路径,不同的平台需要修改
def myRequest(url):
    """GET *url* and force the response body to be decoded as UTF-8."""
    resp = requests.get(url)
    resp.encoding = "utf-8"
    return resp
"""
功能已完成,先插入单词数据,就可以了
"""
def eudic(word):
    """Scrape dict.eudic.net for *word*: phonetic symbol, translations, audio.

    Persists the result via the sibling helpers saveMp3()/writeToMysql()
    (defined elsewhere in this file).
    """
    realUrl="https://dict.eudic.net/dicts/en/"+word
    # response = requests.get(url=realUrl)
    # response.encoding="utf-8"
    response = myRequest(realUrl)
    # Parse the result page.
    treeObj = etree.HTML(response.text)
    symbolText = treeObj.xpath('//*[@id="exp-head"]/div/span[1]/span[4]/text()')[0]
    fanyi= treeObj.xpath('//*[@id="ExpFCChild"]/ol/li')
    print(fanyi)
    fanyiText=""
    for li in fanyi:
        text = li.xpath('.//text()')
        te = "".join(text)
        fanyiText = fanyiText+te+"#" # '#'-separated list of translations
    print(fanyiText)
    voiceSector = treeObj.xpath('//*[@id="exp-head"]/div/span[1]/a[2]/@data-rel')[0]
    voiceUrl="https://api.frdic.com/api/v2/speech/speakweb?"+voiceSector
    mp3response = myRequest(voiceUrl)
    mp3Path = saveMp3(word,mp3response,"usa") # "usa" — American pronunciation, presumably
    # Persist symbol, audio path and translations.
    writeToMysql(word,symbolText,mp3Path,fanyiText,"usa")
#eudic('succeed')
# NOTE(review): readMysql is not defined anywhere visible in this module —
# this call raises NameError at import time unless it is defined elsewhere.
readMysql()
```
#### File: spokenLanguage/functionHandle/onXiuHandle.py
```python
from django.shortcuts import render,HttpResponse
def onXiuHandle(request):
    """Render the spoken-language index page with a single sample entry."""
    payload = "hello"  # placeholder until real data retrieval is wired up
    return render(request,'spokenLanguage/index.html',{'data':payload})
```
#### File: language/spokenLanguage/views.py
```python
from django.shortcuts import render
from functionHandle import onXiuHandle
# Create your views here.
"昂修 spoken language"
def on(request):
    """Spoken-language view: delegate to the onXiu handler.

    Fix: the handler requires the request object; the original called it
    with no arguments, raising TypeError on every request.
    """
    return onXiuHandle.onXiuHandle(request)
```
#### File: pythonScripts/learnEnglish/createDifferentOrder.py
```python
import csv
import xlwt
import random
fileName = "words.csv" # source CSV of word pairs
wordsListDict=[] # all word entries, as a list of single-entry dicts [{}, {}]
wordsListList = [] # the same entries flattened to [key, value] row lists
def create_excel(xlsname,wordsListList):
    """Write the [key, value] rows to *xlsname* as a single-sheet workbook."""
    workbook = xlwt.Workbook(encoding='utf-8') # create the workbook
    data_sheet = workbook.add_sheet('demo22') # create the sheet
    print("ecel num:"+str(len(wordsListList)))
    for j in range(len(wordsListList)): # j: row index
        for i in range(2): # columns 0 and 1 (word / translation)
            data_sheet.write(j , i , wordsListList[j][i],)
    workbook.save(xlsname)
def create_dict(line):
    """Wrap one CSV row ([key, value]) into a single-entry dict and collect
    it in the module-level wordsListDict."""
    wordsListDict.append({line[0]: line[1]})
def read_OriginalFile(filename):
    """Read the source CSV and load each non-empty row into wordsListDict."""
    with open(filename,encoding='utf-8') as f: # file is stored as utf-8
        content = csv.reader(f)
        num = 0 # number of non-empty rows read
        for row in content:
            if len(row) != 0:
                create_dict(row) # wrap the row in a dict and collect it
                num += 1
        if len(wordsListDict) == num:
            print("读取成功")
def create_file(mode,type='xls'):
    """Build the word list in the requested order and write it out.

    mode: "chaos" (random order), "shun" (original order) or "fan" (reversed).
    type: "xls" collects rows and writes one workbook via create_excel();
          "csv" appends rows straight to a per-mode csv file.
    Consumes the module-level wordsListDict filled by read_OriginalFile().

    Refactor: the original triplicated the identical row-emitting logic
    across all three mode branches; it is extracted into one helper here.
    """
    read_OriginalFile(fileName)

    def emit(entry, csv_name):
        # Emit one {key: value} entry either into the in-memory xls rows
        # or straight to the csv file (opened per row, as before).
        for key, value in entry.items():
            if type == 'xls':
                wordsListList.append([key, value])
            elif type == 'csv':
                with open(csv_name, 'a', encoding='utf-8') as f:
                    f.write(key + "," + value + "\n")
            else:
                print("file type is error!")

    if mode == "chaos":
        print("begin to create chaos list.........")
        # Draw random entries without replacement until the source is empty.
        for _ in range(len(wordsListDict)):
            entry = random.choice(wordsListDict)
            emit(entry, 'new_chaos.csv')
            wordsListDict.remove(entry)
        print(wordsListList)
        if type == "xls":
            create_excel(xlsname='cahos.xls', wordsListList=wordsListList)
            print("chaos :write ok!................")
    elif mode == "shun":
        print("begin to create shun list.......")
        for entry in wordsListDict:
            emit(entry, 'new_shun.csv')
        if type == "xls":
            create_excel(xlsname='shun.xls', wordsListList=wordsListList)
            print("shun :write ok!................")
    elif mode == "fan":
        print("start to create fan list..........")
        wordsListDict.reverse()  # reverse in place, then emit in order
        for entry in wordsListDict:
            emit(entry, 'new_fan.csv')
        if type == "xls":
            create_excel(xlsname='fan.xls', wordsListList=wordsListList)
            print("fan : write ok........... ")
    else:
        print("this is error: ",mode)
def main():
    """Interactive entry point: ask for ordering mode and output type."""
    print("this is create words list program")
    print("请事先准备好words.csv这个原始文件. --> 推荐格式为:[主持人,presenter]这种类型的")
    #file_name = str(input("please input origin file path:"))
    str_mode = str(input("please input words list mode:[chaos,shun,fan]:"+"\n"))
    str_type = str(input("please input view file type: [csv,xls]: ---->默认为xls"+"\n"))
    print(str_type)
    if str_type == "":
        create_file(str_mode) # empty answer -> default xls output
    else:
        create_file(str_mode,str_type)
# Script entry point.
if __name__ == '__main__':
    main()
#print(wordsListDict)
```
#### File: learnEnglish/item_word_remembers/word.py
```python
import os
import time
filelist = []  # rows from word.txt, each split on commas: [english, chinese]
# Today's date, used to name the per-day answer file (YYYY_M_D.txt).
t = time.localtime()
year = t.tm_year
mon = t.tm_mon
day = t.tm_mday
def show():
    """Clear the terminal (POSIX `clear`; not portable to Windows)."""
    os.system("clear")
def readfile(FileName):
    """Load *FileName*, splitting each line on commas into the global filelist."""
    with open(FileName) as src:
        filelist.extend(line.split(',') for line in src)
def readlist():
    """Quiz loop: show each English word, record the user's answer to a
    dated text file (YYYY_M_D.txt)."""
    length = len(filelist)
    for j in range(length):
        en = filelist[j][0]
        cn = filelist[j][1].strip("\n")
        show() # clear the screen between words
        enter = "\n\n\n\n\n"*2
        #message = input(enter+en+" : ")
        cmd = "\033[1;32;40m"+en+" : "+"\033[0m" # green-on-black ANSI prompt
        message = input(enter+cmd)
        txt = en+","+cn+" :"+message+"\n"+"\n"
        filename = str(year)+"_"+str(mon)+"_"+str(day)+".txt"
        with open(filename,'a') as f:
            f.write(txt)
        print("\n\n\n\n\n")
def main():
    """Load word.txt and run the quiz."""
    readfile('word.txt')
    readlist()
# Script entry point.
if __name__ == '__main__':
    main()
```
#### File: jiangwenfan/pythonScripts/mp4AuotMp3.py
```python
import os
import glob
import threading
import time
# (Dead prototype that filtered os.listdir results; superseded by glob below.)
#fileList = os.listdir("./")
#print(len(fileList))
#print(fileList)
#for i in fileList:
#    if not os.path.isfile(i):
#        fileList.remove(i)
#print(len(fileList))
#print(fileList)
mp4List = glob.glob("*.mp4")  # every .mp4 in the current working directory
def change(mp4file):
    """Convert one .mp4 into ./mp3/<name>.mp3 via ffmpeg (run in a worker thread).

    Fix: the original used a check-then-mkdir pair, which races when many
    threads start at once; os.makedirs(exist_ok=True) is atomic enough and
    also runs before the conversion command is issued.
    """
    os.makedirs("mp3", exist_ok=True)
    safe_name = mp4file.replace(":", "-")  # ':' breaks the shell command / output path
    mp3file = "./mp3/" + safe_name.split(".")[0] + ".mp3"
    cmd = "ffmpeg -i " + mp4file + " -f mp3 -vn " + mp3file
    print(cmd)
    print(threading.current_thread())
    os.system(cmd)
def show(n):
    """Debug helper: sleep 2s, then report *n* and the current thread."""
    time.sleep(2)
    print("value:"+n)
    print(threading.current_thread())
# Spawn one converter thread per file; the threads are never joined, so the
# script exits only once all (non-daemon) workers have finished.
for mp4File in mp4List:
    #change(mp4File)
    t = threading.Thread(target=change,args=(mp4File,))
    t.start()
#print(len(mp4List))
#print(mp4List)
```
#### File: pythonScripts/passwordManager_py2/teacher.py
```python
from smtplib import *
from Tkinter import *
import tkMessageBox
import string
class loginPage(object):
    """Python 2 / Tkinter login window: collects SMTP credentials and, on a
    successful connection, opens the compose window (sendMail)."""
    def __init__(self, master, info='Mail Send System'):
        # Lay out the title, username/password entries and the two buttons.
        self.master = master
        self.mainlabel = Label(master, text=info, justify=CENTER)
        self.mainlabel.grid(row=0, columnspan=3)
        self.user = Label(master, text='username', borderwidth=2)
        self.user.grid(row=1, sticky=W)
        self.pwd = Label(master, text='password', borderwidth=2)
        self.pwd.grid(row=2, sticky=W)
        self.userEntry = Entry(master)
        self.userEntry.grid(row=1, column=1, columnspan=2)
        self.userEntry.focus_set()
        self.pwdEntry = Entry(master, show='*')
        self.pwdEntry.grid(row=2, column=1, columnspan=2)
        self.loginButton = Button(master, text='Login', borderwidth=2, command=self.login)
        self.loginButton.grid(row=3, column=1)
        self.clearButton = Button(master, text='Clear', borderwidth=2, command=self.clear)
        self.clearButton.grid(row=3, column=2)
    def login(self):
        """Validate the form, derive the SMTP host, then try to connect."""
        self.username = self.userEntry.get().strip()
        self.passwd = self.pwdEntry.get().strip()
        # Reject empty fields or an account name without an '@'.
        if len(self.username) == 0 or len(self.passwd) == 0 or '@' not in self.username:
            tkMessageBox.showwarning('警告', '用户名或者密码为空或邮件格式不正确')
            self.clear()
            self.userEntry.focus_set()
            return
        self.getSmtpHost()
        self.connect()
    def connect(self):
        """Connect to smtp.<provider>.com and log in; on success open the
        compose window, on failure show the error in a message box."""
        HOST = 'smtp.' + self.smtp + '.com'
        try:
            self.mySMTP = SMTP(HOST)
            self.mySMTP.login(self.username, self.passwd)
        # except SMTPConnectError:
        except Exception, e:
            tkMessageBox.showerror('连接错误', '%s' % e)
            return
        self.mySendMail = sendMail(self.master, self.mySMTP, self.username)
    def clear(self):
        """Empty both entry fields."""
        self.userEntry.delete(0, END)
        self.pwdEntry.delete(0, END)
    def getSmtpHost(self):
        """Derive the SMTP provider name from the account's mail domain,
        e.g. user@126.com -> '126'."""
        firstSplit = self.username.split('@')[1]
        self.smtp = firstSplit.split('.')[0]
class sendMail(object):
    """Compose window: builds the message from the form fields and sends it
    over the already-authenticated SMTP connection (Python 2 / Tkinter)."""
    def __init__(self, master, smtp='', sender=''):
        # smtp: an authenticated smtplib.SMTP instance; sender: its account.
        self.smtp = smtp
        self.sender = sender
        self.sendPage = Toplevel(master)
        self.sendToLabel = Label(self.sendPage, text='send to:')
        self.sendToLabel.grid()
        self.sendToEntry = Entry(self.sendPage)
        self.sendToEntry.grid(row=0, column=1)
        self.subjectLabel = Label(self.sendPage, text='subject:')
        self.subjectLabel.grid(row=1, column=0)
        self.subjectEntry = Entry(self.sendPage)
        self.subjectEntry.grid(row=1, column=1)
        self.fromToLabel = Label(self.sendPage, text='from to:')
        self.fromToLabel.grid(row=2, column=0)
        self.formToAdd = Label(self.sendPage, text=self.sender)
        self.formToAdd.grid(row=2, column=1)
        self.sendText = Text(self.sendPage)
        self.sendText.grid(row=3, column=0, columnspan=2)
        self.sendButton = Button(self.sendPage, text='send', command=self.sendMail)
        self.sendButton.grid(row=4, column=0)
        self.newButton = Button(self.sendPage, text='new mail', command=self.newMail)
        self.newButton.grid(row=4, column=1)
    def getMailInfo(self):
        """Snapshot recipient, subject and body from the widgets."""
        self.sendToAdd = self.sendToEntry.get().strip()
        self.subjectInfo = self.subjectEntry.get().strip()
        self.sendTextInfo = self.sendText.get(1.0, END)
    def sendMail(self):
        """Assemble RFC-822 headers + body and send the mail."""
        self.getMailInfo()
        body = string.join(("From: %s" % self.sender, "To: %s" % self.sendToAdd, "Subject: %s" % self.subjectInfo, "",
                            self.sendTextInfo), "\r\n")
        try:
            self.smtp.sendmail(self.sender, [self.sendToAdd], body)
        except Exception, e:
            # NOTE(review): tkMessageBox has no 'showerr' — this line raises
            # AttributeError when a send fails; probably meant showerror.
            tkMessageBox.showerr('发送失败', "%s" % e)
            return
        tkMessageBox.showinfo('提示', '邮件已发送成功!')
    def newMail(self):
        """Reset recipient, subject and body for a fresh message."""
        self.sendToEntry.delete(0, END)
        self.subjectEntry.delete(0, END)
        self.sendText.delete(1.0, END)
# Script entry point: build the root window and start the Tk main loop.
if __name__ == '__main__':
    root = Tk()
    root.title('简易发送邮件程序')
    myLogin = loginPage(root)
    # root.wait_window(myLogin.mySendMail.sendPage)
    mainloop()
```
#### File: pcdn/function/uploadHandle.py
```python
from django.shortcuts import render,HttpResponse
from pcdn.models import Pcdn
import json
def uploadHandler(request):
    """POST: store one uploaded pcdn accounting record; other methods: ping."""
    if request.method != "POST":
        return HttpResponse("test-ok")
    payload = json.loads(request.body.decode())
    print(payload)
    # Map the JSON payload onto a new Pcdn row and persist it.
    record = Pcdn()
    #record.nodeName =
    record.hostName = payload["hostName"]
    record.accountAgo = payload["accAgo"]
    record.accountlater = payload["accLater"]
    record.accountLog = payload["accLog"]
    record.save()
    return HttpResponse("okk")
```
#### File: searchIssue/qywechat/views.py
```python
from django.shortcuts import render,HttpResponse
from qywechat.weworkapi.callback.WXBizMsgCrypt3 import WXBizMsgCrypt
import xml.etree.cElementTree as ET
import sys
from xml.dom.minidom import parse
# Create your views here.
"""
接收企业微信的消息,验证通过
"""
# def receive(request):
# if request.method == "GET":
# #校验消息
# sVerifyMsgSig = request.GET.get("msg_signature")
# sVerifyTimeStamp = request.GET.get("timestamp")
# sVerifyNonce = request.GET.get("nonce")
# #request.get("sReplyEchoStr")
# sVerifyEchoStr = request.GET.get("echostr")
# print("-----------------------------")
# print(sVerifyMsgSig)
# print(sVerifyTimeStamp)
# print(sVerifyNonce)
# print(sVerifyEchoStr)
# print("-----------------------------")
#
# # 假设企业在企业微信后台上设置的参数如下
# sToken = "<KEY>"
# sEncodingAESKey = "<KEY>"
# sCorpID = "ww0195826a8c3bbd01"
#
# # 创建解密对象
# wxcpt = WXBizMsgCrypt(sToken, sEncodingAESKey, sCorpID)
# ret, sEchoStr = wxcpt.VerifyURL(sVerifyMsgSig, sVerifyTimeStamp, sVerifyNonce, sVerifyEchoStr)
# if (ret != 0):
# print("ERR: VerifyURL ret: " + str(ret))
# print(sEchoStr)
# return HttpResponse(sEchoStr)
# #return HttpResponse("ok2")
"""
接收消息
"""
def receive(request):
    """Decrypt an incoming WeChat Work (企业微信) callback message and answer
    it with a fixed, encrypted text reply."""
    # Signature pieces required to decrypt/verify the callback payload.
    sReqMsgSig = request.GET.get("msg_signature")
    sReqTimeStamp = request.GET.get("timestamp")
    sReqNonce = request.GET.get("nonce")
    sReqData = str(request.body,encoding="utf-8")
    # (Dead experiment: parsing the <Encrypt> node via minidom.)
    # with open("t.xml",'w') as f:
    #     f.write(sReqData)
    # dom = parse("t.xml")
    # data = dom.documentElement
    # # get the Encrypt node
    # st = data.getElementsByTagName("Encrypt")[0]
    # get its content
    #value = st.childNodes[0].nodeValue
    # print("-----------------------------")
    # print(sVerifyMsgSig)
    # print(sVerifyTimeStamp)
    # print(sVerifyNonce)
    # print(st)
    # print("-----------------------------")
    # Credentials as configured in the WeChat Work admin console.
    sToken = "<KEY>"
    sEncodingAESKey = "<KEY>"
    sCorpID = "ww0195826a8c3bbd01"
    # Build the decryptor and decrypt the request body.
    wxcpt = WXBizMsgCrypt(sToken, sEncodingAESKey, sCorpID)
    ret, sMsg = wxcpt.DecryptMsg(sReqData, sReqMsgSig, sReqTimeStamp, sReqNonce)
    if (ret != 0):
        print("ERR: DecryptMsg ret: " + str(ret))
        # NOTE(review): sys.exit inside a Django view kills the worker
        # process — consider returning an error response instead.
        sys.exit(1)
    # Decryption succeeded: sMsg is the plaintext XML message.
    xml_tree = ET.fromstring(sMsg)
    content = xml_tree.find("Content").text
    print("接收到的消息:",content)
    # Build the (fixed) reply and encrypt it for the caller.
    #sRespData = "<xml><ToUserName>ww1436e0e65a779aee</ToUserName><FromUserName>ChenJiaShun</FromUserName><CreateTime>1476422779</CreateTime><MsgType>text</MsgType><Content>你好</Content><MsgId>1456453720</MsgId><AgentID>1000002</AgentID></xml>"
    sRespData = "<xml><MsgType>text</MsgType><Content>滚,你个LSP</Content></xml>"
    """
    <xml>
    <MsgType>text</MsgType>
    <Content>你好</Content> 文本消息内容,最长不超过2048个字节,超过将截断
    ------
    <CreateTime>1476422779</CreateTime> 消息创建时间(整型)
    <ToUserName>ww1436e0e65a779aee</ToUserName> 成员ID. 企业id时,响应所有人
    <FromUserName>ChenJiaShun</FromUserName> 企业微信CorpID
    <MsgId>1456453720</MsgId>
    <AgentID>1000002</AgentID>
    </xml>
    """
    ret, sEncryptMsg = wxcpt.EncryptMsg(sRespData, sReqNonce, sReqTimeStamp)
    if (ret != 0):
        print("ERR: DecryptMsg ret: " + str(ret))
        sys.exit(1)
    # ret == 0: encryption succeeded; return sEncryptMsg to WeChat Work.
    # TODO:
    # HttpUitls.SetResponse(sEncryptMsg)
    return HttpResponse(sEncryptMsg)
```
#### File: search/functionModule/pressureIp.py
```python
from django.shortcuts import render
from django.http import HttpResponse,HttpRequest
from search.models import Whitecommand,Whitebaoxiu,WhiteIps
def sip(request):
    """GET: show the distinct regions of pressure-test IPs; POST: list the
    IPs matching the selected carrier and region."""
    if request.method == "GET":
        regions = [row['regionname']
                   for row in WhiteIps.objects.all().values("regionname")]
        return render(request, "search/sip.html", {"res": set(regions)})
    yongyinshang = request.POST.get('yongyinshang')
    print(yongyinshang)
    region = request.POST.get('province')
    print(region)
    #res = WhiteIps.objects.filter(yongyinshang=yongyinshang)
    matches = WhiteIps.objects.filter(yongyinshang=yongyinshang, regionname=region)
    #res = WhiteIps.objects.filter(regionname=region)
    print(matches)
    print(len(matches))
    if len(matches) == 0:
        return HttpResponse("没有符合这个条件的IP!")
    result = []
    for ip in matches:
        item = {
            'sip': ip.sip,
            'regionname': ip.regionname,
            'city': ip.city,
            'yongyingshang': ip.yongyinshang,
        }
        result.append(item)
        print(item)
    print(result)
    return render(request, "search/sipres.html", {"res": result})
```
#### File: searchIssue/search/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse,HttpRequest
from search.models import Whitecommand,Whitebaoxiu,WhiteIps
from search.functionModule import hardwareSearch
from search.functionModule import searchIssue
from search.functionModule import pressureIp
# Create your views here.
"""
1. 菜单页面
"""
def menu(request):
    """Top-level menu page; POST is merely acknowledged."""
    if request.method == "GET":
        return render(request,"menu.html")
    else:
        return HttpResponse("post ok!")
"""
2. 添加问题页面
"""
def add(request):
    """Delegate the add-issue page to the searchIssue module."""
    return searchIssue.add(request)
"""
3. 显示问题页面
"""
def show(request):
    """Delegate the issue-listing page to the searchIssue module."""
    return searchIssue.show(request)
"""
硬件保修查询
"""
def baoxiu(request):
    """Hardware warranty lookup; delegated to hardwareSearch."""
    return hardwareSearch.hardwareHandle(request)
"""
压测ip查询
"""
def sip(request):
    """Pressure-test IP lookup; delegated to pressureIp."""
    return pressureIp.sip(request)
def sip2(request):
    """Static page for the node-efficiency sip view."""
    return render(request,"nodeEfficiency/sip2.html")
"""
所有功能的控制显示页面
"""
def control(request):
    """Dashboard page linking to all features."""
    return render(request, "search/control.html")
```
#### File: jiangwenfan/pythonScripts/sendEmail.py
```python
import smtplib
from email.header import Header
from email.mime.text import MIMEText
# SMTP settings — the <...> placeholders must be replaced with real values.
mail_host = "smtp.126.com"
mail_user = "<EMAIL>" # account name
mail_pass = "<PASSWORD>" # SMTP authorization code
sender = '<EMAIL>' # sender address (use the full address, or sending fails)
receivers = ['<EMAIL>']
content = '机房电脑已经正常启动。。。'
title = 'test2程序自动发送。。'
def sendEmail():
    """Send the module-level *content* to *receivers* over SMTP-SSL (port 465)."""
    message = MIMEText(content, 'plain', 'utf-8')  # body, format, encoding
    message['From'] = "{}".format(sender)
    message['To'] = ",".join(receivers)
    message['Subject'] = title
    try:
        client = smtplib.SMTP_SSL(mail_host, 465)  # SSL submission port
        client.login(mail_user, mail_pass)
        client.sendmail(sender, receivers, message.as_string())
        print("mail has been send successfully.")
    except smtplib.SMTPException as e:
        print(e)
def send_email2(SMTP_host, from_account, from_passwd, to_account, subject, content):
    """Send *content* as a UTF-8 plain-text mail through *SMTP_host*."""
    client = smtplib.SMTP(SMTP_host)
    client.login(from_account, from_passwd)
    # Build the message.
    msg = MIMEText(content, 'plain', 'utf-8')
    msg['Subject'] = Header(subject, 'utf-8')
    msg['From'] = from_account
    msg['To'] = to_account
    client.sendmail(from_account, to_account, msg.as_string())
    client.quit()
# Script entry point.
if __name__ == '__main__':
    sendEmail()
    # receiver = '***'
    # send_email2(mail_host, mail_user, mail_pass, receiver, title, content)
```
#### File: jiangwenfan/pythonScripts/test.py
```python
def test(*s):
    """Demo: positional arguments are collected into the tuple *s*."""
    print("---", s)
# Each call prints the collected varargs tuple (expected output in comments).
test() #()
test(1) #(1,)
test(1,2) #(1,2)
test(1,2,3) #(1,2,3)
def test2(**s):
    """Demo: keyword arguments are collected into the dict *s*."""
    print("+-+", s)
# Each call prints the collected keyword dict (expected output in comments).
test2() #{}
test2(name="dog") #{'name': 'dog'}
test2(name="dog",man="people") # {'name': 'dog', 'man': 'people'}
test2(name="dog",man="people",girl="gold") #{'name': 'dog', 'man': 'people', 'girl': 'gold'}
# Any truthy condition runs the block; `a` leaks out (no block scoping).
if(1):
    a =10
print(a)
```
#### File: pythonScripts/wallpaperDownload/SwitchingWallpaper.py
```python
import os
import random
img_num = 0        # number of images found by get_imgPath()
img_path_list = [] # full paths of the wallpaper images
#duration1 = "30" # default wallpaper duration (seconds)
#img1_path = "a/b/test.png"
#img2_path = "a/b/test2.jpg"
#wallpaperConfigFile = "none" # randomly-named generated wallpaper config file
info_head = '''
<background>
<starttime>
<year>2020</year>
<month>3</month>
<day>07</day>
<hour>00</hour>
<minute>00</minute>
<second>00</second>
</starttime>
'''
info_tail = "</background>"
def xml1_middle(duration1, img1_path, img2_path):
    """Return one <static>+<transition> slideshow fragment: *img1_path* is
    shown for *duration1* seconds, then cross-fades (1s) into *img2_path*."""
    template = '''
    <static>
    <duration>{dur}</duration>
    <file>{img1}</file>
    </static>
    <transition>
    <duration>1</duration>
    <from>{img1}</from>
    <to>{img2}</to>
    </transition>
    '''
    return template.format(dur=duration1, img1=img1_path, img2=img2_path)
info2_head = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE wallpapers SYSTEM "gnome-wp-list.dtd">
<wallpapers>
"""
def xml2_middle(wallpaperConfigFile):
    """Return the <wallpaper> entry that registers the generated slideshow.

    Fix: the original emitted ``#...</filename>`` — a stray comment marker
    where the opening <filename> tag belonged — producing invalid XML.
    """
    info2_middle = """
    <wallpaper deleted="false">
    <name>mypybackground</name>
    <filename>""" + wallpaperConfigFile + """</filename>
    <options>zoom</options>
    </wallpaper>
    """
    return info2_middle
info2_tail = """
</wallpapers>
"""
def randomNumber(bit):
    """Return a random config-file stem: 'py' followed by *bit* random digits.

    Cleanup: builds the digit string with str.join instead of repeated
    concatenation and drops the stray C-style semicolons.
    """
    digits = ''.join(str(random.randint(0, 9)) for _ in range(bit))
    return "py" + digits
def get_imgPath(path="/home/jiang/images/"):
    """Collect the path of every file under *path*.

    Appends each path to the module-level ``img_path_list`` and stores the
    file count in the module-level ``img_num``.  *path* defaults to the
    original hard-coded directory so existing callers keep working.

    Fixes: the original counted files by shelling out to ``ls -l | wc -l``
    and then called ``os.listdir`` once per file (O(n^2), and the two views
    of the directory could disagree if it changed in between).  A single
    ``os.listdir`` is both correct and O(n).
    """
    global img_num
    names = os.listdir(path)
    img_num = len(names)
    for img_name in names:
        img_path_list.append(path + img_name)
def modifyFile1(xmlName1, duration):
    """Write the slideshow wallpaper XML to *xmlName1*.

    Each image is shown for *duration* seconds and transitions into the
    next one; the last image wraps around to the first.

    Bug fix: the wrap-around test was ``i + 2 >= len(...)``, which made the
    second-to-last image transition to image 0 and left the last image
    unreachable as a transition target.  Only the next index (``i + 1``)
    needs to be range-checked.
    """
    get_imgPath()  # populate img_path_list / img_num from the image directory
    with open(xmlName1, 'a') as f:
        f.write(info_head)  # XML header carrying the slideshow start time
        for i in range(img_num):
            img1_path = img_path_list[i]
            if i + 1 >= len(img_path_list):
                img2_path = img_path_list[0]  # last slide wraps to the first
            else:
                img2_path = img_path_list[i + 1]
            f.write(xml1_middle(duration, img1_path, img2_path))
        f.write(info_tail)
def modifyFile2(xmlName2, wallpaper_config_file):
    """Rewrite the GNOME wallpaper-registry XML so it references the new
    slideshow config file.

    Improvement: the original cleared the file by shelling out to
    ``echo " " > file`` (leaving a stray " \\n" first line) and then opened
    it in append mode.  Opening in 'w' mode truncates it safely without a
    subshell.
    """
    with open(xmlName2, 'w') as f:
        f.write(info2_head)
        f.write(xml2_middle(wallpaper_config_file))
        f.write(info2_tail)
def main():
    """Install a generated slideshow wallpaper for the current Ubuntu release.

    Backs up the distro's wallpaper XML files, writes a new slideshow config
    under a random name, and registers it with GNOME.  Requires write access
    to /usr/share (typically root).

    Fix: the original bound the lsb_release output to a local named ``str``,
    shadowing the builtin.
    """
    release_info = os.popen("lsb_release -a | grep Codename").read()
    Codename = release_info.split()[1]  # e.g. "focal"
    base = "/usr/share/backgrounds/contest/"
    backup_file = base + Codename + "_py3_backup" + ".xml"
    os.system("cp " + base + Codename + ".xml" + " " + backup_file)  # back up slideshow xml
    base2 = "/usr/share/gnome-background-properties/"
    xmlFile2 = base2 + Codename + "-wallpapers.xml"
    os.system("cp " + xmlFile2 + " " + base2 + Codename + "-wallpapers_py3_backup.xml")  # back up registry xml
    fileName1 = base + randomNumber(4) + ".xml"
    os.system("touch " + fileName1)  # create the new slideshow config file
    modifyFile1(fileName1, "60")
    modifyFile2(xmlFile2, fileName1)
# Run only when executed as a script (needs root for the /usr/share paths).
if __name__ == "__main__":
    main()
```
#### File: white-head-mountain/pcdn/getwechat.py
```python
import requests
import sys
import json
#get wechat group name
def getProxyInfo(nodeName):
    """Query the CDN API for the proxy/node details of *nodeName*.

    The returned dict contains (per the upstream API):
      address            - proxy address
      name               - Chinese node name
      ename              - English node name
      supplier_name      - proxy supplier company
      wechat_group_name  - WeChat group name
    """
    url = "https://ker.bs58i.baishancdnx.com/getProxyInfo"
    params = {"searchString": nodeName}
    raw = requests.get(url, params)
    return json.loads(raw.text)
#demo
#print(getProxyInfo("联通-辽宁-辽阳-小节点-4"))
if __name__ == '__main__':
    # Interactive/batch variants are kept below for reference; currently the
    # script just echoes its command-line arguments.
    #while True:
    #nodeList = str(input("node name:\n "))
    #nodeList = []
    #for line in sys.stdin:
    #    if line != "q":
    #        nodeList.append(line)
    #    break
    #print("\n")
    #print(getProxyInfo(input("node name: "))['wechat_group_name'])
    #print(lines)
    print(sys.argv)
```
#### File: white-head-mountain/pcdn/mainpcdn.py
```python
from hostNameHandle import hostNameHandle
from gethostList import get_ips
from getwechat import getProxyInfo
from gethostNameIp import getIps
from getFrequency import getFrequency
from getDownloadAccount import getInfo
from sendMessage import sendMessage
# Prompt the operator for the host name and the incident type.
hostName = input("主机名:")
type = str(input("1 \"频繁掉线\" or 2 \"接口不在线\": "))  # NOTE(review): shadows builtin `type`
nodeName = hostNameHandle(hostName)
#print(nodeName)
hostList = get_ips(nodeName)  # host names belonging to the node
#print(hostList)
hoststr = " \n ".join(hostList)  # host names joined with newlines
#print(hoststr)
hostipList = getIps(hoststr)  # resolved host IP list
hostipStr = "\n".join(hostipList)
proxyInfoDic = getProxyInfo(nodeName)
wecharGroup = proxyInfoDic["wechat_group_name"]  # WeChat group (duplicates wxgroup below)
#print(proxyInfoDic)
nameNode = proxyInfoDic['name']  # human-readable node name
wxgroup = proxyInfoDic['wechat_group_name']  # destination WeChat group
#print(hostipList)
def show(messageHead, messageBody):
    """Print a summary to the console and push the full message to WeChat.

    Uses the module-level ``wxgroup`` as the destination group.
    """
    print("\n\t 节点名:\n\t\t" + messageHead.strip("\n"))
    print("\t 微信群:" + wxgroup)
    message = messageHead + "\n" + messageBody
    #print(message)
    sendMessage(message, wxgroup)
def getres():
    """Return the full contents of the offline-statistics result file."""
    with open("/home/jwf/pfdx/result.txt", 'r') as handle:
        contents = handle.read()
    return contents
# Dispatch on the incident type chosen by the operator.
if type == "1":
    # Frequent disconnects: getFrequency writes statistics to result.txt,
    # which getres() reads back as the message body.
    getFrequency(hostipStr)  # emit disconnect statistics
    messageHead = nameNode + " 频繁掉线,麻烦处理一下!\n"
    messageBody = getres()
    show(messageHead, messageBody)
elif type == "2":
    # Account cannot dial up: forward the download-account info directly.
    data = getInfo(hostipStr)
    messageHead = nameNode + " 账号拨不上,麻烦处理一下!\n"
    messageBody = data
    show(messageHead, messageBody)
else:
    print("input is what fuck")
```
#### File: bbsBear/indexContent/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader,RequestContext
# Create your views here.
def _render(request, template_name):
    """Render *template_name* with an empty RequestContext into an HttpResponse.

    NOTE(review): passing a RequestContext to Template.render only works on
    old Django versions (modern render expects a dict) — confirm the
    project's Django version.
    """
    temp = loader.get_template(template_name)
    content = RequestContext(request, {})
    res_html = temp.render(content)
    return HttpResponse(res_html)


def index(request):
    """Serve the forum landing page."""
    return _render(request, 'indexContent/index.html')


def newest(request):
    """Serve the 'newest posts' page."""
    return _render(request, 'indexContent/newest.html')
``` |
{
"source": "jiangwenj02/class-balanced-loss",
"score": 3
} |
#### File: class-balanced-loss/src/generate_cifar_tfrecords.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tarfile
from six.moves import cPickle as pickle
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
def download_and_extract(data_dir, data_version):
    """Download the CIFAR tarball into *data_dir* and extract it there.

    *data_version* selects the dataset: '10' for CIFAR-10, anything else
    for CIFAR-100.

    Fix: the tarball is now opened with a context manager so the file
    handle is closed after extraction (the original leaked it).
    """
    print('=' * 80)
    if data_version == '10':
        print('cifar-10 dataset')
        CIFAR_FILENAME = 'cifar-10-python.tar.gz'
    else:
        print('cifar-100 dataset')
        CIFAR_FILENAME = 'cifar-100-python.tar.gz'
    print('=' * 80)
    CIFAR_DOWNLOAD_URL = 'https://www.cs.toronto.edu/~kriz/' + CIFAR_FILENAME
    print('Download from {} and extract.'.format(CIFAR_DOWNLOAD_URL))
    # download CIFAR if not already downloaded (tf helper is a no-op otherwise).
    tf.contrib.learn.datasets.base.maybe_download(CIFAR_FILENAME, data_dir,
                                                  CIFAR_DOWNLOAD_URL)
    with tarfile.open(os.path.join(data_dir, CIFAR_FILENAME), 'r:gz') as tar:
        tar.extractall(data_dir)
def _int64_feature(value):
    # Wrap a single int in a tf.train.Feature (used for the TFRecord label).
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _bytes_feature(value):
    # Wrap raw bytes in a tf.train.Feature (used for the TFRecord image payload).
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _get_file_names(data_version):
"""Returns the file names expected to exist in the input_dir."""
file_names = {}
if data_version == '10':
file_names['train'] = ['data_batch_%d' % i for i in xrange(1, 6)]
# file_names['validation'] = ['data_batch_5']
file_names['eval'] = ['test_batch']
else:
file_names['train'] = ['train']
file_names['eval'] = ['test']
return file_names
def read_pickle_from_file(filename):
    """Load one CIFAR pickle from *filename*, decoding keys as bytes on Py3."""
    with tf.gfile.Open(filename, 'rb') as f:
        if sys.version_info >= (3, 0):
            # CIFAR pickles were written by Python 2; load keys/values as bytes.
            data_dict = pickle.load(f, encoding='bytes')
        else:
            data_dict = pickle.load(f)
    return data_dict
def convert_to_tfrecord(input_files, output_file, data_version):
    """Converts a file to TFRecords.

    Each CIFAR pickle in *input_files* is unpacked and every image/label
    pair is written to *output_file* as one tf.train.Example.
    """
    print('Generating %s' % output_file)
    with tf.python_io.TFRecordWriter(output_file) as record_writer:
        for input_file in input_files:
            data_dict = read_pickle_from_file(input_file)
            data = data_dict[b'data']
            # The label key depends on the dataset flavour.
            if data_version == '10':
                labels = data_dict[b'labels']
            elif data_version == '100':  # cifar-100
                labels = data_dict[b'fine_labels']
            else:  # cifar-20 (coarse CIFAR-100 labels)
                labels = data_dict[b'coarse_labels']
            num_entries_in_batch = len(labels)
            for i in range(num_entries_in_batch):
                example = tf.train.Example(features=tf.train.Features(
                    feature={
                        'image': _bytes_feature(data[i].tobytes()),
                        'label': _int64_feature(labels[i])
                    }))
                record_writer.write(example.SerializeToString())
def main(args):
    """Convert the already-extracted CIFAR pickles into per-split TFRecords."""
    data_dir = args.data_dir
    data_ver = args.CIFAR_data_version
    # download_and_extract(data_dir, data_ver)
    file_names = _get_file_names(data_ver)
    # '10' vs everything else ('20'/'100') selects the extracted folder name.
    if data_ver == '10':
        CIFAR_LOCAL_FOLDER = 'cifar-10-batches-py'
    else:
        CIFAR_LOCAL_FOLDER = 'cifar-100-python'
    input_dir = os.path.join(data_dir, CIFAR_LOCAL_FOLDER)
    for mode, files in file_names.items():
        input_files = [os.path.join(input_dir, f) for f in files]
        if data_ver == '10':
            output_file = os.path.join(data_dir, 'cifar10_' + mode + '.tfrecords')
        else:
            output_file = os.path.join(data_dir, 'cifar100_' + mode + '.tfrecords')
        print(input_files, output_file)
        # try:
        #     os.remove(output_file)
        # except OSError:
        #     pass
        # Convert to tf.train.Example and write them to TFRecords.
        convert_to_tfrecord(input_files, output_file, data_ver)
    print('Done!')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data-dir',
        type=str,
        default='',
        help='Directory to download and extract CIFAR-10/100 to.')
    parser.add_argument(
        '--CIFAR-data-version',
        type=str,
        default='10',
        help='CIFAR data version, 10, 20, or 100')
    args = parser.parse_args()
    # '20' selects the coarse (20-class) labels from the CIFAR-100 archive.
    if args.CIFAR_data_version not in ['10', '20', '100']:
        raise ValueError('--CIFAR-data-version: must be one of 10, 20, and 100')
    main(args)
``` |
{
"source": "jiangwenj02/CurveNet-V1",
"score": 2
} |
#### File: CurveNet-V1/processor/processor.py
```python
import sys
import argparse
import yaml
import numpy as np
import random
import os.path as osp
# torch
import torch
import torch.nn as nn
import torch.optim as optim
# torchlight
import torchlight
from torchlight import str2bool
from torchlight import DictAction
from torchlight import import_class
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score
from .io import IO
import subprocess
class Processor(IO):
    """
    Base Processor

    Orchestrates the train/test lifecycle: argument parsing, environment
    setup, model/data/optimizer loading, the epoch loop, logging, and
    checkpointing.  Subclasses are expected to override train(), test()
    and load_optimizer().
    """
    def __init__(self, argv=None):
        # Standard bring-up order: args -> env/seed -> model -> weights ->
        # GPU placement -> data -> optimizer.
        self.load_arg(argv)
        self.init_environment()
        self.load_model()
        self.load_weights()
        self.gpu()
        self.load_data()
        self.load_optimizer()
        self.label = []

    def init_environment(self):
        """Reset bookkeeping dicts and seed all RNGs."""
        super().init_environment()
        self.result = dict()
        self.iter_info = dict()
        self.epoch_info = dict()
        self.meta_info = dict(epoch=0, iter=0)
        self.set_seed(self.arg.seed)

    def get_gpu_memory_map(self):
        """Get the current gpu usage.
        Returns
        -------
        usage: dict
            Keys are device ids as integers.
            Values are memory usage as integers in MB.
        """
        result = subprocess.check_output(
            [
                'nvidia-smi', '--query-gpu=memory.used',
                '--format=csv,nounits,noheader'
            ], encoding='utf-8')
        # Convert lines into a dictionary
        gpu_memory = [int(x) for x in result.strip().split('\n')]
        gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
        return gpu_memory_map

    def load_optimizer(self):
        # Hook for subclasses; the base processor defines no optimizer.
        pass

    def test_conf(self, evaluation=True):
        """Run inference over the test loader and save a confusion-matrix plot."""
        self.model.eval()
        loader = self.data_loader['test']
        loss_value = []
        result_frag = []
        label_frag = []
        for data, label, index, _ in loader:
            # get data
            data = data.float().to(self.dev)
            label = label.long().to(self.dev)
            # inference
            with torch.no_grad():
                output = self.model(data)
            result_frag.append(output.data.cpu().numpy())
            # get loss
            if evaluation:
                loss = self.loss(output, label)
                loss_value.append(loss.mean().item())
                label_frag.append(label.data.cpu().numpy())
        self.result = np.concatenate(result_frag)
        if evaluation:
            self.label = np.concatenate(label_frag)
            self.epoch_info['mean_loss'] = np.mean(loss_value)
            self.show_epoch_info()
            # show top-k accuracy
            for k in self.arg.show_topk:
                self.show_topk(k)
        # Predicted class = highest-scoring column per sample.
        rank = self.result.argsort()
        rank = rank[:, -1]
        plt.figure(figsize=(5, 5))
        confusion = confusion_matrix(self.label, rank)
        print(confusion[0, :].sum())
        # NOTE(review): every row is normalized by the sample count of class 0;
        # this is only a percentage if all classes are equally sized — confirm.
        confusion = confusion / confusion[0, :].sum()
        confusion = 100 * confusion
        plt.matshow(confusion, cmap=plt.cm.Greens)
        plt.colorbar()
        # for i in range(len(confusion)):
        #     for j in range(len(confusion)):
        #         string = str(round(confusion[i,j],1))
        #         plt.annotate(string, xy=(i, j), horizontalalignment='center', verticalalignment='center', fontsize=8)
        plt.title('Ours', fontsize=18)
        plt.ylabel('True label', fontsize=15)
        plt.xlabel('Predicted label', fontsize=15)
        plt.savefig(osp.join(self.arg.work_dir, 'confusion.jpg'), bbox_inches='tight')

    def save_model(self, model, name):
        """Checkpoint model/sensinet/optimizer state to <work_dir>/<name>.

        NOTE(review): this saves ``self.model`` (not the ``model`` argument)
        and requires ``self.sensinet`` / ``self.optimizer_sensinet`` to exist;
        presumably a derived processor defines them — confirm.
        """
        model_path = '{}/{}'.format(self.work_dir, name)
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'sensinet_state_dict': self.sensinet.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'optimizer_sensinet_state_dict': self.optimizer_sensinet.state_dict(),
            'meta_epoch': self.meta_info['epoch'],
            'meta_iter': self.meta_info['iter']
        }, model_path)
        self.print_log('The model has been saved as {}.'.format(model_path))

    def load_weights(self):
        """Load model weights from --weights if given (a plain state_dict)."""
        # self.arg.phase = 'test'
        # self.arg.weights = osp.join(self.arg.work_dir, 'best_model.pt')
        if self.arg.weights:
            checkpoint = torch.load(self.arg.weights)
            self.model.load_state_dict(checkpoint)
            # self.model.load_state_dict(checkpoint['model_state_dict'])
            # self.sensinet.load_state_dict(checkpoint['sensinet_state_dict'])
            # self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            # self.optimizer_sensinet.load_state_dict(checkpoint['optimizer_sensinet_state_dict'])
            # self.arg.start_epoch = checkpoint['meta_epoch']
            # self.meta_info['meta_iter'] = checkpoint['meta_iter']

    def show_topk(self, k):
        """Log top-k accuracy overall and per class; checkpoint on a new best.

        NOTE(review): ``self.best_acc`` and ``self.train_writer`` are not
        initialized in this class — presumably set by a subclass or by IO;
        confirm before relying on this method from the base class.
        """
        rank = self.result.argsort()
        # hit_top_k[i] is True when the true label is among the k best scores.
        hit_top_k = [l in rank[i, -k:] for i, l in enumerate(self.label)]
        hit_top_k_cls = []
        hit_top_k_cls_num = []
        for cls in range(self.arg.model_args['num_classes']):
            hit_top_k_cls.append([(l in rank[i, -k:]) * (l == cls) for i, l in enumerate(self.label)])
            hit_top_k_cls_num.append([l == cls for i, l in enumerate(self.label)])
        accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)
        accuracy_cls = [sum(hit_top_k_cls[i]) * 1.0 / sum(hit_top_k_cls_num[i]) for i in range(self.arg.model_args['num_classes'])]
        if accuracy > self.best_acc:
            self.best_acc = accuracy
            filename = 'best_model.pt'
            self.io.save_model(self.model, filename)
        self.train_writer.add_scalar('accuracy/test_acc', 100 * accuracy, self.meta_info['epoch'])
        for i in range(self.arg.model_args['num_classes']):
            self.train_writer.add_scalar('accuracy/test_acc_cls_' + str(i), 100 * accuracy_cls[i], self.meta_info['epoch'])
        self.io.print_log('\tTop{}: {:.2f}%'.format(k, 100 * accuracy))
        self.io.print_log('\tBest accuracy Top{}: {:.2f}%'.format(k, 100 * self.best_acc))

    def load_data(self):
        """Build the train / meta_train / test DataLoaders from the feeder args."""
        Feeder = import_class(self.arg.feeder)
        if 'debug' not in self.arg.train_feeder_args:
            self.arg.train_feeder_args['debug'] = self.arg.debug
        self.data_loader = dict()
        if self.arg.phase == 'train':
            self.data_loader['train'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.train_feeder_args),
                batch_size=self.arg.batch_size,
                shuffle=True,
                num_workers=self.arg.num_worker * torchlight.ngpu(
                    self.arg.device),
                drop_last=True, pin_memory=True)
            # Second loader over the same feeder args for meta-learning steps.
            self.data_loader['meta_train'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.train_feeder_args),
                batch_size=self.arg.batch_size,
                shuffle=True,
                num_workers=self.arg.num_worker * torchlight.ngpu(
                    self.arg.device),
                drop_last=True, pin_memory=True)
        if self.arg.test_feeder_args:
            self.data_loader['test'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.test_feeder_args),
                batch_size=self.arg.test_batch_size,
                shuffle=False,
                num_workers=self.arg.num_worker * torchlight.ngpu(
                    self.arg.device), pin_memory=True)

    def show_epoch_info(self):
        """Print the per-epoch metrics and optionally mirror them to pavi."""
        for k, v in self.epoch_info.items():
            self.io.print_log('\t{}: {}'.format(k, v))
        if self.arg.pavi_log:
            self.io.log('train', self.meta_info['iter'], self.epoch_info)

    def show_iter_info(self):
        """Print per-iteration metrics every --log_interval iterations."""
        if self.meta_info['iter'] % self.arg.log_interval == 0:
            info = '\tIter {} Done.'.format(self.meta_info['iter'])
            for k, v in self.iter_info.items():
                if isinstance(v, float):
                    info = info + ' | {}: {:.4f}'.format(k, v)
                else:
                    info = info + ' | {}: {}'.format(k, v)
            self.io.print_log(info)
            if self.arg.pavi_log:
                self.io.log('train', self.meta_info['iter'], self.iter_info)

    def train(self):
        # Placeholder training loop; subclasses override with real optimization.
        for _ in range(100):
            self.iter_info['loss'] = 0
            self.show_iter_info()
            self.meta_info['iter'] += 1
        self.epoch_info['mean loss'] = 0
        self.show_epoch_info()

    def test(self):
        # Placeholder evaluation loop; subclasses override with real inference.
        for _ in range(100):
            self.iter_info['loss'] = 1
            self.show_iter_info()
        self.epoch_info['mean loss'] = 1
        self.show_epoch_info()

    def set_seed(self, seed):
        """Seed torch/CUDA/numpy/random and make cuDNN deterministic."""
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        np.random.seed(seed)
        random.seed(seed)
        torch.backends.cudnn.deterministic = True

    def start(self):
        """Entry point: run the training loop or a single evaluation pass."""
        self.io.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
        # training phase
        if self.arg.phase == 'train':
            for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
                self.meta_info['epoch'] = epoch
                # training
                self.io.print_log('Training epoch: {}'.format(epoch))
                self.train()
                self.io.print_log('Done.')
                # save model
                if ((epoch + 1) % self.arg.save_interval == 0) or (
                        epoch + 1 == self.arg.num_epoch):
                    filename = 'epoch{}_model.pt'.format(epoch + 1)
                    self.io.save_model(self.model, filename)
                # evaluation
                if ((epoch + 1) % self.arg.eval_interval == 0) or (
                        epoch + 1 == self.arg.num_epoch):
                    self.io.print_log('Eval epoch: {}'.format(epoch))
                    self.test()
                    self.io.print_log('Done.')
        # test phase
        elif self.arg.phase == 'test':
            # the path of weights must be appointed
            if self.arg.weights is None:
                raise ValueError('Please appoint --weights.')
            self.io.print_log('Model: {}.'.format(self.arg.model))
            self.io.print_log('Weights: {}.'.format(self.arg.weights))
            # evaluation
            self.io.print_log('Evaluation Start:')
            self.test()
            self.io.print_log('Done.\n')
            # save the output of model
            if self.arg.save_result:
                result_dict = dict(
                    zip(self.data_loader['test'].dataset.sample_name,
                        self.result))
                self.io.save_pkl(result_dict, 'test_result.pkl')

    @staticmethod
    def get_parser(add_help=False):
        """Build the base argument parser shared by all processors."""
        #region arguments yapf: disable
        # parameter priority: command line > config > default
        parser = argparse.ArgumentParser(add_help=add_help, description='Base Processor')
        parser.add_argument('-w', '--work_dir', default='./work_dir/tmp', help='the work folder for storing results')
        parser.add_argument('-c', '--config', default=None, help='path to the configuration file')
        # processor
        parser.add_argument('--phase', default='train', help='must be train or test')
        parser.add_argument('--save_result', type=str2bool, default=False, help='if ture, the output of the model will be stored')
        parser.add_argument('--start_epoch', type=int, default=0, help='start training from which epoch')
        parser.add_argument('--num_epoch', type=int, default=80, help='stop training in which epoch')
        parser.add_argument('--use_gpu', type=str2bool, default=True, help='use GPUs or not')
        parser.add_argument('--device', type=int, default=0, nargs='+', help='the indexes of GPUs for training or testing')
        # visulize and debug
        parser.add_argument('--log_interval', type=int, default=100, help='the interval for printing messages (#iteration)')
        parser.add_argument('--save_interval', type=int, default=10, help='the interval for storing models (#iteration)')
        parser.add_argument('--eval_interval', type=int, default=5, help='the interval for evaluating models (#iteration)')
        parser.add_argument('--save_log', type=str2bool, default=True, help='save logging or not')
        parser.add_argument('--print_log', type=str2bool, default=True, help='print logging or not')
        parser.add_argument('--pavi_log', type=str2bool, default=False, help='logging on pavi or not')
        # feeder
        parser.add_argument('--feeder', default='feeder.feeder', help='data loader will be used')
        parser.add_argument('--num_worker', type=int, default=4, help='the number of worker per gpu for data loader')
        parser.add_argument('--train_feeder_args', action=DictAction, default=dict(), help='the arguments of data loader for training')
        parser.add_argument('--train_meta_feeder_args', action=DictAction, default=dict(), help='the arguments of meta data loader for training')
        parser.add_argument('--test_feeder_args', action=DictAction, default=dict(), help='the arguments of data loader for test')
        parser.add_argument('--batch_size', type=int, default=256, help='training batch size')
        parser.add_argument('--test_batch_size', type=int, default=256, help='test batch size')
        parser.add_argument('--debug', action="store_true", help='less data, faster loading')
        # model
        parser.add_argument('--model', default=None, help='the model will be used')
        parser.add_argument('--model_args', action=DictAction, default=dict(), help='the arguments of model')
        parser.add_argument('--weights', default=None, help='the weights for network initialization')
        parser.add_argument('--ignore_weights', type=str, default=[], nargs='+', help='the name of weights which will be ignored in the initialization')
        # NOTE(review): the help strings of --warmup_epoch and --seed look
        # copy-pasted from other flags — confirm intended descriptions.
        parser.add_argument('--warmup_epoch', type=int, default=0, help='the name of weights which will be ignored in the initialization')
        parser.add_argument('--alpha_factor', type=float, default=0.1, help='initial learning rate')
        parser.add_argument('--seed', type=int, default=1, help='the model will be used')
        #endregion yapf: enable
        return parser
``` |
{
"source": "jiangwenj02/Meta-weight-net_class-imbalance",
"score": 2
} |
#### File: jiangwenj02/Meta-weight-net_class-imbalance/data_utils.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision
import numpy as np
import copy
from load_corrupted_data2 import CIFAR10, CIFAR100
def build_dataset(args):
    """Build train / meta-train / test DataLoaders for (possibly corrupted,
    possibly imbalanced) CIFAR-10 or CIFAR-100.

    Returns (train_loader, train_meta_loader, test_loader).
    """
    # Per-channel CIFAR statistics, scaled from the 0-255 range to 0-1.
    normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
    if args.augment:
        # Standard CIFAR augmentation: reflect-pad 4, random 32x32 crop, random flip.
        train_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
                                              (4, 4, 4, 4), mode='reflect').squeeze()),
            transforms.ToPILImage(),
            transforms.RandomCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        train_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])
    if args.dataset == 'cifar10':
        # meta=True selects the small clean subset used for meta-learning.
        train_data_meta = CIFAR10(
            root='./data', train=True, meta=True, num_meta=args.num_meta, corruption_prob=args.corruption_prob,
            corruption_type=args.corruption_type, transform=train_transform, download=True)
        train_data = CIFAR10(
            root='./data', train=True, meta=False, num_meta=args.num_meta, corruption_prob=args.corruption_prob,
            corruption_type=args.corruption_type, transform=train_transform, download=True, seed=args.seed, imblance=args.imblance, imb_factor=args.imb_factor)
        test_data = CIFAR10(root='./data', train=False, transform=test_transform, download=True)
    elif args.dataset == 'cifar100':
        train_data_meta = CIFAR100(
            root='./data', train=True, meta=True, num_meta=args.num_meta, corruption_prob=args.corruption_prob,
            corruption_type=args.corruption_type, transform=train_transform, download=True)
        # np.save('train_data_meta_train_data.npy', train_data_meta.train_data)
        train_data = CIFAR100(
            root='./data', train=True, meta=False, num_meta=args.num_meta, corruption_prob=args.corruption_prob,
            corruption_type=args.corruption_type, transform=train_transform, download=True, seed=args.seed, imblance=args.imblance, imb_factor=args.imb_factor)
        test_data = CIFAR100(root='./data', train=False, transform=test_transform, download=True)
        # work_dir = 'cifar100_cor0.4_imb0.005'
        # np.save(os.path.join(work_dir, 'train_data.npy'), train_data.train_data)
        # np.save(os.path.join(work_dir, 'train_labels.npy'), train_data.train_labels)
        # np.save(os.path.join(work_dir, 'true_labels.npy'), train_data.true_labels)
        # np.save(os.path.join(work_dir,'meta_train_data.npy'), train_data_meta.train_data)
        # np.save(os.path.join(work_dir,'meta_train_labels.npy'), train_data_meta.train_labels)
        # np.save(os.path.join(work_dir,'meta_true_labels.npy'), train_data_meta.true_labels)
        # np.save(os.path.join(work_dir,'test_data.npy'), test_data.test_data)
        # np.save(os.path.join(work_dir,'test_labels.npy'), test_data.test_labels)
    train_loader = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True,
        num_workers=args.prefetch, pin_memory=True)
    train_meta_loader = torch.utils.data.DataLoader(
        train_data_meta, batch_size=args.batch_size, shuffle=True,
        num_workers=args.prefetch, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False,
                                              num_workers=args.prefetch, pin_memory=True)
    return train_loader, train_meta_loader, test_loader
``` |
{
"source": "jiangwenj02/MLC",
"score": 2
} |
#### File: jiangwenj02/MLC/logger.py
```python
import logging
def get_logger(filename, local_rank):
    """Configure and return the root logger.

    Rank 0 (or a negative rank) with a filename logs to both the file and
    stdout; every other configuration gets a NullHandler so its records
    are dropped.
    """
    fmt = logging.Formatter(
        fmt='[%(asctime)s %(levelname)s] %(message)s',
        datefmt='%m/%d/%Y %I:%M:%S %p')

    root = logging.getLogger()
    root.handlers = []  # drop handlers left over from an earlier call
    root.setLevel(logging.INFO)
    root.propagate = False

    if filename is None or local_rank > 0:
        # Secondary GPU / no file: swallow all records.
        sink = logging.NullHandler()
        sink.setLevel(logging.INFO)
        handlers = [sink]
    else:
        # Only the first GPU logs, to both the file and stdout.
        handlers = []
        for handler in (logging.FileHandler(filename, 'a'), logging.StreamHandler()):
            handler.setLevel(logging.INFO)
            handler.setFormatter(fmt)
            handlers.append(handler)

    for handler in handlers:
        root.addHandler(handler)
    return root
``` |
{
"source": "jiangwenj02/mmclassification",
"score": 3
} |
#### File: models/utils/local_att.py
```python
import mmcv
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule,constant_init, normal_init
import torch.nn.functional as F
class LocalAtten(nn.Module):
    """Local window attention module.

    For every spatial position, a softmax-weighted average of its
    atten_size x atten_size neighbourhood (computed on a channel-reduced
    feature map) is projected back to `channels` and added to the input
    as a residual.  (The original docstring described Squeeze-and-
    Excitation, which this module is not.)

    Args:
        channels (int): The input (and output) channels of the layer.
        padding_size (int): Zero padding applied before unfolding.
            NOTE(review): the unfolded grid only matches H x W when
            2 * padding_size + 1 == atten_size (true for the defaults);
            confirm for other configurations.
        atten_size (int): Side length of the local attention window.
        ratio (int): Channel squeeze ratio; the intermediate channel count
            is ``int(channels / ratio)``.
        conv_cfg (None or dict): Config dict for the convolution layer.
            Default: None, which means using conv2d.
        act_cfg (dict or Sequence[dict]): Config dict(s) for the activation
            layers, as in mmcv's ConvModule.  If a single dict is given it
            is used for both layers.
    """
    def __init__(self,
                 channels,
                 padding_size=1,
                 atten_size=3,
                 ratio=16,
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'))):
        super(LocalAtten, self).__init__()
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.atten_size = atten_size
        self.inter_channels = int(channels / ratio)
        # NOTE(review): defined but never used in forward() — confirm intent.
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.padding_size = padding_size
        # 1x1 conv reducing channels before the neighbourhood aggregation.
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=self.inter_channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        # 1x1 conv producing one attention logit per window position.
        self.conv2 = ConvModule(
            in_channels=channels,
            out_channels=self.atten_size * self.atten_size,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        # 1x1 conv projecting the aggregated features back to `channels`.
        self.conv_out = ConvModule(
            in_channels=self.inter_channels,
            out_channels=channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])

    def init_weights(self, std=0.01, zeros_init=True):
        """Normal-init all convs; optionally zero the output conv so the
        residual branch starts as identity."""
        for m in [self.conv1, self.conv2, self.conv_out]:
            normal_init(m.conv, std=std)
        if zeros_init:
            constant_init(self.conv_out.conv, 0)
        else:
            normal_init(self.conv_out.conv, std=std)

    def forward(self, x):
        # x: (N, channels, H, W)
        N, _, H, W = x.shape
        # Channel-reduced features that will be aggregated per window.
        x_ref = self.conv1(x)
        x_ref = F.pad(x_ref, (self.padding_size, self.padding_size, self.padding_size, self.padding_size), "constant", 0)
        # Extract every atten_size x atten_size neighbourhood (stride 1).
        x_ref = x_ref.unfold(2, self.atten_size, 1)
        x_ref = x_ref.unfold(3, self.atten_size, 1)
        x_ref = x_ref.permute(0, 2, 3, 1, 4, 5).reshape(N, H * W, self.inter_channels, self.atten_size * self.atten_size)  # N, HW, C, SIZE^2
        # Attention logits per position, softmax-normalized over the window.
        x_attn = self.conv2(x).permute(0, 2, 3, 1).reshape(N, H * W, -1, 1)  # N, HW, SIZE^2, 1
        x_attn = x_attn.softmax(-2)
        # Weighted sum of the window features.
        x_ref = torch.matmul(x_ref, x_attn)  # N, HW, C, 1
        x_ref = x_ref.reshape(N, H, W, -1).permute(0, 3, 1, 2)
        x_ref = self.conv_out(x_ref)
        # Residual connection back onto the input.
        x = x + x_ref
        return x
``` |
{
"source": "jiangwenj02/mmdetection",
"score": 2
} |
#### File: mmdetection/tools/extra_nms.py
```python
import numpy as np
def color_libs(index=0):
    """Return an RGB tuple from a fixed 6-color palette, cycling by *index*.

    Fix: the original used ``index % 5``, which made the sixth palette
    entry (magenta) unreachable; cycle over the whole palette instead.
    """
    clibs = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255), (255, 0, 255)]
    return clibs[index % len(clibs)]
def xyxy2xywh(bbox):
    """Convert an (x1, y1, x2, y2) box array to [x, y, w, h].

    Widths/heights are inclusive (the +1 pixel convention used by the
    surrounding NMS code).
    """
    x1, y1, x2, y2 = bbox.tolist()
    return [x1, y1, x2 - x1 + 1, y2 - y1 + 1]
def py_cpu_nms(dets, scores, thresh):
    """Pure Python greedy NMS.

    dets:   (N, 4) array of boxes as [x1, y1, x2, y2]
    scores: (N,) confidence for each box
    thresh: IoU above which a lower-scoring box is suppressed

    Returns the indices of the surviving boxes, highest score first.
    """
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    # Inclusive pixel areas (+1 convention).
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)

    order = scores.argsort()[::-1]  # candidate indices, best score first
    keep = []
    while order.size > 0:
        best = order[0]  # the highest-scoring remaining box is always kept
        keep.append(best)

        # Intersection of `best` with every other remaining box.
        xx1 = np.maximum(x1[best], x1[order[1:]])
        yy1 = np.maximum(y1[best], y1[order[1:]])
        xx2 = np.minimum(x2[best], x2[order[1:]])
        yy2 = np.minimum(y2[best], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[best] + areas[order[1:]] - inter)

        # Keep only boxes whose overlap with `best` is within the threshold
        # (+1 maps positions in iou back to positions in `order`).
        order = order[np.where(iou <= thresh)[0] + 1]
    return keep
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2):
    """Pure-Python Soft-NMS.

    :param dets: box coordinate matrix, format [y1, x1, y2, x2]
    :param sc: score of each box
    :param Nt: IoU overlap threshold
    :param sigma: variance of the gaussian re-weighting function
    :param thresh: final score threshold
    :param method: 1 = linear decay, 2 = gaussian decay, otherwise hard NMS
    :return: indices of the boxes that are kept
    """
    # indexes concatenate boxes with the last column so original indices
    # survive the in-place reordering below.
    N = dets.shape[0]
    indexes = np.array([np.arange(N)])
    dets = np.concatenate((dets, indexes.T), axis=1)
    # the order of boxes coordinate is [y1,x1,y2,x2]
    y1 = dets[:, 0]
    x1 = dets[:, 1]
    y2 = dets[:, 2]
    x2 = dets[:, 3]
    scores = sc
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    for i in range(N):
        # intermediate parameters for later parameters exchange
        tBD = dets[i, :].copy()
        tscore = scores[i].copy()
        tarea = areas[i].copy()
        pos = i + 1
        # Locate the highest-scoring box among the remaining candidates.
        if i != N - 1:
            maxscore = np.max(scores[pos:], axis=0)
            maxpos = np.argmax(scores[pos:], axis=0)
        else:
            maxscore = scores[-1]
            maxpos = 0
        # Selection-sort step: swap row i with the current maximum so the
        # loop always processes the best remaining box next.
        if tscore < maxscore:
            dets[i, :] = dets[maxpos + i + 1, :]
            dets[maxpos + i + 1, :] = tBD
            tBD = dets[i, :]
            scores[i] = scores[maxpos + i + 1]
            scores[maxpos + i + 1] = tscore
            tscore = scores[i]
            areas[i] = areas[maxpos + i + 1]
            areas[maxpos + i + 1] = tarea
            tarea = areas[i]
        # IoU calculate
        xx1 = np.maximum(dets[i, 1], dets[pos:, 1])
        yy1 = np.maximum(dets[i, 0], dets[pos:, 0])
        xx2 = np.minimum(dets[i, 3], dets[pos:, 3])
        yy2 = np.minimum(dets[i, 2], dets[pos:, 2])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[pos:] - inter)
        # Three methods: 1.linear 2.gaussian 3.original NMS
        if method == 1:  # linear
            weight = np.ones(ovr.shape)
            weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
        elif method == 2:  # gaussian
            weight = np.exp(-(ovr * ovr) / sigma)
        else:  # original NMS
            weight = np.ones(ovr.shape)
            weight[ovr > Nt] = 0
        # Decay the scores of the boxes overlapping box i.
        scores[pos:] = weight * scores[pos:]
    # select the boxes and keep the corresponding indexes
    inds = dets[:, 4][scores > thresh]
    keep = inds.astype(int)
    return keep
# IoU threshold applied by nms_result.
nms_threshold = 0.3

def nms_result(json_result):
    """Apply hard NMS in place to json_result['results'].

    Each detection's COCO-style [x, y, w, h] bbox is converted to the
    [y1, x1, y2, x2] layout expected by py_cpu_nms; surviving detections
    replace the original list.
    """
    boxes = []
    boxscores = []
    for result in json_result['results']:
        boxes.append([int(result['bbox'][1]), int(result['bbox'][0]), int(result['bbox'][1]) + int(result['bbox'][3]), int(result['bbox'][0]) + int(result['bbox'][2])])
        boxscores.append(result['score'])
    boxes = np.array(boxes, dtype=np.float32)
    boxscores = np.array(boxscores, dtype=np.float32)
    #print(boxes)
    if len(boxes) > 0:
        #index = py_cpu_softnms(boxes, boxscores, method=3)
        indexes = py_cpu_nms(boxes, boxscores, nms_threshold)
        #print(index)
        temp_list = []
        for index in indexes:
            temp_list.append(json_result['results'][int(index)])
        json_result['results'] = temp_list
{
"source": "jiangwenj02/SOLO",
"score": 3
} |
#### File: SOLO/tools/voc_eval.py
```python
from argparse import ArgumentParser
import mmcv
from mmdet import datasets
from mmdet.core import eval_map
def voc_eval(result_file, dataset, iou_thr=0.5, nproc=4):
    """Evaluate pickled detection results on a VOC-style dataset and print mAP.

    result_file: path to mmcv-loadable per-image detection results
    dataset:     dataset object exposing get_ann_info() and CLASSES
    """
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    # VOC2007 uses its own AP convention; otherwise evaluate by class names.
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(
        det_results,
        annotations,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        logger='print',
        nproc=nproc)
def main():
    """Parse CLI arguments and run VOC evaluation on the configured test set."""
    parser = ArgumentParser(description='VOC Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('config', help='config file path')
    parser.add_argument(
        '--iou-thr',
        type=float,
        default=0.5,
        help='IoU threshold for evaluation')
    parser.add_argument(
        '--nproc',
        type=int,
        default=4,
        help='Processes to be used for computing mAP')
    args = parser.parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # Instantiate the test dataset described by the config.
    test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
    voc_eval(args.result, test_dataset, args.iou_thr, args.nproc)
# Script entry point.
if __name__ == '__main__':
    main()
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.